repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
henryscala/plain_sequence_chart | src/canvas.py | 1 | 5513 | import array
class Canvas:
BLANK=' '
HLINE='-'
VLINE='|'
HLARROW='<'
HRARROW='>'
VUARROW='^'
VDARROW='v'
INTERSECT='+'
XINTERSECT='*'
WAVEVLLINE='('
WAVEVRLINE=')'
WAVEHLINE='~'
def __init__(self,col,row):
self.row=row
self.column=col
self.canvas=array.array('b',[ord(self.BLANK)]*(row*col))
def __draw(self,col,row,char):
self.canvas[self.column*row+col]=ord(char)
def reset(self):
for i in range(self.column*self.row):
            self.canvas[i]=ord(self.BLANK)  # array('b') stores byte values, not str
def output(self):
for i in range(self.row):
lineStart=self.column*i
line=self.canvas[lineStart:lineStart+self.column].tostring().decode('utf-8')
line = line.rstrip()
if len(line) > 0:
print (line)
def point(self,col,row,char):
self.__draw(col,row,char)
def hline(self,col,row,length,direction=None,arrow=None,hChar=HLINE):
start=col
stop=col+length
if direction:
start=col-length+1
stop=col+1
for i in range(start,stop):
self.point(i,row,hChar)
if arrow:
if direction:
self.point(start,row,self.HLARROW)
else:
self.point(stop-1,row,self.HRARROW)
def vline(self,col,row,length,direction=None,arrow=None,vChar=VLINE):
start=row
stop=row+length
if direction:
start=row-length+1
stop=row+1
for i in range(start,stop):
self.point(col,i,vChar)
if arrow:
if direction:
self.point(col,start,Canvas.VUARROW)
else:
self.point(col,stop-1,Canvas.VDARROW)
def rect(self,col,row,width,height):
self.vline(col,row,height)
self.vline(col+width-1,row,height)
self.hline(col+1,row+height-1,width-2)
self.hline(col+1,row,width-2)
def waveRect(self,col,row,width,height):
self.vline(col,row,height,vChar=self.WAVEVLLINE)
self.vline(col+width-1,row,height,vChar=self.WAVEVRLINE)
self.hline(col+1,row+height-1,width-2,hChar=self.WAVEHLINE)
self.hline(col+1,row,width-2,hChar=self.WAVEHLINE)
def text(self,col,row,astr,center=None):
left=col
if center:
left=col-len(astr)//2
for i in range(len(astr)):
self.point(left+i,row,astr[i])
def __textRect(self,str,width=None):
strlen=len(str)
if not width :
cols=strlen
rows=1
elif strlen<=width:
cols=width
rows=1
else:
cols=width
rows=strlen//width
remain=strlen % width
if remain:
rows +=1
return (cols,rows)
def rectText(self,col,row,astr,width=None,center=None):
cols,rows=self.__textRect(astr,width)
for i in range(rows):
line=astr[cols*i:cols*i+cols]
if center:
self.text(col,row+1+i,line,center)
left=col-cols//2-1
top=row
width=cols+2
height=rows+2
self.rect(left,top,width,height)
else:
self.text(col+1,row+1+i,line,center)
left=col
top=row
width=cols+2
height=rows+2
self.rect(left,top,width,height)
return (width,height)
def waveRectText(self,col,row,astr,width=None,center=None):
cols,rows=self.__textRect(astr,width)
for i in range(rows):
line=astr[cols*i:cols*i+cols]
if center:
self.text(col,row+1+i,line,center)
left=col-cols//2-1
top=row
width=cols+2
height=rows+2
self.waveRect(left,top,width,height)
else:
self.text(col+1,row+1+i,line,center)
left=col
top=row
width=cols+2
height=rows+2
self.waveRect(left,top,width,height)
return (width,height)
def ordAt(self,col,row):
return self.canvas[self.column*row+col]
def isRowBlank(self,row):
for c in range(self.column):
if self.ordAt(c,row)!=ord(self.BLANK):
return False
return True
def isColumnBlank(self,column):
for r in range(self.row):
if self.ordAt(column,r)!=ord(self.BLANK):
return False
return True
def shiftLeft(self,fromColumn, numOfColumn=1):
for r in range(self.row):
for c in range(fromColumn,self.column):
self.point(c - numOfColumn, r, chr(self.ordAt(c,r)))
def shiftTop(self,fromRow, numOfRow=1):
for c in range(self.column):
for r in range(fromRow,self.row):
self.point(c, r-numOfRow, chr(self.ordAt(c,r)))
def trimLeftTop(self):
while self.isColumnBlank(0):
self.shiftLeft(1)
while self.isRowBlank(0):
self.shiftTop(1)
| gpl-3.0 | 2,124,022,629,261,501,000 | 29.627778 | 92 | 0.498821 | false |
fragaria/BorIS | boris/services/migrations/0019_workforclient.py | 1 | 1264 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('services', '0018_delete_pregnancytest'),
]
operations = [
migrations.CreateModel(
name='WorkForClient',
fields=[
('service_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='services.Service')),
('contact_institution', models.BooleanField(default=False, verbose_name='a) kontakt s institucemi')),
('message', models.BooleanField(default=False, verbose_name='b) zpr\xe1va, doporu\u010den\xed')),
('search_information', models.BooleanField(default=False, verbose_name='c) vyhled\xe1v\xe1n\xed a zji\u0161\u0165ov\xe1n\xed informac\xed pro klienta')),
('case_conference', models.BooleanField(default=False, verbose_name='d) p\u0159\xedpadov\xe1 konference')),
],
options={
'verbose_name': 'Pr\xe1ce ve prosp\u011bch klienta',
'verbose_name_plural': 'Pr\xe1ce ve prosp\u011bch klienta',
},
bases=('services.service',),
),
]
| mit | -2,708,543,291,182,270,000 | 42.586207 | 169 | 0.612342 | false |
morgenfree/column-store-tbat-exp-python | prepare/updateData.py | 1 | 5865 | '''
Apply updates from an update file to a BAT or TBAT file.

bat_file    -- the BAT/TBAT file to update
update_file -- update file; each line has the form "<line-number>,<value>"
'''
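# Illustrative usage (file names are placeholders; the real names come from
# config.py as bat_file_name / tbat_file_name / update_file_name):
#
#   updateBATFast('lineitem.bat', 'lineitem.update')   # overwrite lines in place
#   updateTBAT('lineitem.tbat', 'lineitem.update')     # append timestamped updates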
__author__ = 'fyu'
from config import *
BUFFERING_SIZE=1048576
#BUFFERING_SIZE=10
def updateBATFast(bat_file_name,update_file_name):
bat_file=open(bat_file_name,'r+')
update_file=open(update_file_name,'r')
for updateLine in update_file:
(updateLineNumStr,updateValue)=updateLine.split(',')
updateLineNum=long(updateLineNumStr)
bat_file.seek((updateLineNum-1)*len(updateLine))
bat_file.write(updateLine)
bat_file.seek(0)
bat_file.close()
update_file.close()
def updateBATFast2(bat_file_name,update_file_name):
with open(bat_file_name, 'r+', buffering=BUFFERING_SIZE) as bat_file:
with open(update_file_name, 'r', buffering=BUFFERING_SIZE) as update_file:
for updateLine in update_file:
(updateLineNumStr,updateValue)=updateLine.split(',')
updateLineNum=long(updateLineNumStr)
bat_file.seek((updateLineNum-1)*len(updateLine))
bat_file.write(updateLine)
bat_file.seek(0)
# bat_file.close()
# update_file.close()
def updateBAT1(bat_file_name,update_file_name):
bat_file=open(bat_file_name,'r+', buffering=BUFFERING_SIZE)
update_file=open(update_file_name,'r', buffering=BUFFERING_SIZE)
for updateLine in update_file:
(updateLineNumStr,updateValue)=updateLine.split(',')
updateLineNum=long(updateLineNumStr)
currentLineNum=1
while currentLineNum < updateLineNum: # simulating seeking next line
bat_file.seek(len(updateLine),1)
currentLineNum+=1
# bat_file.seek((currentLineNum-1)*len(updateLine))
# bat_file.seek((updateLineNum-1)*len(updateLine))
# print '%d\n' % currentLineNum
bat_file.write(updateLine)
bat_file.seek(0)
bat_file.close()
update_file.close()
def updateBAT2(bat_file_name,update_file_name):
bat_file=open(bat_file_name,'r+', buffering=BUFFERING_SIZE)
update_file=open(update_file_name,'r', buffering=BUFFERING_SIZE)
for updateLine in update_file:
(updateLineNumStr,updateValue)=updateLine.split(',')
updateLineNum=long(updateLineNumStr)
currentLineNum=1
while currentLineNum < updateLineNum: # simulating seeking next line
#print '%d\n' % bat_file.tell()
bat_file.seek(1,1)
currentLineNum+=1
# bat_file.seek((currentLineNum-1)*len(updateLine))
# bat_file.seek((updateLineNum-1)*len(updateLine))
print '%d\n' % currentLineNum
bat_file.write(updateLine)
bat_file.seek(0)
bat_file.close()
update_file.close()
def updateBAT3(bat_file_name,update_file_name):
with open(bat_file_name,'r+', buffering=BUFFERING_SIZE) as bat_file:
with open(update_file_name,'r', buffering=BUFFERING_SIZE) as update_file:
for updateLine in update_file:
(updateLineNumStr,updateValue)=updateLine.split(',')
updateLineNum=long(updateLineNumStr)
currentLineNum=1
updateLineLength=len(updateLine)
while currentLineNum < updateLineNum: # simulating seeking next line
bat_file.seek(updateLineLength, 1)
currentLineNum += 1
#print '%d\n' % currentLineNum
bat_file.write(updateLine)
bat_file.seek(0)
def updateTBAT(tbat_file_name,update_file_name):
updateTimeStamp=time.time()
tbat_file=open(tbat_file_name,'a', buffering=BUFFERING_SIZE)
update_file=open(update_file_name,'r', buffering=BUFFERING_SIZE)
for updateLine in update_file:
updateLine='%10g,%s' %(updateTimeStamp,updateLine)
# print updateLine
tbat_file.write(updateLine)
tbat_file.close()
update_file.close()
'''
def updateTBAT(tbat_file_name,update_file_name):
updateTimeStamp=time.time()
tbat_file=open(tbat_file_name,'a')
update_file=open(update_file_name,'r')
for updateLine in update_file:
updateLine='%10g,%s' %(updateTimeStamp,updateLine)
# print updateLine
tbat_file.write(updateLine)
tbat_file.close()
update_file.close()
'''
if __name__=='__main__':
bat_time_start=time.time()
    updateBATFast(bat_file_name,update_file_name)  # plain updateBAT is only sketched in comments below; use the fast variant
bat_time=time.time()-bat_time_start
print 'bat update time:'+str(bat_time)
tbat_time_start=time.time()
updateTBAT(tbat_file_name,update_file_name)
tbat_time=time.time()-tbat_time_start
print 'tbat update time:'+str(tbat_time)
overhead=(bat_time)/tbat_time*100
print 'overhead=%g%%' % (overhead)
'''
def updateBAT(bat_file_name,update_file_name):
bat_file=open(bat_file_name,'r+')
update_file=open(update_file_name,'r')
for updateLine in update_file:
(updateLineNumStr,updateValue)=updateLine.split(',')
#print updateLineNumStr+','+updateValue
updateLineNum=long(updateLineNumStr)
bat_file.seek((updateLineNum-1)*len(updateLine))
bat_file.write(updateLine)
bat_file.close()
update_file.close()
'''
'''
def updateBAT(bat_file_name,update_file_name):
bat_file=open(bat_file_name,'r+')
update_file=open(update_file_name,'r')
for updateLine in update_file:
updateLineNum=long(updateLine.split(',')[0])
seekLine=0
bat_file.seek(0)
for currentLine in bat_file: # simulate seeking the line to change
currentLineNum=long(currentLine.split(',')[0])
if currentLineNum == updateLineNum:
#print 'change line: %d' % (currentLineNum)
bat_file.seek(seekLine*len(currentLine))
bat_file.write(updateLine)
break
else:
seekLine+=1
bat_file.close()
update_file.close()
''' | gpl-2.0 | -2,389,603,193,760,424,000 | 35.434783 | 84 | 0.637852 | false |
Samsung/ADBI | idk/cachebuilder/sections.py | 1 | 1516 | from collections import namedtuple
import logging
from elftools.elf.constants import SH_FLAGS
Section = namedtuple('Section', 'id name type addr offset size flags')
class Sections(object):
def __init__(self, debuginfo):
self.debuginfo = debuginfo
def iter_sections():
# omit first section - it is always null section
for idx in range(1, self.debuginfo.elf.num_sections()):
section = self.debuginfo.elf.get_section(idx)
h = section.header
yield Section(idx, section.name, h['sh_type'], h['sh_addr'], h['sh_offset'], h['sh_size'], h['sh_flags'])
self.sections = list(iter_sections())
def addr2fo(self, addr):
'''Convert given virtual address to file offset.'''
for section in [s for s in self.sections if s.flags & SH_FLAGS.SHF_ALLOC]:
lo = section.addr
hi = lo + section.size
if lo <= addr < hi:
offset = addr - lo
return section.offset + offset
raise ValueError('Address %x is invalid.' % addr)
def store(self, conn):
logging.debug('Storing ELF sections')
query = 'insert into sections(id, name, type, addr, offset, size, flags) values (?, ?, ?, ?, ?, ?, ?)'
items = ((section.id, section.name, section.type, section.addr, section.offset, section.size, section.flags)
for section in self.sections )
conn.executemany(query, items)
conn.commit() | apache-2.0 | 7,481,293,933,677,235,000 | 41.138889 | 121 | 0.591689 | false |
yinwenpeng/rescale | wenpeng.py | 1 | 1057 | #!/usr/bin/env python
import os
import time
import threading
import sys
def Traverse(rootDir):
fileNo=0
for lists in os.listdir(rootDir):
path = os.path.join(rootDir, lists)
#print path
if os.path.isdir(path):
Traverse(path)
elif os.path.isfile(path):
file = open(path)
#ReadFile(file)
ReadFile(file, -1)
fileNo+=1
'''
if fileNo > 0:
return
'''
def calc_froebius_norm(m):
time.sleep(1)
return m
def calc_norm(m, i, norms):
print >> sys.stderr, 'Starting thread', i
norm = calc_froebius_norm(m)
norms[i] = norm
def main():
matrixes = [1, 2, 3, 4]
norms = [0] * len(matrixes)
threads = []
for i, m in enumerate(matrixes):
t = threading.Thread(target=calc_norm, args=(m, i, norms))
t.start()
threads.append(t)
for thread in threads:
t.join()
print >> sys.stderr, norms
if __name__ == '__main__':
main()
| gpl-3.0 | -4,074,478,246,556,086,300 | 17.875 | 66 | 0.512772 | false |
OpenMOOC/moocng | moocng/media_contents/__init__.py | 1 | 2157 | import json
from django.conf import settings
from . import handlers
def media_content_get_iframe_code(handler, content_id, **kwargs):
handler = handlers.get_handler(handler)
return handler.get_iframe_code(content_id, **kwargs)
def media_content_get_thumbnail_url(handler, content_id, **kwargs):
handler = handlers.get_handler(handler)
return handler.get_thumbnail_url(content_id, **kwargs)
def media_content_get_iframe_template(handler, content_id, **kwargs):
handler = handlers.get_handler(handler)
return handler.get_iframe_template(content_id, **kwargs)
def media_content_get_js_code(handler, **kwargs):
handler = handlers.get_handler(handler)
return handler.get_javascript_code(**kwargs)
def media_content_get_last_frame(handler, content_id, tmpdir, **kwargs):
handler = handlers.get_handler(handler)
return handler.get_last_frame(content_id, tmpdir, **kwargs)
def media_content_extract_id(handler, url, **kwargs):
handler = handlers.get_handler(handler)
return handler.extract_id(url, **kwargs)
def media_contents_javascripts(**kwargs):
course = kwargs.get('course', None)
handlers_ids = []
if course:
if course.promotion_media_content_type:
handlers_ids.append(course.promotion_media_content_type)
for unit in course.unit_set.all():
for kq in unit.knowledgequantum_set.all():
handlers_ids.append(kq.media_content_type)
for question in kq.question_set.all():
handlers_ids.append(question.solution_media_content_type)
handlers_ids = list(set(handlers_ids))
html = "<script>MEDIA_CONTENT_TYPES = %s;</script>" % json.dumps(dict([(item['id'], item) for item in settings.MEDIA_CONTENT_TYPES]))
for handler_id in handlers_ids:
handler = handlers.get_handler(handler_id)
html += handler.get_javascript_code(**kwargs)
return html
def get_media_content_types_choices():
choices = []
for handler_dict in settings.MEDIA_CONTENT_TYPES:
choices.append((handler_dict['id'], handler_dict.get('name', handler_dict['id'])))
return choices
| apache-2.0 | -2,980,033,750,012,387,300 | 32.703125 | 137 | 0.685211 | false |
SimonBiggs/electronfactors | test/test_poi.py | 1 | 1335 | # Copyright (C) 2015 Simon Biggs
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# http://www.gnu.org/licenses/.
import numpy as np
from electronfactors.ellipse.equivalent import poi_distance_method
def test_centre_of_square():
XCoords = np.array([-3, 3, 3, -3])
YCoords = np.array([3, 3, -3, -3])
poi = poi_distance_method(
XCoords=XCoords, YCoords=YCoords
)
assert np.abs(poi[0]) < 0.1
assert np.abs(poi[1]) < 0.1
def test_centre_of_arbitrary_cutout():
XCoords = np.array([-1, -0.2, 0, 0.7, 1, 0]) * 4 + 1
YCoords = np.array([0, -1, -.8, 0, .6, 1]) * 4 - 1
poi = poi_distance_method(
XCoords=XCoords, YCoords=YCoords
)
assert np.abs(poi[0] - 0.92) < 0.1
assert np.abs(poi[1] + 0.62) < 0.1
| agpl-3.0 | -1,580,463,007,600,693,000 | 31.560976 | 66 | 0.677903 | false |
bletham/fstimer | fstimer/gui/preregister.py | 1 | 5850 | #fsTimer - free, open source software for race timing.
#Copyright 2012-15 Ben Letham
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#The author/copyright holder can be contacted at [email protected]
'''Handling of the window handling preregistration setup'''
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
import fstimer.gui
import os
from fstimer.gui.util_classes import MsgDialog
from fstimer.gui.util_classes import GtkStockButton
class PreRegistrationWin(Gtk.Window):
'''Handling of the window handling preregistration setup'''
def __init__(self, path, set_registration_file_cb, handle_registration_cb):
        '''Builds and displays the preregistration window:
        set the computer's registration ID, and optionally choose a pre-registration json'''
super(PreRegistrationWin, self).__init__(Gtk.WindowType.TOPLEVEL)
self.path = path
self.set_registration_file_cb = set_registration_file_cb
self.modify_bg(Gtk.StateType.NORMAL, fstimer.gui.bgcolor)
fname = os.path.abspath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'../data/icon.png'))
self.set_icon_from_file(fname)
self.set_title('fsTimer - ' + os.path.basename(path))
self.set_position(Gtk.WindowPosition.CENTER)
self.connect('delete_event', lambda b, jnk: self.hide())
self.set_border_width(10)
# Start with some intro text.
prereglabel1 = Gtk.Label('Give a unique number to each computer used for registration.\nSelect a pre-registration file, if available.')
# Continue to the spinner
preregtable = Gtk.Table(3, 2, False)
preregtable.set_row_spacings(5)
preregtable.set_col_spacings(5)
preregtable.set_border_width(10)
regid = Gtk.Adjustment(value=1, lower=1, upper=99, step_incr=1)
regid_btn = Gtk.SpinButton(digits=0, climb_rate=0)
regid_btn.set_adjustment(regid)
preregtable.attach(regid_btn, 0, 1, 0, 1)
preregtable.attach(Gtk.Label(label="This computer's registration number"), 1, 2, 0, 1)
preregbtnFILE = Gtk.Button('Select pre-registration')
preregbtnFILE.connect('clicked', self.file_selected)
preregtable.attach(preregbtnFILE, 0, 1, 2, 3)
self.preregfilelabel = Gtk.Label(label='')
self.preregfilelabel.set_markup('<span color="blue">No pre-registration selected.</span>')
preregtable.attach(self.preregfilelabel, 1, 2, 2, 3)
## buttons
prereghbox = Gtk.HBox(True, 0)
preregbtnOK = GtkStockButton('ok',"OK")
preregbtnOK.connect('clicked', self.preregister_ok_cb, regid_btn, handle_registration_cb)
preregbtnCANCEL = GtkStockButton('close',"Close")
preregbtnCANCEL.connect('clicked', lambda b: self.hide())
prereghbox.pack_start(preregbtnOK, False, False, 5)
prereghbox.pack_start(preregbtnCANCEL, False, False, 5)
#Vbox
preregvbox = Gtk.VBox(False, 0)
preregbtnhalign = Gtk.Alignment.new(1, 0, 0, 0)
preregbtnhalign.add(prereghbox)
preregvbox.pack_start(prereglabel1, False, False, 5)
preregvbox.pack_start(preregtable, False, False, 5)
preregvbox.pack_start(preregbtnhalign, False, False, 5)
self.add(preregvbox)
self.show_all()
def file_selected(self, jnk_unused):
'''Handle selection of a pre-reg file using a filechooser.'''
chooser = Gtk.FileChooserDialog(title='Select pre-registration file', parent=self, action=Gtk.FileChooserAction.OPEN, buttons=('Cancel', Gtk.ResponseType.CANCEL, 'OK', Gtk.ResponseType.OK))
ffilter = Gtk.FileFilter()
ffilter.set_name('Registration files')
ffilter.add_pattern('*_registration_*.json')
chooser.add_filter(ffilter)
chooser.set_current_folder(self.path)
response = chooser.run()
if response == Gtk.ResponseType.OK:
filename = chooser.get_filename()
try:
self.set_registration_file_cb(filename)
self.preregfilelabel.set_markup('<span color="blue">Pre-registration '+os.path.basename(filename)+' loaded.</span>')
except (IOError, ValueError):
self.preregfilelabel.set_markup('<span color="red">ERROR! Failed to load '+os.path.basename(filename)+'.</span>')
chooser.destroy()
return
def preregister_ok_cb(self, jnk_unused, regid_btn, handle_registration_cb):
'''If OK is pushed on the pre-register window.'''
#First check if the file already exists
regid = regid_btn.get_value_as_int()
filename = os.path.join(self.path, os.path.basename(self.path)+'_registration_'+str(regid)+'.json')
if os.path.exists(filename):
#Raise a warning window
md = MsgDialog(self, 'warning', ['ok', 'cancel'], 'Proceed?', "A file with this registration number already exists.\nIf you continue it will be overwritten!")
resp = md.run()
md.destroy()
#Check the result.
if resp == Gtk.ResponseType.CANCEL:
#Do nothing.
return
#Else, continue on.
handle_registration_cb(regid) | gpl-3.0 | 8,722,225,096,570,457,000 | 49.008547 | 197 | 0.665128 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_operations.py | 1 | 4744 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.OperationListResult"]
"""Lists all of the available Network Rest API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.Network/operations'} # type: ignore
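# Typical usage (illustrative; assumes a NetworkManagementClient for API version
# 2019-07-01 has been constructed elsewhere with valid credentials):
#
#   for operation in network_client.operations.list():
#       print(operation.name)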
| mit | 4,323,221,576,189,648,400 | 42.522936 | 133 | 0.640388 | false |
azurer100/monitor | monitor/collector.py | 1 | 7231 | #!/usr/bin/env python
# encoding: utf-8
'''
monitor.collector -- shortdesc
monitor.collector is a description
It defines classes_and_methods
@author: Yi
@copyright: 2016 MY. All rights reserved.
'''
import ConfigParser
import socket, time, string, logging
import MySQLdb
from encode import Encode
logging.basicConfig(level=logging.DEBUG,
filename='logs/collector.log',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%d %b %Y %H:%M:%S')
bufsize = 1500
port = 10514
sql_linux_fs = "INSERT INTO linux_file_monitor_info(`access_time`,`operator_status`,`operator_path`,`process_name`,`exec_user`,`original_user`,`local_ip`,`file_md5`,`container_oid`,`aciton`,`status`) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
sql_linux_ps = "INSERT INTO linux_process_monitor_info(`access_time`,`process_status`,`file_path`,`pid`,`process_name`,`ppid`,`parent_process_name`,`exec_user`,`original_user`,`local_ip`,`file_md5`,`aciton`) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
sql_linux_net = "INSERT INTO linux_network_monitor_info(`access_time`,`loacl_address`,`foreign_address`,`state`,`protolcol`,`pid`,`progame_name`,`network_status`,`container_oid`,`aciton`) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
sql_linux_cmd = "INSERT INTO linux_command_monitor_info(`access_time`,`exec_command`,`exec_result`,`exec_user`,`original_user`,`local_ip`,`user_ip`,`operator_status`,`container_oid`,`aciton`) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
class Config:
def __init__(self, path = "./collector.ini"):
cf = ConfigParser.ConfigParser()
cf.read(path)
#return all section
secs = cf.sections()
logging.info("config sections: %s" % secs)
encode = cf.get("other", "encode")
self.db_host = cf.get("db", "host")
self.db_port = cf.getint("db", "port")
self.db_user = cf.get("db", "user")
if(encode == "0"):
self.db_pw = cf.get("db", "pw")
self.db_pw_b = Encode.encrypt(self.db_pw)
else:
self.db_pw_b = cf.get("db", "pw")
self.db_pw = Encode.decrypt(self.db_pw_b)
self.db_name = cf.get("db", "name")
self.sl_host = cf.get("syslog", "host")
self.sl_port = cf.getint("syslog", "port")
#modify one value and write to file
cf.set("db", "pw", self.db_pw_b)
cf.set("other", "encode", "1")
cf.write(open(path, "w"))
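# A minimal collector.ini matching the keys read above (all values are placeholders):
#
#   [db]
#   host = 127.0.0.1
#   port = 3306
#   user = monitor
#   pw = secret
#   name = monitor
#
#   [syslog]
#   host = 0.0.0.0
#   port = 10514
#
#   [other]
#   encode = 0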
def linux_fs(ip, syslog):
items = syslog.split(" ")
file_path_action = items[0]
file_name = items[1]
process_name = items[2]
exec_user = items[3]
ori_user = items[4]
file_md5 = items[5]
return (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), file_path_action, file_name, process_name, exec_user, ori_user,
ip, file_md5, None, "1", None)
def linux_ps(ip, syslog):
items = syslog.split(" ")
return (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(float(items[1]))), items[0], items[2], items[3], items[4], items[5],
items[6], items[7], items[8], ip, "", "1")
def linux_net(ip, syslog):
items = syslog.split(" ")
return (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(float(items[0]))), items[1], items[2], items[3], items[4], (items[5] if items[5] != "" else None),
items[6], items[7], None, "1")
def linux_cmd(ip, syslog):
items = syslog.split(" ")
return (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(float(items[0]))), items[1], items[2], items[3], items[4], ip, items[5],
None, None, "1")
def main():
logging.info("starting collector...")
config = Config()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((config.sl_host, config.sl_port))
except Exception, e:
logging.error("error bind syslog port: " + str(e.args))
try:
conn = MySQLdb.connect(host=config.db_host, db=config.db_name, port=config.db_port, user=config.db_user, passwd=config.db_pw,
connect_timeout=10, use_unicode=True, autocommit=True)
curs = conn.cursor()
except Exception, e:
logging.error("mysql can not be connected: " + str(e.args))
logging.info("syslog is start to collect")
try:
while 1:
try:
data, addr = sock.recvfrom(bufsize)
syslog = str(data)
logging.debug("syslog: %s" % syslog)
# <131> Jul 26 11:34:47 2016 ubuntu linux_fs: hello 1 1 1 1 1 1 1
n = syslog.find('>')
serverty=string.atoi(syslog[1:n])&0x0007
facility=(string.atoi(syslog[1:n])&0x03f8)>>3
syslog_msg = syslog[27:]
host = syslog_msg[:syslog_msg.find(' ')]
syslog_msg = syslog[28+len(host) :]
who = syslog_msg[:syslog_msg.find(': ')]
syslog_msg = syslog[30+len(host + who) :]
if (who == "linux_fs"):
param = linux_fs(addr[0], syslog_msg)
curs.execute(sql_linux_fs, param)
if (who == "linux_ps"):
param1 = linux_ps(addr[0], syslog_msg)
curs.execute(sql_linux_ps, param1)
if (who == "linux_net"):
param2 = linux_net(addr[0], syslog_msg)
curs.execute(sql_linux_net, param2)
if (who == "linux_cmd"):
param3 = linux_cmd(addr[0], syslog_msg)
curs.execute(sql_linux_cmd, param3)
logging.info("syslog: %s" % syslog_msg)
except socket.error:
logging.error("syslog collection failed")
pass
except Exception, e:
logging.error("syslog stop: " + str(e.args))
sock.close()
curs.close()
conn.close()
# sys.exit()
time.sleep(10)
main()
if __name__ == '__main__':
main()
syslog_serverty={ 0:"emergency",
1:"alert",
2:"critical",
3:"error",
4:"warning",
5:"notice",
6:"info",
7:"debug"
}
syslog_facility={ 0:"kernel",
1:"user",
2:"mail",
3:"daemaon",
4:"auth",
5:"syslog",
6:"lpr",
7:"news",
8:"uucp",
9:"cron",
10:"authpriv",
11:"ftp",
12:"ntp",
13:"security",
14:"console",
15:"cron",
16:"local 0",
17:"local 1",
18:"local 2",
19:"local 3",
20:"local 4",
21:"local 5",
22:"local 6",
23:"local 7"
}
| mit | -5,116,564,051,706,430,000 | 36.466321 | 252 | 0.493016 | false |
skosukhin/spack | var/spack/repos/builtin.mock/packages/dttop/package.py | 1 | 1637 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Dttop(Package):
"""Package with a complicated dependency tree"""
homepage = "http://www.example.com"
url = "http://www.example.com/dttop-1.0.tar.gz"
version('1.0', '0123456789abcdef0123456789abcdef')
depends_on('dtbuild1', type='build')
depends_on('dtlink1')
depends_on('dtrun1', type='run')
def install(self, spec, prefix):
pass
| lgpl-2.1 | -7,798,513,058,010,136,000 | 38.926829 | 78 | 0.665241 | false |
tensorflow/probability | tensorflow_probability/python/bijectors/square.py | 1 | 2677 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Square bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector
from tensorflow_probability.python.internal import assert_util
__all__ = [
'Square',
]
class Square(bijector.AutoCompositeTensorBijector):
"""Compute `g(X) = X^2`; X is a positive real number.
g is a bijection between the non-negative real numbers (R_+) and the
non-negative real numbers.
#### Examples
```python
bijector.Square().forward(x=[[1., 0], [2, 1]])
# Result: [[1., 0], [4, 1]], i.e., x^2
bijector.Square().inverse(y=[[1., 4], [9, 1]])
# Result: [[1., 2], [3, 1]], i.e., sqrt(y).
```
"""
def __init__(self, validate_args=False, name='square'):
"""Instantiates the `Square` bijector.
Args:
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
super(Square, self).__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
parameters=parameters,
name=name)
@classmethod
def _is_increasing(cls):
return True
@classmethod
def _parameter_properties(cls, dtype):
return dict()
def _forward(self, x):
with tf.control_dependencies(self._assertions(x)):
return tf.square(x)
def _inverse(self, y):
with tf.control_dependencies(self._assertions(y)):
return tf.sqrt(y)
def _forward_log_det_jacobian(self, x):
with tf.control_dependencies(self._assertions(x)):
return np.log(2.) + tf.math.log(x)
def _assertions(self, t):
if not self.validate_args:
return []
return [assert_util.assert_non_negative(
t, message='All elements must be non-negative.')]
| apache-2.0 | 9,180,104,289,838,083,000 | 27.784946 | 78 | 0.653343 | false |
recursionbane/tensorflow-prebuilt-classifier | predict.py | 1 | 1260 | import sys
# Check and fail early!
if (len(sys.argv) != 2):
print('\nERROR: Must supply the image you want to run prediction on!\n')
exit(-1)
import tensorflow as tf
image_path = sys.argv[1]
# Read in the image_data
image_data = tf.gfile.FastGFile(image_path, 'rb').read()
# Loads label file, strips off carriage return
label_lines = [line.rstrip() for line
in tf.gfile.GFile("retrained_labels.txt")]
# Unpersists graph from file
with tf.gfile.FastGFile("retrained_graph.pb", 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
with tf.Session() as sess:
# Feed the image_data as input to the graph and get first prediction
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
predictions = sess.run(softmax_tensor, \
{'DecodeJpeg/contents:0': image_data})
# Sort to show labels of first prediction in order of confidence
top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
for node_id in top_k:
human_string = label_lines[node_id]
score = predictions[0][node_id]
print('%s (score = %.5f)' % (human_string, score))
| gpl-3.0 | 7,123,609,686,781,757,000 | 30.358974 | 73 | 0.629365 | false |
poeticcapybara/pythalesians | pythalesians-examples/bokeh_examples.py | 1 | 4452 | __author__ = 'saeedamen'
#
# Copyright 2015 Thalesians Ltd. - http//www.thalesians.com / @thalesians
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
bokeh_examples
Shows how to plot using Bokeh library.
"""
import datetime
from pythalesians.market.loaders.lighttimeseriesfactory import LightTimeSeriesFactory
from pythalesians.market.requests.timeseriesrequest import TimeSeriesRequest
from pythalesians.timeseries.calcs.timeseriescalcs import TimeSeriesCalcs
from pythalesians.graphics.graphs.plotfactory import PlotFactory
from pythalesians.graphics.graphs.graphproperties import GraphProperties
if True:
time_series_request = TimeSeriesRequest(
start_date = "01 Jan 2013", # start date
finish_date = datetime.date.today(), # finish date
freq = 'daily', # daily data
data_source = 'google', # use Bloomberg as data source
tickers = ['Apple', 'S&P500 ETF'], # ticker (Thalesians)
fields = ['close'], # which fields to download
vendor_tickers = ['aapl', 'spy'], # ticker (Google)
vendor_fields = ['Close'], # which Bloomberg fields to download
cache_algo = 'internet_load_return') # how to return data
ltsf = LightTimeSeriesFactory()
tsc = TimeSeriesCalcs()
df = tsc.create_mult_index_from_prices(ltsf.harvest_time_series(time_series_request))
gp = GraphProperties()
gp.html_file_output = "output_data/apple.htm"
gp.title = "S&P500 vs Apple"
# plot first with PyThalesians and then Bokeh
# just needs 1 word to change
gp.display_legend = False
pf = PlotFactory()
pf.plot_generic_graph(df, type = 'line', adapter = 'pythalesians', gp = gp)
pf.plot_generic_graph(df, type = 'line', adapter = 'bokeh', gp = gp)
# test simple Bokeh bar charts - monthly returns over past 6 months
if True:
from datetime import timedelta
ltsf = LightTimeSeriesFactory()
end = datetime.datetime.utcnow()
start = end - timedelta(days=180)
tickers = ['S&P500', 'FTSE', 'Nikkei']
vendor_tickers = ['SPX Index', 'UKX Index', 'NKY Index']
time_series_request = TimeSeriesRequest(
start_date = start, # start date
finish_date = datetime.date.today(), # finish date
freq = 'daily', # daily data
data_source = 'bloomberg', # use Bloomberg as data source
tickers = tickers, # ticker (Thalesians)
fields = ['close'], # which fields to download
vendor_tickers = vendor_tickers, # ticker (Bloomberg)
vendor_fields = ['PX_LAST'], # which Bloomberg fields to download
cache_algo = 'internet_load_return') # how to return data
daily_vals = ltsf.harvest_time_series(time_series_request)
# resample for end of month
daily_vals = daily_vals.resample('BM')
daily_vals = daily_vals / daily_vals.shift(1) - 1
daily_vals.index = [str(x.year) + '/' + str(x.month) for x in daily_vals.index]
daily_vals = daily_vals.drop(daily_vals.head(1).index)
pf = PlotFactory()
gp = GraphProperties()
gp.source = 'Thalesians/BBG'
gp.html_file_output = "output_data/equities.htm"
gp.title = 'Recent monthly changes in equity markets'
gp.scale_factor = 2
gp.display_legend = True
gp.chart_type = ['bar', 'scatter', 'line']
gp.x_title = 'Dates'
gp.y_title = 'Pc'
# plot using Bokeh then PyThalesians
pf.plot_bar_graph(daily_vals * 100, adapter = 'bokeh', gp = gp)
pf.plot_bar_graph(daily_vals * 100, adapter = 'pythalesians', gp = gp) | apache-2.0 | -1,957,251,320,556,141,600 | 41.009434 | 121 | 0.612534 | false |
johnnykv/heralding | heralding/capabilities/ftp.py | 1 | 3906 | # Copyright (C) 2017 Johnny Vestergaard <[email protected]>
#
# Rewritten by Aniket Panse <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Aniket Panse <[email protected]> grants Johnny Vestergaard <[email protected]>
# a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable
# copyright license to reproduce, prepare derivative works of, publicly
# display, publicly perform, sublicense, relicense, and distribute [the] Contributions
# and such derivative works.
import logging
from heralding.capabilities.handlerbase import HandlerBase
logger = logging.getLogger(__name__)
TERMINATOR = '\r\n'
class FtpHandler:
"""Handles a single FTP connection"""
def __init__(self, reader, writer, options, session):
self.banner = options['protocol_specific_data']['banner']
self.max_loggins = int(options['protocol_specific_data']['max_attempts'])
self.syst_type = options['protocol_specific_data']['syst_type']
self.authenticated = False
self.writer = writer
self.reader = reader
self.serve_flag = True
self.session = session
self.state = None
self.user = None
async def getcmd(self):
cmd = await self.reader.readline()
return str(cmd, 'utf-8')
async def serve(self):
await self.respond('220 ' + self.banner)
while self.serve_flag:
resp = await self.getcmd()
if not resp:
self.stop()
break
else:
try:
cmd, args = resp.split(' ', 1)
except ValueError:
cmd = resp
args = None
else:
args = args.strip('\r\n')
cmd = cmd.strip('\r\n')
cmd = cmd.upper()
# List of commands allowed before a login
unauth_cmds = ['USER', 'PASS', 'QUIT', 'SYST']
meth = getattr(self, 'do_' + cmd, None)
if not meth:
await self.respond('500 Unknown Command.')
else:
if not self.authenticated:
if cmd not in unauth_cmds:
await self.respond('503 Login with USER first.')
continue
await meth(args)
self.state = cmd
async def do_USER(self, arg):
self.user = arg
await self.respond('331 Now specify the Password.')
async def do_PASS(self, arg):
if self.state != 'USER':
await self.respond('503 Login with USER first.')
return
passwd = arg
self.session.add_auth_attempt(
'plaintext', username=self.user, password=passwd)
await self.respond('530 Authentication Failed.')
if self.session.get_number_of_login_attempts() >= self.max_loggins:
self.stop()
async def do_SYST(self, arg):
await self.respond('215 {0}'.format(self.syst_type))
async def do_QUIT(self, arg):
await self.respond('221 Bye.')
self.serve_flag = False
self.stop()
async def respond(self, msg):
msg += TERMINATOR
msg_bytes = bytes(msg, 'utf-8')
self.writer.write(msg_bytes)
await self.writer.drain()
def stop(self):
self.session.end_session()
class ftp(HandlerBase):
def __init__(self, options, loop):
super().__init__(options, loop)
self._options = options
async def execute_capability(self, reader, writer, session):
ftp_cap = FtpHandler(reader, writer, self._options, session)
await ftp_cap.serve()
| gpl-3.0 | 5,418,834,045,856,934,000 | 30.248 | 86 | 0.658474 | false |
hayalasalah/adhan.py | adhan/adhan.py | 1 | 3946 | """
adhan.py - The main interface for using the API.
Copyright (C) 2015 Zuhair Parvez
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import math
from datetime import datetime, timedelta
from functools import partial
# from adhan import calculations, methods
from .calculations import (
compute_time_at_sun_angle,
compute_zuhr_utc,
time_at_shadow_length,
)
from .methods import ASR_STANDARD
SUNRISE_ANGLE = 0.833
SUNSET_ANGLE = 0.833
def floating_point_to_datetime(day, fp_time):
"""Convert a floating point time to a datetime."""
result = datetime(year=day.year, month=day.month, day=day.day)
result += timedelta(minutes=math.ceil(60 * fp_time))
return result
def adhan(day, location, parameters, timezone_offset=0):
"""Calculate adhan times given the parameters.
This function will compute the adhan times for a certain location on
certain day. The method for calculating the prayers as well as the time for
Asr can also be specified. The timezone offset naively adds the specified
number of hours to each time that is returned.
:param day: The datetime.date to calculate for
:param location: 2-tuple of floating point coordiantes for latitude and
longitude of location in degrees
:param parameters: A dictionary-like object of parameters for computing
adhan times. Commonly used calculation methods are
available in the adhan.methods module
:param timezone_offset: The number of hours to add to each prayer time
to account for timezones. Can be floating point
"""
latitude, longitude = location
#
# To reduce a little repetitiveness, using a partial function that has the
# day and latitude already set
#
time_at_sun_angle = partial(
compute_time_at_sun_angle,
day=day,
latitude=latitude
)
zuhr_time = compute_zuhr_utc(day, longitude)
shuruq_time = zuhr_time - time_at_sun_angle(angle=SUNRISE_ANGLE)
maghrib_time = zuhr_time + time_at_sun_angle(angle=SUNSET_ANGLE)
fajr_time = zuhr_time - time_at_sun_angle(angle=parameters['fajr_angle'])
#
# Most methods define Isha as a certain angle the sun has to be below
# the horizon, but some methods define it as a certain number of minutes
# after Maghrib
#
if parameters.get('isha_delay', None):
isha_time = maghrib_time + parameters['isha_delay']
else:
isha_time = (
zuhr_time +
time_at_sun_angle(angle=parameters['isha_angle'])
)
#
# Default to standard Asr method if not specified
#
asr_multiplier = parameters.get('asr_multiplier', ASR_STANDARD)
asr_time = zuhr_time + time_at_shadow_length(
day=day, latitude=latitude, multiplier=asr_multiplier
)
offset = timedelta(minutes=60 * timezone_offset)
return {
'fajr': floating_point_to_datetime(day, fajr_time) + offset,
'zuhr': floating_point_to_datetime(day, zuhr_time) + offset,
'shuruq': floating_point_to_datetime(day, shuruq_time) + offset,
'asr': floating_point_to_datetime(day, asr_time) + offset,
'maghrib': floating_point_to_datetime(day, maghrib_time) + offset,
'isha': floating_point_to_datetime(day, isha_time) + offset,
}
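# Example usage (illustrative; the angle values below are assumptions -- prefer the
# calculation-method presets exposed in adhan.methods):
#
#   from datetime import date
#   times = adhan(day=date.today(),
#                 location=(30.25, -97.75),
#                 parameters={'fajr_angle': 15.0, 'isha_angle': 15.0},
#                 timezone_offset=-6)
#   print(times['fajr'], times['maghrib'])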
| lgpl-3.0 | -2,185,333,171,867,021,300 | 33.920354 | 79 | 0.688039 | false |
jteehan/cfme_tests | fixtures/parallelizer/__init__.py | 1 | 28256 | """Parallel testing, supporting arbitrary collection ordering
The Workflow
------------
- Master py.test process starts up, inspects config to decide how many slaves to start, if at all
- env['parallel_base_urls'] is inspected first
- py.test config.option.appliances and the related --appliance cmdline flag are used
if env['parallel_base_urls'] isn't set
- if neither are set, no parallelization happens
- Slaves are started
- Master runs collection, blocks until slaves report their collections
- Slaves each run collection and submit them to the master, then block inside their runtest loop,
waiting for tests to run
- Master diffs slave collections against its own; the test ids are verified to match
across all nodes
- Master enters main runtest loop, uses a generator to build lists of test groups which are then
sent to slaves, one group at a time
- For each phase of each test, the slave serializes test reports, which are then unserialized on
the master and handed to the normal pytest reporting hooks, which is able to deal with test
reports arriving out of order
- Before running the last test in a group, the slave will request more tests from the master
- If more tests are received, they are run
- If no tests are received, the slave will shut down after running its final test
- After all slaves are shut down, the master will do its end-of-session reporting as usual, and
shut down
"""
from itertools import groupby
import difflib
import json
import os
import signal
import subprocess
from collections import defaultdict, deque, namedtuple
from datetime import datetime
from itertools import count
import attr
from threading import Thread
from time import sleep, time
import pytest
import zmq
from _pytest import runner
from fixtures import terminalreporter
from fixtures.parallelizer import remote
from fixtures.pytest_store import store
from utils import at_exit, conf
from utils.appliance import IPAppliance, load_appliances_from_config
from utils.log import create_sublogger
from utils.path import conf_path
# Initialize slaveid to None, indicating this as the master process
# slaves will set this to a unique string when they're initialized
conf.runtime['env']['slaveid'] = None
if not conf.runtime['env'].get('ts'):
ts = str(time())
conf.runtime['env']['ts'] = ts
def pytest_addhooks(pluginmanager):
import hooks
pluginmanager.add_hookspecs(hooks)
@pytest.mark.trylast
def pytest_configure(config):
"""Configures the parallel session, then fires pytest_parallel_configured."""
reporter = terminalreporter.reporter()
if not config.option.appliances:
appliances = load_appliances_from_config(conf.env)
reporter.write_line('Retrieved these appliances from the conf.env', red=True)
else:
appliance_config = {
'appliances': [{'base_url': base_url} for base_url in config.option.appliances]}
# Grab the possible globals from the conf.env
for key, value in (
(key, value)
for key, value in conf.env.items()
if key in IPAppliance.CONFIG_MAPPING and key not in IPAppliance.CONFIG_NONGLOBAL):
appliance_config[key] = value
appliances = load_appliances_from_config(appliance_config)
reporter.write_line('Retrieved these appliances from the --appliance parameters', red=True)
for appliance in appliances:
reporter.write_line('* {!r}'.format(appliance), cyan=True)
if len(appliances) > 1:
session = ParallelSession(config, appliances)
config.pluginmanager.register(session, "parallel_session")
store.parallelizer_role = 'master'
reporter.write_line(
'As a parallelizer master kicking off parallel session for these {} appliances'.format(
len(appliances)),
green=True)
config.hook.pytest_parallel_configured(parallel_session=session)
else:
reporter.write_line('No parallelization required', green=True)
config.hook.pytest_parallel_configured(parallel_session=None)
def handle_end_session(signal, frame):
# when signaled, end the current test session immediately
if store.parallel_session:
store.parallel_session.session_finished = True
signal.signal(signal.SIGQUIT, handle_end_session)
@attr.s(hash=False)
class SlaveDetail(object):
slaveid_generator = ('slave{:02d}'.format(i) for i in count())
appliance = attr.ib()
id = attr.ib(default=attr.Factory(
lambda: next(SlaveDetail.slaveid_generator)))
forbid_restart = attr.ib(default=False, init=False)
tests = attr.ib(default=attr.Factory(set), repr=False)
process = attr.ib(default=None, repr=False)
provider_allocation = attr.ib(default=attr.Factory(list), repr=False)
def start(self):
if self.forbid_restart:
return
devnull = open(os.devnull, 'w')
# worker output redirected to null; useful info comes via messages and logs
self.process = subprocess.Popen(
['python', remote.__file__, self.id, self.appliance.as_json, conf.runtime['env']['ts']],
stdout=devnull,
)
at_exit(self.process.kill)
def poll(self):
if self.process is not None:
return self.process.poll()
class ParallelSession(object):
def __init__(self, config, appliances):
self.config = config
self.session = None
self.session_finished = False
self.countfailures = 0
self.collection = []
self.sent_tests = 0
self.log = create_sublogger('master')
self.maxfail = config.getvalue("maxfail")
self._failed_collection_errors = {}
self.terminal = store.terminalreporter
self.trdist = None
self.slaves = {}
self.test_groups = self._test_item_generator()
self._pool = []
from utils.conf import cfme_data
self.provs = sorted(set(cfme_data['management_systems'].keys()),
key=len, reverse=True)
self.used_prov = set()
self.failed_slave_test_groups = deque()
self.slave_spawn_count = 0
self.appliances = appliances
# set up the ipc socket
zmq_endpoint = 'ipc://{}'.format(
config.cache.makedir('parallelize').join(str(os.getpid())))
ctx = zmq.Context.instance()
self.sock = ctx.socket(zmq.ROUTER)
self.sock.bind(zmq_endpoint)
# clean out old slave config if it exists
slave_config = conf_path.join('slave_config.yaml')
slave_config.check() and slave_config.remove()
# write out the slave config
conf.runtime['slave_config'] = {
'args': self.config.args,
'options': self.config.option.__dict__,
'zmq_endpoint': zmq_endpoint,
}
if hasattr(self, "slave_appliances_data"):
conf.runtime['slave_config']["appliance_data"] = self.slave_appliances_data
conf.runtime['slave_config']['options']['use_sprout'] = False # Slaves don't use sprout
conf.save('slave_config')
for appliance in self.appliances:
slave_data = SlaveDetail(appliance=appliance)
self.slaves[slave_data.id] = slave_data
for slave in sorted(self.slaves):
self.print_message("using appliance {}".format(self.slaves[slave].appliance.url),
slave, green=True)
def _slave_audit(self):
# XXX: There is currently no mechanism to add or remove slave_urls, short of
# firing up the debugger and doing it manually. This is making room for
# planned future abilities to dynamically add and remove slaves via automation
# check for unexpected slave shutdowns and redistribute tests
for slave in self.slaves.values():
returncode = slave.poll()
if returncode:
slave.process = None
if returncode == -9:
msg = '{} killed due to error, respawning'.format(slave.id)
else:
msg = '{} terminated unexpectedly with status {}, respawning'.format(
slave.id, returncode)
if slave.tests:
failed_tests, slave.tests = slave.tests, set()
num_failed_tests = len(failed_tests)
self.sent_tests -= num_failed_tests
msg += ' and redistributing {} tests'.format(num_failed_tests)
self.failed_slave_test_groups.append(failed_tests)
self.print_message(msg, purple=True)
# If a slave was terminated for any reason, kill that slave
# the terminated flag implies the appliance has died :(
for slave in list(self.slaves.values()):
if slave.forbid_restart:
if slave.process is None:
self.config.hook.pytest_miq_node_shutdown(
config=self.config, nodeinfo=slave.url)
del self.slaves[slave.id]
else:
# no hook call here, a future audit will handle the fallout
self.print_message(
"{}'s appliance has died, deactivating slave".format(slave.id))
self.interrupt(slave)
else:
if slave.process is None:
slave.start()
self.slave_spawn_count += 1
def send(self, slave, event_data):
"""Send data to slave.
``event_data`` will be serialized as JSON, and so must be JSON serializable
"""
event_json = json.dumps(event_data)
self.sock.send_multipart([slave.id, '', event_json])
def recv(self):
# poll the zmq socket, populate the recv queue deque with responses
events = zmq.zmq_poll([(self.sock, zmq.POLLIN)], 50)
if not events:
return None, None, None
slaveid, _, event_json = self.sock.recv_multipart(flags=zmq.NOBLOCK)
event_data = json.loads(event_json)
event_name = event_data.pop('_event_name')
if slaveid not in self.slaves:
self.log.error("message from terminated worker %s %s %s",
slaveid, event_name, event_data)
return None, None, None
return self.slaves[slaveid], event_data, event_name
def print_message(self, message, prefix='master', **markup):
"""Print a message from a node to the py.test console
Args:
message: The message to print
**markup: If set, overrides the default markup when printing the message
"""
# differentiate master and slave messages by default
prefix = getattr(prefix, 'id', prefix)
if not markup:
if prefix == 'master':
markup = {'blue': True}
else:
markup = {'cyan': True}
stamp = datetime.now().strftime("%Y%m%d %H:%M:%S")
self.terminal.write_ensure_prefix(
'({})[{}] '.format(prefix, stamp), message, **markup)
def ack(self, slave, event_name):
"""Acknowledge a slave's message"""
self.send(slave, 'ack {}'.format(event_name))
def monitor_shutdown(self, slave):
# non-daemon so slaves get every opportunity to shut down cleanly
shutdown_thread = Thread(target=self._monitor_shutdown_t,
args=(slave.id, slave.process))
shutdown_thread.start()
def _monitor_shutdown_t(self, slaveid, process):
# a KeyError here means self.slaves got mangled, indicating a problem elsewhere
if process is None:
self.log.warning('Slave was missing when trying to monitor shutdown')
def sleep_and_poll():
start_time = time()
# configure the polling logic
polls = 0
# how often to poll
poll_sleep_time = .5
# how often to report (calculated to be around once a minute based on poll_sleep_time)
poll_report_modulo = 60 / poll_sleep_time
# maximum time to wait
poll_num_sec = 300
while (time() - start_time) < poll_num_sec:
polls += 1
yield
if polls % poll_report_modulo == 0:
remaining_time = int(poll_num_sec - (time() - start_time))
self.print_message(
'{} still shutting down, '
'will continue polling for {} seconds '
.format(slaveid, remaining_time), blue=True)
sleep(poll_sleep_time)
# start the poll
for poll in sleep_and_poll():
ec = process.poll()
if ec is None:
continue
else:
if ec == 0:
self.print_message('{} exited'.format(slaveid), green=True)
else:
self.print_message('{} died'.format(slaveid), red=True)
break
else:
self.print_message('{} failed to shut down gracefully; killed'.format(slaveid),
red=True)
process.kill()
def interrupt(self, slave, **kwargs):
"""Nicely ask a slave to terminate"""
slave.forbid_restart = True
if slave.poll() is None:
slave.process.send_signal(subprocess.signal.SIGINT)
self.monitor_shutdown(slave, **kwargs)
def kill(self, slave, **kwargs):
"""Rudely kill a slave"""
slave.forbid_restart = True
if slave.poll() is None:
slave.process.kill()
self.monitor_shutdown(slave, **kwargs)
def send_tests(self, slave):
"""Send a slave a group of tests"""
try:
tests = list(self.failed_slave_test_groups.popleft())
except IndexError:
tests = self.get(slave)
self.send(slave, tests)
slave.tests.update(tests)
collect_len = len(self.collection)
tests_len = len(tests)
self.sent_tests += tests_len
if tests:
self.print_message('sent {} tests to {} ({}/{}, {:.1f}%)'.format(
tests_len, slave.id, self.sent_tests, collect_len,
self.sent_tests * 100. / collect_len
))
return tests
def pytest_sessionstart(self, session):
"""pytest sessionstart hook
- sets up distributed terminal reporter
        - sets up zmq ipc socket for the slaves to use
- writes pytest options and args to slave_config.yaml
- starts the slaves
        - registers atexit kill hooks to destroy slaves at the end if things go terribly wrong
"""
# If reporter() gave us a fake terminal reporter in __init__, the real
# terminal reporter is registered by now
self.terminal = store.terminalreporter
self.trdist = TerminalDistReporter(self.config, self.terminal)
self.config.pluginmanager.register(self.trdist, "terminaldistreporter")
self.session = session
def pytest_runtestloop(self):
"""pytest runtest loop
- Disable the master terminal reporter hooks, so we can add our own handlers
that include the slaveid in the output
- Send tests to slaves when they ask
- Log the starting of tests and test results, including slave id
- Handle clean slave shutdown when they finish their runtest loops
- Restore the master terminal reporter after testing so we get the final report
"""
# Build master collection for slave diffing and distribution
self.collection = [item.nodeid for item in self.session.items]
# Fire up the workers after master collection is complete
# master and the first slave share an appliance, this is a workaround to prevent a slave
# from altering an appliance while master collection is still taking place
for slave in self.slaves.values():
slave.start()
try:
self.print_message("Waiting for {} slave collections".format(len(self.slaves)),
red=True)
# Turn off the terminal reporter to suppress the builtin logstart printing
terminalreporter.disable()
while True:
# spawn/kill/replace slaves if needed
self._slave_audit()
if not self.slaves:
# All slaves are killed or errored, we're done with tests
self.print_message('all slaves have exited', yellow=True)
self.session_finished = True
if self.session_finished:
break
slave, event_data, event_name = self.recv()
if event_name == 'message':
message = event_data.pop('message')
markup = event_data.pop('markup')
# messages are special, handle them immediately
self.print_message(message, slave, **markup)
self.ack(slave, event_name)
elif event_name == 'collectionfinish':
slave_collection = event_data['node_ids']
# compare slave collection to the master, all test ids must be the same
self.log.debug('diffing {} collection'.format(slave.id))
diff_err = report_collection_diff(
slave.id, self.collection, slave_collection)
if diff_err:
self.print_message(
'collection differs, respawning', slave.id,
purple=True)
self.print_message(diff_err, purple=True)
self.log.error('{}'.format(diff_err))
self.kill(slave)
slave.start()
else:
self.ack(slave, event_name)
elif event_name == 'need_tests':
self.send_tests(slave)
self.log.info('starting master test distribution')
elif event_name == 'runtest_logstart':
self.ack(slave, event_name)
self.trdist.runtest_logstart(
slave.id,
event_data['nodeid'],
event_data['location'])
elif event_name == 'runtest_logreport':
self.ack(slave, event_name)
report = unserialize_report(event_data['report'])
if report.when in ('call', 'teardown'):
slave.tests.discard(report.nodeid)
self.trdist.runtest_logreport(slave.id, report)
elif event_name == 'internalerror':
self.ack(slave, event_name)
self.print_message(event_data['message'], slave, purple=True)
self.kill(slave)
elif event_name == 'shutdown':
self.config.hook.pytest_miq_node_shutdown(
config=self.config, nodeinfo=slave.appliance.url)
self.ack(slave, event_name)
del self.slaves[slave.id]
self.monitor_shutdown(slave)
# total slave spawn count * 3, to allow for each slave's initial spawn
# and then each slave (on average) can fail two times
if self.slave_spawn_count >= len(self.appliances) * 3:
self.print_message(
'too many slave respawns, exiting',
red=True, bold=True)
raise KeyboardInterrupt('Interrupted due to slave failures')
except Exception as ex:
self.log.error('Exception in runtest loop:')
self.log.exception(ex)
self.print_message(str(ex))
raise
finally:
terminalreporter.enable()
# Suppress other runtestloop calls
return True
def _test_item_generator(self):
for tests in self._modscope_item_generator():
yield tests
def _modscope_item_generator(self):
# breaks out tests by module, can work just about any way we want
# as long as it yields lists of tests id from the master collection
sent_tests = 0
collection_len = len(self.collection)
def get_fspart(nodeid):
return nodeid.split('::')[0]
for fspath, gen_moditems in groupby(self.collection, key=get_fspart):
for tests in self._modscope_id_splitter(gen_moditems):
sent_tests += len(tests)
self.log.info('{} tests remaining to send'.format(
collection_len - sent_tests))
yield list(tests)
def _modscope_id_splitter(self, module_items):
# given a list of item ids from one test module, break up tests into groups with the same id
parametrized_ids = defaultdict(list)
for item in module_items:
if '[' in item:
                # split on the leftmost bracket, then strip everything after the rightmost bracket
# so 'test_module.py::test_name[parametrized_id]' becomes 'parametrized_id'
parametrized_id = item.split('[')[1].rstrip(']')
else:
# splits failed, item has no parametrized id
parametrized_id = 'no params'
parametrized_ids[parametrized_id].append(item)
for id, tests in parametrized_ids.items():
if tests:
self.log.info('sent tests with param {} {!r}'.format(id, tests))
yield tests
def get(self, slave):
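        # Hand the slave its next group of tests. Provider-parametrized groups are
        # preferentially given to a slave whose appliance already has that provider
        # allocated (bounded by appliance_num_limit); unparametrized groups can go to
        # any slave. If nothing fits, the appliance is cleansed and re-allocated.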
def provs_of_tests(test_group):
found = set()
for test in test_group:
found.update(pv for pv in self.provs
if '[' in test and pv in test)
return sorted(found)
if not self._pool:
for test_group in self.test_groups:
self._pool.append(test_group)
self.used_prov.update(provs_of_tests(test_group))
if self.used_prov:
self.ratio = float(len(self.slaves)) / len(self.used_prov)
else:
self.ratio = 0.0
if not self._pool:
return []
appliance_num_limit = 1
for idx, test_group in enumerate(self._pool):
provs = provs_of_tests(test_group)
if provs:
prov = provs[0]
if prov in slave.provider_allocation:
# provider is already with the slave, so just return the tests
self._pool.remove(test_group)
return test_group
else:
if len(slave.provider_allocation) >= appliance_num_limit:
continue
else:
# Adding provider to slave since there are not too many
slave.provider_allocation.append(prov)
self._pool.remove(test_group)
return test_group
else:
# No providers - ie, not a provider parametrized test
# or no params, so not parametrized at all
self._pool.remove(test_group)
return test_group
# Here means no tests were able to be sent
for test_group in self._pool:
provs = provs_of_tests(test_group)
if provs:
prov = provs[0]
# Already too many slaves with provider
app = slave.appliance
self.print_message(
'cleansing appliance', slave, purple=True)
try:
app.delete_all_providers()
except Exception as e:
self.print_message(
                        'could not cleanse', slave, red=True)
                    self.print_message('error: {}'.format(e), slave, red=True)
slave.provider_allocation = [prov]
self._pool.remove(test_group)
return test_group
assert not self._pool, self._pool
return []
def report_collection_diff(slaveid, from_collection, to_collection):
"""Report differences, if any exist, between master and a slave collection
Raises RuntimeError if collections differ
Note:
This function will sort functions before comparing them.
"""
from_collection, to_collection = sorted(from_collection), sorted(to_collection)
if from_collection == to_collection:
# Well, that was easy.
return
# diff the two, so we get some idea of what's wrong
diff = difflib.unified_diff(
from_collection,
to_collection,
fromfile='master',
tofile=slaveid,
)
# diff is a line generator, stringify it
diff = '\n'.join([line.rstrip() for line in diff])
return '{slaveid} diff:\n{diff}\n'.format(slaveid=slaveid, diff=diff)
class TerminalDistReporter(object):
"""Terminal Reporter for Distributed Testing
trdist reporter exists to make sure we get good distributed logging during the runtest loop,
which means the normal terminal reporter should be disabled during the loop
This class is where we make sure the terminal reporter is made aware of whatever state it
needs to report properly once we turn it back on after the runtest loop
It has special versions of pytest reporting hooks that, where possible, try to include a
slave ID. These hooks are called in :py:class:`ParallelSession`'s runtestloop hook.
"""
def __init__(self, config, terminal):
self.config = config
self.tr = terminal
self.outcomes = {}
def runtest_logstart(self, slaveid, nodeid, location):
test = self.tr._locationline(nodeid, *location)
prefix = '({}) {}'.format(slaveid, test)
self.tr.write_ensure_prefix(prefix, 'running', blue=True)
self.config.hook.pytest_runtest_logstart(nodeid=nodeid, location=location)
def runtest_logreport(self, slaveid, report):
# Run all the normal logreport hooks
self.config.hook.pytest_runtest_logreport(report=report)
# Now do what the terminal reporter would normally do, but include parallelizer info
outcome, letter, word = self.config.hook.pytest_report_teststatus(report=report)
# Stash stats on the terminal reporter so it reports properly
# after it's reenabled at the end of runtestloop
self.tr.stats.setdefault(outcome, []).append(report)
test = self.tr._locationline(report.nodeid, *report.location)
prefix = '({}) {}'.format(slaveid, test)
try:
# for some reason, pytest_report_teststatus returns a word, markup tuple
# when the word would be 'XPASS', so unpack it here if that's the case
word, markup = word
except (TypeError, ValueError):
# word wasn't iterable or didn't have enough values, use it as-is
pass
if word in ('PASSED', 'xfail'):
markup = {'green': True}
elif word in ('ERROR', 'FAILED', 'XPASS'):
markup = {'red': True}
elif word:
markup = {'yellow': True}
# For every stage where we can report the outcome, stash it in the outcomes dict
if word:
self.outcomes[test] = Outcome(word, markup)
# Then, when we get to the teardown report, print the last outcome
        # This prevents reporting a test as 'PASSED' if its teardown phase fails, for example
if report.when == 'teardown':
word, markup = self.outcomes.pop(test)
self.tr.write_ensure_prefix(prefix, word, **markup)
Outcome = namedtuple('Outcome', ['word', 'markup'])
def unserialize_report(reportdict):
"""
Generate a :py:class:`TestReport <pytest:_pytest.runner.TestReport>` from a serialized report
"""
return runner.TestReport(**reportdict)
| gpl-2.0 | 3,688,819,085,660,927,500 | 39.423462 | 100 | 0.590352 | false |
kindy61/mako | test/test_tgplugin.py | 1 | 1221 | import unittest
from mako.ext.turbogears import TGPlugin
from util import flatten_result, result_lines
tl = TGPlugin(options=dict(directories=['./test_htdocs']), extension='html')
class TestTGPlugun(unittest.TestCase):
def test_basic(self):
t = tl.load_template('/index.html')
assert result_lines(t.render()) == [
"this is index"
]
def test_subdir(self):
t = tl.load_template('/subdir/index.html')
assert result_lines(t.render()) == [
"this is sub index",
"this is include 2"
]
assert tl.load_template('/subdir/index.html').module_id == '_subdir_index_html'
def test_basic_dot(self):
t = tl.load_template('index')
assert result_lines(t.render()) == [
"this is index"
]
def test_subdir_dot(self):
t = tl.load_template('subdir.index')
assert result_lines(t.render()) == [
"this is sub index",
"this is include 2"
]
assert tl.load_template('subdir.index').module_id == '_subdir_index_html'
def test_string(self):
t = tl.load_template('foo', "hello world")
assert t.render() == "hello world"
| mit | -6,639,422,518,448,726,000 | 28.780488 | 87 | 0.57412 | false |
kubeflow/kfp-tekton | sdk/python/tests/compiler/testdata/recursion_while.py | 1 | 1971 | # Copyright 2020 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kfp import dsl
from kfp_tekton.compiler import TektonCompiler
class Coder:
def empty(self):
return ""
TektonCompiler._get_unique_id_code = Coder.empty
def flip_coin_op():
"""Flip a coin and output heads or tails randomly."""
return dsl.ContainerOp(
name='Flip coin',
image='python:alpine3.6',
command=['sh', '-c'],
arguments=['python -c "import random; result = \'heads\' if random.randint(0,1) == 0 '
'else \'tails\'; print(result)" | tee /tmp/output'],
file_outputs={'output': '/tmp/output'}
)
def print_op(msg):
"""Print a message."""
return dsl.ContainerOp(
name='Print',
image='alpine:3.6',
command=['echo', msg],
)
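# Recursive sub-pipeline: while the flip result is 'heads', print it, flip the coin
# again and recurse with the new result; recursion stops once a flip comes up tails.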
@dsl._component.graph_component
def flip_component(flip_result, maxVal):
with dsl.Condition(flip_result == 'heads'):
print_flip = print_op(flip_result)
flipA = flip_coin_op().after(print_flip)
flip_component(flipA.output, maxVal)
@dsl.pipeline(
name='recursion pipeline',
description='shows how to use graph_component and recursion.'
)
def flipcoin(maxVal=12):
flip_out = flip_coin_op()
flip_loop = flip_component(flip_out.output, maxVal)
print_op('cool, it is over. %s' % flip_out.output).after(flip_loop)
if __name__ == '__main__':
TektonCompiler().compile(flipcoin, __file__.replace('.py', '.yaml'))
| apache-2.0 | 2,473,245,731,787,266,600 | 28.41791 | 94 | 0.663115 | false |
255BITS/HyperGAN | hypergan/losses/base_loss.py | 1 | 14033 | from hypergan.gan_component import GANComponent
import numpy as np
import tensorflow as tf
class BaseLoss(GANComponent):
def __init__(self, gan, config, discriminator=None, generator=None, x=None, split=2, d_fake=None, d_real=None, reuse=False, name="BaseLoss"):
self.sample = None
self.ops = None
self.reuse=reuse
self.x = x
self.d_fake = d_fake
self.d_real = d_real
self.discriminator = discriminator or gan.discriminator
self.generator = generator
self.split = split
GANComponent.__init__(self, gan, config, name=name)
def reuse(self, d_real=None, d_fake=None):
self.discriminator.ops.reuse()
net = self._create(d_real, d_fake)
self.discriminator.ops.stop_reuse()
return net
def create(self):
gan = self.gan
config = self.config
ops = self.gan.ops
split = self.split
d_real = self.d_real
d_fake = self.d_fake
d_loss = None
g_loss = None
if d_real is None or d_fake is None:
# Not passed in, lets populate d_real/d_fake
net = self.discriminator.sample
ds = self.split_batch(net, split)
d_real = ds[0]
d_fake = tf.add_n(ds[1:])/(len(ds)-1)
d_loss, g_loss = self._create(d_real, d_fake)
else:
d_loss, g_loss = self._create(d_real, d_fake)
d_regularizers = []
g_regularizers = []
d_loss_features = d_loss
g_loss_features = g_loss
self.d_loss_features = d_loss_features
self.g_loss_features = g_loss_features
if config.minibatch:
d_net = tf.concat([d_real, d_fake], axis=0)
d_regularizers.append(self.minibatch(d_net)) # TODO on d_loss_features?
if config.gradient_locally_stable:
d_vars = gan.discriminator.variables()
g_vars = (gan.encoder.variables() + gan.generator.variables())
gls = tf.gradients(d_loss, d_vars+g_vars)
gls = tf.square(tf.global_norm(gls))
g_regularizers.append(config.gradient_locally_stable * gls)
self.add_metric('gradient_locally_stable', ops.squash(gls, tf.reduce_mean))
print("Gradient locally stable applied")
if config.gradient_penalty:
gp = self.gradient_penalty()
d_regularizers.append(gp)
self.add_metric('gradient_penalty', ops.squash(gp, tf.reduce_mean))
print("Gradient penalty applied")
if config.k_lipschitz_penalty:
lipschitz_penalty = tf.maximum(tf.square(d_real) - 1, 0) + tf.maximum(tf.square(d_fake) - 1, 0)
self.add_metric('k_lipschitz', ops.squash(lipschitz_penalty))
d_regularizers.append(lipschitz_penalty)
if config.jg_penalty:
d_vars = gan.d_vars()
g_vars = gan.g_vars()
reg_g_grads = tf.gradients(g_loss, g_vars)
reg_d_grads = tf.gradients(g_loss, d_vars)
reg_d_grads = tf.square(tf.global_norm(reg_d_grads))
reg_g_grads = tf.square(tf.global_norm(reg_g_grads))
d_loss += 0.5*(config.jg_lambda or 0.01)*reg_d_grads
g_loss += 0.5*(config.jg_lambda or 0.01)*reg_g_grads
self.add_metric('reg_d', reg_d_grads)
self.add_metric('reg_g', reg_g_grads)
if config.l2nn_penalty:
l2nn_penalties = []
weights = self.gan.weights()
if config.l2nn_penalty_only_d:
weights = self.discriminator.weights()
if len(weights) > 0:
for w in weights:
w = tf.reshape(w, [-1, self.ops.shape(w)[-1]])
wt = tf.transpose(w)
wtw = tf.matmul(wt,w)
wwt = tf.matmul(w,wt)
def _l(m):
m = tf.abs(m)
m = tf.reduce_sum(m, axis=0,keep_dims=True)
m = tf.maximum(m-1, 0)
m = tf.reduce_max(m, axis=1,keep_dims=True)
return m
l2nn_penalties.append(tf.minimum(_l(wtw), _l(wwt)))
print('l2nn_penalty', self.config.l2nn_penalty, l2nn_penalties)
l2nn_penalty = self.config.l2nn_penalty * tf.add_n(l2nn_penalties)
self.add_metric('l2nn_penalty', self.gan.ops.squash(l2nn_penalty))
l2nn_penalty = tf.tile(l2nn_penalty, [self.gan.batch_size(), 1])
d_regularizers.append(l2nn_penalty)
if config.ortho_penalty:
penalties = []
for w in self.gan.weights():
print("PENALTY", w)
w = tf.reshape(w, [-1, self.ops.shape(w)[-1]])
wt = tf.transpose(w)
wtw = tf.matmul(wt,w)
wwt = tf.matmul(w,wt)
mwtw = tf.matmul(w, wtw)
mwwt = tf.matmul(wt, wwt)
def _l(w,m):
l = tf.reduce_mean(tf.abs(w - m))
l = self.ops.squash(l)
return l
penalties.append(tf.minimum(_l(w, mwtw), _l(wt, mwwt)))
penalty = self.config.ortho_penalty * tf.add_n(penalties)
self.add_metric('ortho_penalty', self.gan.ops.squash(penalty))
print("PENALTY", penalty)
penalty = tf.reshape(penalty, [1,1])
penalty = tf.tile(penalty, [self.gan.batch_size(), 1])
d_regularizers.append(penalty)
if config.rothk_penalty:
rothk = self.rothk_penalty(d_real, d_fake)
self.add_metric('rothk_penalty', self.gan.ops.squash(rothk))
#d_regularizers.append(rothk)
d_loss += rothk
print("rothk penalty applied")
if config.k_lipschitz_penalty_ragan:
lipschitz_penalty = tf.maximum(tf.square(d_real-d_fake) - 1, 0) + tf.maximum(tf.square(d_fake-d_real) - 1, 0)
self.metrics['k_lipschitz_ragan']=lipschitz_penalty
d_regularizers.append(lipschitz_penalty)
if config.random_penalty:
gp = self.random_penalty(d_fake, d_real)
d_regularizers.append(gp)
self.add_metric('random_penalty', ops.squash(gp, tf.reduce_mean))
if self.gan.config.infogan and not hasattr(self.gan, 'infogan_q'):
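            # InfoGAN mutual-information term: a Q network predicts the continuous latent
            # code from the discriminator's features, and its (approximate) Gaussian
            # negative log-likelihood is appended to the discriminator regularizers.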
sample = self.gan.generator.sample
d = self.gan.create_component(self.gan.config.discriminator, name="discriminator", input=sample, reuse=True, features=[tf.zeros([1,16,16,256])])
last_layer = d.controls['infogan']
q = self.gan.create_component(self.gan.config.infogan, input=(self.gan.discriminator.controls['infogan']), name='infogan')
self.gan.infogan_q=q
std_cont = tf.sqrt(tf.exp(q.sample))
true = self.gan.uniform_distribution.z
mean = tf.reshape(q.sample, self.ops.shape(true))
std_cont = tf.reshape(std_cont, self.ops.shape(true))
eps = (true - mean) / (std_cont + 1e-8)
continuous = -tf.reduce_mean( -0.5 * np.log(2*np.pi)- tf.log(std_cont+1e-8)*tf.square(eps), reduction_indices=1)
if self.gan.config.infogan.flipped:
continuous = -continuous
self.metrics['cinfo']=ops.squash(continuous)
d_regularizers.append(continuous)
d_regularizers += self.d_regularizers()
g_regularizers += self.g_regularizers()
print("prereg", d_loss)
if len(d_regularizers) > 0:
d_loss += tf.add_n(d_regularizers)
if len(g_regularizers) > 0:
g_loss += tf.add_n(g_regularizers)
d_loss = ops.squash(d_loss, config.reduce or tf.reduce_mean) #linear doesn't work with this
# TODO: Why are we squashing before gradient penalty?
self.add_metric('d_loss', d_loss)
if g_loss is not None:
g_loss = ops.squash(g_loss, config.reduce or tf.reduce_mean)
self.add_metric('g_loss', g_loss)
self.sample = [d_loss, g_loss]
self.d_loss = d_loss
self.g_loss = g_loss
self.d_fake = d_fake
self.d_real = d_real
return self.sample
def d_regularizers(self):
return []
def g_regularizers(self):
return []
# This is openai's implementation of minibatch regularization
def minibatch(self, net):
discriminator = self.discriminator
ops = discriminator.ops
config = self.config
batch_size = ops.shape(net)[0]
single_batch_size = batch_size//2
n_kernels = config.minibatch_kernels or 300
dim_per_kernel = config.dim_per_kernel or 50
print("[discriminator] minibatch from", net, "to", n_kernels*dim_per_kernel)
x = ops.linear(net, n_kernels * dim_per_kernel)
activation = tf.reshape(x, (batch_size, n_kernels, dim_per_kernel))
big = np.zeros((batch_size, batch_size))
big += np.eye(batch_size)
big = tf.expand_dims(big, 1)
big = tf.cast(big,dtype=ops.dtype)
abs_dif = tf.reduce_sum(tf.abs(tf.expand_dims(activation,3) - tf.expand_dims(tf.transpose(activation, [1, 2, 0]), 0)), 2)
mask = 1. - big
masked = tf.exp(-abs_dif) * mask
def half(tens, second):
m, n, _ = tens.get_shape()
m = int(m)
n = int(n)
return tf.slice(tens, [0, 0, second * single_batch_size], [m, n, single_batch_size])
f1 = tf.reduce_sum(half(masked, 0), 2) / tf.reduce_sum(half(mask, 0))
f2 = tf.reduce_sum(half(masked, 1), 2) / tf.reduce_sum(half(mask, 1))
return ops.squash(ops.concat([f1, f2]))
def gradient_locally_stable(self, d_net):
config = self.config
generator = self.generator
g_sample = self.gan.uniform_sample
gradients = tf.gradients(d_net, [g_sample])[0]
return float(config.gradient_locally_stable) * \
tf.nn.l2_normalize(gradients, dim=1)
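    # 'rothk' penalty (appears to follow Roth et al. 2017, "Stabilizing Training of GANs
    # through Regularization"): squared gradient norms of D at real and generated samples,
    # weighted by (1 - sigmoid(d_real))^2 and sigmoid(d_fake)^2, scaled by rothk_lambda.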
def rothk_penalty(self, d_real, d_fake):
config = self.config
g_sample = self.gan.uniform_sample
x = self.gan.inputs.x
gradx = tf.gradients(d_real, [x])[0]
gradg = tf.gradients(d_fake, [g_sample])[0]
gradx = tf.reshape(gradx, [self.ops.shape(gradx)[0], -1])
gradg = tf.reshape(gradg, [self.ops.shape(gradg)[0], -1])
gradx_norm = tf.norm(gradx, axis=1, keep_dims=True)
gradg_norm = tf.norm(gradg, axis=1, keep_dims=True)
if int(gradx_norm.get_shape()[0]) != int(d_real.get_shape()[0]):
print("Condensing along batch for rothk")
gradx_norm = tf.reduce_mean(gradx_norm, axis=0)
gradg_norm = tf.reduce_mean(gradg_norm, axis=0)
gradx = tf.square(gradx_norm) * tf.square(1-tf.nn.sigmoid(d_real))
gradg = tf.square(gradg_norm) * tf.square(tf.nn.sigmoid(d_fake))
loss = gradx + gradg
loss *= config.rothk_lambda or 1
if config.rothk_decay:
decay_function = config.decay_function or tf.train.exponential_decay
decay_steps = config.decay_steps or 50000
decay_rate = config.decay_rate or 0.9
decay_staircase = config.decay_staircase or False
global_step = tf.train.get_global_step()
loss = decay_function(loss, global_step, decay_steps, decay_rate, decay_staircase)
return loss
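    # Gradient penalty on D at points interpolated between real and generated samples
    # (WGAN-GP style); with gradient_penalty_type == 'dragan' the interpolation is a
    # variance-scaled random perturbation of the real batch instead.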
def gradient_penalty(self):
config = self.config
gan = self.gan
ops = self.gan.ops
gradient_penalty = config.gradient_penalty
x = self.x
if x is None:
x=gan.inputs.x
g = self.generator
discriminator = self.discriminator or gan.discriminator
shape = [1 for t in ops.shape(x)]
shape[0] = gan.batch_size()
uniform_noise = tf.random_uniform(shape=shape,minval=0.,maxval=1.)
print("[gradient penalty] applying x:", x, "g:", g, "noise:", uniform_noise)
if config.gradient_penalty_type == 'dragan':
axes = [0, 1, 2, 3]
if len(ops.shape(x)) == 2:
axes = [0, 1]
mean, variance = tf.nn.moments(x, axes=axes)
interpolates = x + uniform_noise * 0.5 * variance * tf.random_uniform(shape=ops.shape(x), minval=0.,maxval=1.)
else:
interpolates = x + uniform_noise * (g - x)
reused_d = discriminator.reuse(interpolates)
gradients = tf.gradients(reused_d, [interpolates])[0]
penalty = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=1))
penalty = tf.square(penalty - 1.)
return float(gradient_penalty) * penalty
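    # Per-sample random mix of real and fake logits, penalized by the squared distance
    # from a fixed offset (random_penalty_offset, default -0.8).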
def random_penalty(self, d_fake, d_real):
config = self.config
gan = self.gan
ops = self.gan.ops
gradient_penalty = config.gradient_penalty
x = self.x
if x is None:
x=gan.inputs.x
shape = [1 for t in ops.shape(x)]
shape[0] = gan.batch_size()
uniform_noise = tf.random_uniform(shape=shape,minval=0.,maxval=1.)
mask = tf.cast(tf.greater(0.5, uniform_noise), tf.float32)
#interpolates = x * mask + g * (1-mask)
d = d_fake *(1-mask) + d_real * mask#discriminator.reuse(interpolates)
offset = config.random_penalty_offset or -0.8
penalty = tf.square(d - offset)
return penalty
def sigmoid_kl_with_logits(self, logits, targets):
# broadcasts the same target value across the whole batch
# this is implemented so awkwardly because tensorflow lacks an x log x op
assert isinstance(targets, float)
if targets in [0., 1.]:
entropy = 0.
else:
entropy = - targets * np.log(targets) - (1. - targets) * np.log(1. - targets)
return tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=tf.ones_like(logits) * targets) - entropy
| mit | -5,949,596,991,010,931,000 | 41.01497 | 156 | 0.566308 | false |
omnbmh/pi6x | webapps/weibo/demo.py | 1 | 2953 | # -*- coding:utf-8 -*- #
#! /usr/bin/env python
import time
#sys.path.insert(0, 'tweibo.zip')
from tweibo import *
# Replace this with your own APPKEY
APP_KEY = "100628862"
APP_SECRET = "021e18ea097817f15a819a45c0e5c592"
CALLBACK_URL = "http://127.0.0.1:8000"
# First fill in ACCESS_TOKEN and OPENID following the OAuth guide at https://github.com/upbit/tweibo-pysdk/wiki/OAuth2Handler
ACCESS_TOKEN = "c3337750b56e1ee3d35e669ebdea0eef"
OPENID = "99A960D0C781A65640DD2A1BE48CCD6A"
IMG_EXAMPLE = "example.png"
# The returned text is unicode, so set the default encoding to utf8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
def access_token_test():
""" 访问get_access_token_url()的URL并授权后,会跳转callback页面,其中包含如下参数:
#access_token=00000000000ACCESSTOKEN0000000000&expires_in=8035200&openid=0000000000000OPENID0000000000000&openkey=0000000000000OPENKEY000000000000&refresh_token=0000000000REFRESHTOKEN00000000&state=
保存下其中的 access_token, openid 并调用
oauth.set_access_token(access_token)
oauth.set_openid(openid)
即可完成 OAuth2Handler() 的初始化。可以记录 access_token 等信息
"""
oauth = OAuth2Handler()
oauth.set_app_key_secret(APP_KEY, APP_SECRET, CALLBACK_URL)
print oauth.get_access_token_url()
def tweibo_test():
oauth = OAuth2Handler()
oauth.set_app_key_secret(APP_KEY, APP_SECRET, CALLBACK_URL)
oauth.set_access_token(ACCESS_TOKEN)
oauth.set_openid(OPENID)
api = API(oauth)
#api = API(oauth, host="127.0.0.1", port=8888) # Init API() with proxy
# GET /t/show
#tweet1 = api.get.t__show(format="json", id=301041004850688)
#print ">> %s: %s" % (tweet1.data.nick, tweet1.data.text)
# POST /t/add
#content_str = "[from PySDK] %s says: %s" % (tweet1.data.nick, tweet1.data.origtext)
#tweet2 = api.post.t__add(format="json", content=content_str, clientip="10.0.0.1")
#print ">> time=%s, http://t.qq.com/p/t/%s" % (tweet2.data.time, tweet2.data.id)
# GET /statuses/user_timeline
#user_timeline = api.get.statuses__user_timeline(format="json", name="qqfarm", reqnum=3, pageflag=0, lastid=0, pagetime=0, type=3, contenttype=0)
#for idx, tweet in enumerate(user_timeline.data.info):
# print "[%d] http://t.qq.com/p/t/%s, (type:%d) %s" % (idx+1, tweet.id, tweet.type, tweet.text)
# UPLOAD /t/upload_pic
pic1 = api.upload.t__upload_pic(format="json", pic_type=2, pic=open(IMG_EXAMPLE, "rb"))
print ">> IMG: %s" % (pic1.data.imgurl)
# POST /t/add_pic_url
content_str2 = "[from PySDK] add pic demo: %s, time %s" % (IMG_EXAMPLE, time.time())
pic_urls = "%s" % (pic1.data.imgurl)
tweet_pic1 = api.post.t__add_pic_url(format="json", content=content_str2, pic_url=pic_urls, clientip="10.0.0.1")
print ">> time=%s, http://t.qq.com/p/t/%s" % (tweet_pic1.data.time, tweet_pic1.data.id)
if __name__ == '__main__':
#access_token_test()
tweibo_test()
| mit | 7,072,474,937,092,337,000 | 39.042857 | 206 | 0.674634 | false |
MicrosoftGenomics/FaST-LMM | fastlmm/util/runner/LocalMultiThread.py | 1 | 2490 | '''
Runs a distributable job on multiple processors. Returns the value of the job.
See SamplePi.py for examples.
'''
from fastlmm.util.runner import *
import os
import logging
try:
import dill as pickle
except:
logging.warning("Can't import dill, so won't be able to clusterize lambda expressions. If you try, you'll get this error 'Can't pickle <type 'function'>: attribute lookup __builtin__.function failed'")
import cPickle as pickle
import subprocess, sys, os.path
import threading
import fastlmm.util.util as util
from Queue import PriorityQueue
class LocalMultiThread: # implements IRunner
    '''Designed so that reduce will start running as soon as the 1st task has finished
'''
def __init__(self, taskcount, mkl_num_threads = None, just_one_process = False,):
if not 0 < taskcount: raise Exception("Expect taskcount to be positive")
self.taskcount = taskcount
self.just_one_process = just_one_process
if mkl_num_threads != None:
os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)
def _result_sequence(self,thread_list,priority_queue,shaped_distributable):
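        # Join workers in launch order; the priority queue is keyed by taskindex, so
        # results come back in task order and reduce() can start consuming them as
        # soon as the first task has finished.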
for thread in thread_list:
if not self.just_one_process:
thread.join()
result_sequence = priority_queue.get()[1]
for result in result_sequence:
yield result
def run(self, distributable):
JustCheckExists().input(distributable)
priority_queue = PriorityQueue()
thread_list = []
shaped_distributable = shape_to_desired_workcount(distributable, self.taskcount)
for taskindex in xrange(self.taskcount):
def _target(taskindex=taskindex):
result_list = []
for work in work_sequence_for_one_index(shaped_distributable, self.taskcount, taskindex):
result_list.append(run_all_in_memory(work))
priority_queue.put((taskindex,result_list))
if not self.just_one_process:
thread = threading.Thread(target=_target,name=str(taskindex))
thread_list.append(thread)
thread.start()
else:
thread_list.append(None)
_target()
result_sequence = self._result_sequence(thread_list, priority_queue,shaped_distributable)
result = shaped_distributable.reduce(result_sequence)
JustCheckExists().output(distributable)
return result
| apache-2.0 | -3,851,306,007,927,130,000 | 37.90625 | 205 | 0.64739 | false |
riddlezyc/geolab | src/energyforce/metad/calc.py | 1 | 7123 | #coding=utf-8
import numpy as np
# Minimum-image distance between two atoms under periodic boundary conditions (PBC)
def dis_pbc(a, b, c, alpha, beta, gamma, t1, t2):
cosalpha = np.cos(alpha * np.pi / 180)
sinalpha = np.sin(alpha * np.pi / 180)
cosbeta = np.cos(beta * np.pi / 180)
cosgamma = np.cos(gamma * np.pi / 180)
singamma = np.sin(gamma * np.pi / 180)
Ax = a
Bx = b * cosgamma
By = b * singamma
Cx = cosbeta
Cy = (cosalpha - cosbeta * cosgamma) / singamma
Cz = np.sqrt(1.0 - Cx * Cx - Cy * Cy)
Cx = c * Cx
Cy = c * Cy
Cz = c * Cz
xmin = np.abs(t1[0] - t2[0]) - Ax * np.round(np.abs(t1[0] - t2[0]) / Ax)
ymin = np.abs(t1[1] - t2[1]) - By * np.round(np.abs(t1[1] - t2[1]) / By)
zmin = np.abs(t1[2] - t2[2]) - Cz * np.round(np.abs(t1[2] - t2[2]) / Cz)
return np.sqrt(xmin * xmin + ymin * ymin + zmin * zmin)
# Works for an arbitrary (triclinic) cell, i.e. takes a, b, c, alpha, beta, gamma as input
def dis(pbca, pbcb, pbcc, t1, t2):
xmin = np.abs(t1[0] - t2[0]) - pbca * np.round(np.abs(t1[0] - t2[0]) / pbca)
ymin = np.abs(t1[1] - t2[1]) - pbcb * np.round(np.abs(t1[1] - t2[1]) / pbcb)
zmin = np.abs(t1[2] - t2[2]) - pbcc * np.round(np.abs(t1[2] - t2[2]) / pbcc)
return xmin,ymin,np.sqrt(xmin * xmin + ymin * ymin + zmin * zmin)
# For orthogonal cells: only a, b, c are needed; alpha, beta and gamma are all 90 degrees
def dis_npbc(t1, t2):
xmin = t1[0] - t2[0]
ymin = t1[1] - t2[1]
zmin = t1[2] - t2[2]
return np.sqrt(xmin * xmin + ymin * ymin + zmin * zmin)
# For systems where periodicity is not taken into account
def plane(h1, h2, o):
x1, y1, z1 = [h1[0]-o[0], h1[1]-o[1], h1[2]-o[2]]
x2, y2, z2 = [h2[0]-o[0], h2[1]-o[1], h2[2]-o[2]]
b = (x2 * z1 - x1 * z2) / (y2 * z1 - y1 * z2)
c = (x1 * y2 - x2 * y1) / (y2 * z1 - y1 * z2)
return np.arccos(c / np.sqrt(1 + b * b + c * c)) * 180 / np.pi
# Angle (in degrees) between the z direction and the normal of the plane defined by the three atoms of a water molecule
def polar(h1, h2, o):
x1 = (h1[0] + h2[0]) * 0.5
y1 = (h1[1] + h2[1]) * 0.5
z1 = (h1[2] + h2[2]) * 0.5
x, y, z = [x1-o[0], y1-o[1], z1-o[2]]
r = np.sqrt(x * x + y * y + z * z)
    return np.arccos(z / r) * 180 / np.pi
# The midpoint of the two H atoms is taken as the positive-charge centre and the O atom as the negative-charge centre; the line between them defines the dipole direction. Returns the angle (degrees) between this direction and z.
def ori(o,h):
x, y, z = [h[0]-o[0], h[1]-o[1], h[2]-o[2]]
r = np.sqrt(x * x + y * y + z * z)
return np.arccos(np.abs(z / r)) * 180 / np.pi
# Angle (degrees) between an OH group (or any other pair of atoms) and the z axis
def plane_abs(h1, h2, o):
x1, y1, z1 = [h1[0]-o[0], h1[1]-o[1], h1[2]-o[2]]
x2, y2, z2 = [h2[0]-o[0], h2[1]-o[1], h2[2]-o[2]]
b = (x2 * z1 - x1 * z2) / (y2 * z1 - y1 * z2)
c = (x1 * y2 - x2 * y1) / (y2 * z1 - y1 * z2)
return np.arccos(np.abs(c / np.sqrt(1 + b * b + c * c))) * 180 / np.pi
# Angle (in degrees) between the z direction and the normal of the plane defined by the three atoms of a water molecule
# The absolute value is used, so the returned angle lies in [0, 90] degrees
def polar_abs(h1, h2, o):
x1 = (h1[0] + h2[0]) * 0.5
y1 = (h1[1] + h2[1]) * 0.5
z1 = (h1[2] + h2[2]) * 0.5
x, y, z = [x1-o[0], y1-o[1], z1-o[2]]
r = np.sqrt(x * x + y * y + z * z)
return np.arccos(np.abs(z / r )) * 180 / np.pi
# The midpoint of the two H atoms is the positive-charge centre and the O atom the negative-charge centre; the line between them defines the dipole direction. Returns the angle (degrees) between this direction and z.
# The absolute value is used, so the returned angle lies in [0, 90] degrees
def hbond_pbc(a, b, c, alpha, beta, gamma, donor, h, acceptor):
cosalpha = np.cos(alpha * np.pi / 180)
sinalpha = np.sin(alpha * np.pi / 180)
cosbeta = np.cos(beta * np.pi / 180)
cosgamma = np.cos(gamma * np.pi / 180)
singamma = np.sin(gamma * np.pi / 180)
Ax = a
Bx = b * cosgamma
By = b * singamma
Cx = cosbeta
Cy = (cosalpha - cosbeta * cosgamma) / singamma
Cz = np.sqrt(1.0 - Cx * Cx - Cy * Cy)
Cx = c * Cx
Cy = c * Cy
Cz = c * Cz
    # H...acceptor distance
xmin = np.abs(h[0] - acceptor[0]) - Ax * np.round(np.abs(h[0] - acceptor[0]) / Ax)
ymin = np.abs(h[1] - acceptor[1]) - By * np.round(np.abs(h[1] - acceptor[1]) / By)
zmin = np.abs(h[2] - acceptor[2]) - Cz * np.round(np.abs(h[2] - acceptor[2]) / Cz)
    # O-O distance (alternative definition, commented out below)
# xmin = np.abs(donor[0] - acceptor[0]) - Ax * np.round(np.abs(donor[0] - acceptor[0]) / Ax)
# ymin = np.abs(donor[1] - acceptor[1]) - By * np.round(np.abs(donor[1] - acceptor[1]) / By)
# zmin = np.abs(donor[2] - acceptor[2]) - Cz * np.round(np.abs(donor[2] - acceptor[2]) / Cz)
r = np.sqrt(xmin * xmin + ymin * ymin + zmin * zmin)
# x1 = donor[0] - h[0] - Ax * np.round((donor[0] - h[0]) / Ax)
# y1 = donor[1] - h[1] - By * np.round((donor[1] - h[1]) / By)
# z1 = donor[2] - h[2] - Cz * np.round((donor[2] - h[2]) / Cz)
#
# x2 = (h[0] - acceptor[0]) - Ax * np.round((h[0] - acceptor[0]) / Ax)
# y2 = (h[1] - acceptor[1]) - By * np.round((h[1] - acceptor[1]) / By)
# z2 = (h[2] - acceptor[2]) - Cz * np.round((h[2] - acceptor[2]) / Cz)
x1 = acceptor[0] - donor[0] - Ax * np.round((acceptor[0] - donor[0]) / Ax)
y1 = acceptor[1] - donor[1] - By * np.round((acceptor[1] - donor[1]) / By)
z1 = acceptor[2] - donor[2] - Cz * np.round((acceptor[2] - donor[2]) / Cz)
x2 = (h[0] - donor[0]) - Ax * np.round((h[0] - donor[0]) / Ax)
y2 = (h[1] - donor[1]) - By * np.round((h[1] - donor[1]) / By)
z2 = (h[2] - donor[2]) - Cz * np.round((h[2] - donor[2]) / Cz)
dh = np.array([x1, y1, z1])
da = np.array([x2, y2, z2])
angle = np.arccos(sum(dh * da) / (np.sqrt(sum(dh * dh)) * np.sqrt(sum(da * da)))) * 180 / np.pi
return r, angle
# Distance and angle are expensive to compute separately, so hydrogen-bond geometry is computed in this single function.
# Only the computed r and angle (in degrees) are returned; no cutoff test is applied here, the caller decides using its own cutoff.
def get_cell(a, b, c, alpha, beta, gamma):
cosalpha = np.cos(alpha * np.pi / 180)
sinalpha = np.sin(alpha * np.pi / 180)
cosbeta = np.cos(beta * np.pi / 180)
cosgamma = np.cos(gamma * np.pi / 180)
singamma = np.sin(gamma * np.pi / 180)
Ax = a
Ay = 0
Az = 0
Bx = b * cosgamma
By = b * singamma
Bz = 0
Cx = cosbeta
Cy = (cosalpha - cosbeta * cosgamma) / singamma
Cz = np.sqrt(1.0 - Cx * Cx - Cy * Cy)
Cx = c * Cx
Cy = c * Cy
Cz = c * Cz
return Ax, Ay, Az, Bx, By, Bz, Cx, Cy, Cz
# auto correlation function
def acf(d1):
d1unbiased = d1 - np.mean(d1)
d1norm = np.sum(d1unbiased**2)
ac = np.correlate(d1unbiased,d1unbiased,"same")/d1norm
return ac[len(ac)/2:]
#acf2 is very slow
def acf2(x, length):
return np.array([1] + [np.corrcoef(x[:-i], x[i:])[0,1] \
for i in range(1, length)])
#auto correlation time
def act(x):
t = 0
for i in range(len(x)):
if x[i]<=0.001:
t=i
break
return t
| gpl-3.0 | 4,224,248,938,134,856,700 | 30.00495 | 99 | 0.503944 | false |
luzheqi1987/nova-annotation | nova/tests/unit/virt/libvirt/test_utils.py | 1 | 25579 | # Copyright 2012 NTT Data. All Rights Reserved.
# Copyright 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
import tempfile
import mock
from oslo.concurrency import processutils
from oslo.config import cfg
from nova import exception
from nova.openstack.common import fileutils
from nova import test
from nova import utils
from nova.virt.disk import api as disk
from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as libvirt_utils
CONF = cfg.CONF
class LibvirtUtilsTestCase(test.NoDBTestCase):
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def test_get_disk_type(self, mock_execute, mock_exists):
path = "disk.config"
example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
blah BLAH: bb
"""
mock_execute.return_value = (example_output, '')
disk_type = libvirt_utils.get_disk_type(path)
mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path)
mock_exists.assert_called_once_with(path)
self.assertEqual('raw', disk_type)
@mock.patch('nova.utils.execute')
def test_copy_image_local_cp(self, mock_execute):
libvirt_utils.copy_image('src', 'dest')
mock_execute.assert_called_once_with('cp', 'src', 'dest')
_rsync_call = functools.partial(mock.call,
'rsync', '--sparse', '--compress')
@mock.patch('nova.utils.execute')
def test_copy_image_rsync(self, mock_execute):
libvirt_utils.copy_image('src', 'dest', host='host')
mock_execute.assert_has_calls([
self._rsync_call('--dry-run', 'src', 'host:dest'),
self._rsync_call('src', 'host:dest'),
])
self.assertEqual(2, mock_execute.call_count)
@mock.patch('nova.utils.execute')
def test_copy_image_scp(self, mock_execute):
mock_execute.side_effect = [
processutils.ProcessExecutionError,
mock.DEFAULT,
]
libvirt_utils.copy_image('src', 'dest', host='host')
mock_execute.assert_has_calls([
self._rsync_call('--dry-run', 'src', 'host:dest'),
mock.call('scp', 'src', 'host:dest'),
])
self.assertEqual(2, mock_execute.call_count)
@mock.patch('os.path.exists', return_value=True)
def test_disk_type(self, mock_exists):
        # LVM detection: paths under /dev are treated as lvm
for p in ['/dev/b', '/dev/blah/blah']:
d_type = libvirt_utils.get_disk_type(p)
self.assertEqual('lvm', d_type)
# Try rbd detection
d_type = libvirt_utils.get_disk_type('rbd:pool/instance')
self.assertEqual('rbd', d_type)
# Try the other types
template_output = """image: %(path)s
file format: %(format)s
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
"""
path = '/myhome/disk.config'
for f in ['raw', 'qcow2']:
output = template_output % ({
'format': f,
'path': path,
})
with mock.patch('nova.utils.execute',
return_value=(output, '')) as mock_execute:
d_type = libvirt_utils.get_disk_type(path)
mock_execute.assert_called_once_with(
'env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path)
self.assertEqual(f, d_type)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def test_disk_backing(self, mock_execute, mock_exists):
path = '/myhome/disk.config'
template_output = """image: %(path)s
file format: raw
virtual size: 2K (2048 bytes)
cluster_size: 65536
disk size: 96K
"""
output = template_output % ({
'path': path,
})
mock_execute.return_value = (output, '')
d_backing = libvirt_utils.get_disk_backing_file(path)
mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path)
mock_exists.assert_called_once_with(path)
self.assertIsNone(d_backing)
def _test_disk_size(self, mock_execute, path, expected_size):
d_size = libvirt_utils.get_disk_size(path)
self.assertEqual(expected_size, d_size)
mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path)
@mock.patch('os.path.exists', return_value=True)
def test_disk_size(self, mock_exists):
path = '/myhome/disk.config'
template_output = """image: %(path)s
file format: raw
virtual size: %(v_size)s (%(vsize_b)s bytes)
cluster_size: 65536
disk size: 96K
"""
for i in range(0, 128):
bytes = i * 65336
kbytes = bytes / 1024
mbytes = kbytes / 1024
output = template_output % ({
'v_size': "%sM" % (mbytes),
'vsize_b': i,
'path': path,
})
with mock.patch('nova.utils.execute',
return_value=(output, '')) as mock_execute:
self._test_disk_size(mock_execute, path, i)
output = template_output % ({
'v_size': "%sK" % (kbytes),
'vsize_b': i,
'path': path,
})
with mock.patch('nova.utils.execute',
return_value=(output, '')) as mock_execute:
self._test_disk_size(mock_execute, path, i)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def test_qemu_info_canon(self, mock_execute, mock_exists):
path = "disk.config"
example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
blah BLAH: bb
"""
mock_execute.return_value = (example_output, '')
image_info = images.qemu_img_info(path)
mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path)
mock_exists.assert_called_once_with(path)
self.assertEqual('disk.config', image_info.image)
self.assertEqual('raw', image_info.file_format)
self.assertEqual(67108864, image_info.virtual_size)
self.assertEqual(98304, image_info.disk_size)
self.assertEqual(65536, image_info.cluster_size)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def test_qemu_info_canon2(self, mock_execute, mock_exists):
path = "disk.config"
example_output = """image: disk.config
file format: QCOW2
virtual size: 67108844
cluster_size: 65536
disk size: 963434
backing file: /var/lib/nova/a328c7998805951a_2
"""
mock_execute.return_value = (example_output, '')
image_info = images.qemu_img_info(path)
mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path)
mock_exists.assert_called_once_with(path)
self.assertEqual('disk.config', image_info.image)
self.assertEqual('qcow2', image_info.file_format)
self.assertEqual(67108844, image_info.virtual_size)
self.assertEqual(963434, image_info.disk_size)
self.assertEqual(65536, image_info.cluster_size)
self.assertEqual('/var/lib/nova/a328c7998805951a_2',
image_info.backing_file)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def test_qemu_backing_file_actual(self,
mock_execute, mock_exists):
path = "disk.config"
example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
cluster_size: 65536
disk size: 96K
Snapshot list:
ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
backing file: /var/lib/nova/a328c7998805951a_2 (actual path: /b/3a988059e51a_2)
"""
mock_execute.return_value = (example_output, '')
image_info = images.qemu_img_info(path)
mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path)
mock_exists.assert_called_once_with(path)
self.assertEqual('disk.config', image_info.image)
self.assertEqual('raw', image_info.file_format)
self.assertEqual(67108864, image_info.virtual_size)
self.assertEqual(98304, image_info.disk_size)
self.assertEqual(1, len(image_info.snapshots))
self.assertEqual('/b/3a988059e51a_2',
image_info.backing_file)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def test_qemu_info_convert(self, mock_execute, mock_exists):
path = "disk.config"
example_output = """image: disk.config
file format: raw
virtual size: 64M
disk size: 96K
Snapshot list:
ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
junk stuff: bbb
"""
mock_execute.return_value = (example_output, '')
image_info = images.qemu_img_info(path)
mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path)
mock_exists.assert_called_once_with(path)
self.assertEqual('disk.config', image_info.image)
self.assertEqual('raw', image_info.file_format)
self.assertEqual(67108864, image_info.virtual_size)
self.assertEqual(98304, image_info.disk_size)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def test_qemu_info_snaps(self, mock_execute, mock_exists):
path = "disk.config"
example_output = """image: disk.config
file format: raw
virtual size: 64M (67108864 bytes)
disk size: 96K
Snapshot list:
ID TAG VM SIZE DATE VM CLOCK
1 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
3 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
4 d9a9784a500742a7bb95627bb3aace38 0 2012-08-20 10:52:46 00:00:00.000
"""
mock_execute.return_value = (example_output, '')
image_info = images.qemu_img_info(path)
mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path)
mock_exists.assert_called_once_with(path)
self.assertEqual('disk.config', image_info.image)
self.assertEqual('raw', image_info.file_format)
self.assertEqual(67108864, image_info.virtual_size)
self.assertEqual(98304, image_info.disk_size)
self.assertEqual(3, len(image_info.snapshots))
def test_valid_hostname_normal(self):
self.assertTrue(libvirt_utils.is_valid_hostname("hello.world.com"))
def test_valid_hostname_ipv4addr(self):
self.assertTrue(libvirt_utils.is_valid_hostname("10.0.2.1"))
def test_valid_hostname_ipv6addr(self):
self.assertTrue(libvirt_utils.is_valid_hostname("240:2ac3::2"))
def test_valid_hostname_bad(self):
self.assertFalse(libvirt_utils.is_valid_hostname("foo/?com=/bin/sh"))
@mock.patch('nova.utils.execute')
def test_create_image(self, mock_execute):
libvirt_utils.create_image('raw', '/some/path', '10G')
libvirt_utils.create_image('qcow2', '/some/stuff', '1234567891234')
expected_args = [(('qemu-img', 'create', '-f', 'raw',
'/some/path', '10G'),),
(('qemu-img', 'create', '-f', 'qcow2',
'/some/stuff', '1234567891234'),)]
self.assertEqual(expected_args, mock_execute.call_args_list)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def test_create_cow_image(self, mock_execute, mock_exists):
mock_execute.return_value = ('stdout', None)
libvirt_utils.create_cow_image('/some/path', '/the/new/cow')
expected_args = [(('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', '/some/path'),),
(('qemu-img', 'create', '-f', 'qcow2',
'-o', 'backing_file=/some/path',
'/the/new/cow'),)]
self.assertEqual(expected_args, mock_execute.call_args_list)
def test_pick_disk_driver_name(self):
type_map = {'kvm': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
'qemu': ([True, 'qemu'], [False, 'qemu'], [None, 'qemu']),
'xen': ([True, 'phy'], [False, 'tap2'], [None, 'tap2']),
'uml': ([True, None], [False, None], [None, None]),
'lxc': ([True, None], [False, None], [None, None])}
for (virt_type, checks) in type_map.iteritems():
if virt_type == "xen":
version = 4001000
else:
version = 1005001
self.flags(virt_type=virt_type, group='libvirt')
for (is_block_dev, expected_result) in checks:
result = libvirt_utils.pick_disk_driver_name(version,
is_block_dev)
self.assertEqual(result, expected_result)
def test_pick_disk_driver_name_xen_4_0_0(self):
self.flags(virt_type="xen", group='libvirt')
result = libvirt_utils.pick_disk_driver_name(4000000, False)
self.assertEqual(result, "tap")
@mock.patch('os.path.exists', return_value=True)
@mock.patch('nova.utils.execute')
def test_get_disk_size(self, mock_execute, mock_exists):
path = '/some/path'
example_output = """image: 00000001
file format: raw
virtual size: 4.4M (4592640 bytes)
disk size: 4.4M
"""
mock_execute.return_value = (example_output, '')
self.assertEqual(4592640, disk.get_disk_size('/some/path'))
mock_execute.assert_called_once_with('env', 'LC_ALL=C', 'LANG=C',
'qemu-img', 'info', path)
mock_exists.assert_called_once_with(path)
def test_copy_image(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
src_fd, src_path = tempfile.mkstemp()
try:
with os.fdopen(src_fd, 'w') as fp:
fp.write('canary')
libvirt_utils.copy_image(src_path, dst_path)
with open(dst_path, 'r') as fp:
self.assertEqual(fp.read(), 'canary')
finally:
os.unlink(src_path)
finally:
os.unlink(dst_path)
def test_write_to_file(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
libvirt_utils.write_to_file(dst_path, 'hello')
with open(dst_path, 'r') as fp:
self.assertEqual(fp.read(), 'hello')
finally:
os.unlink(dst_path)
def test_write_to_file_with_umask(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
os.unlink(dst_path)
libvirt_utils.write_to_file(dst_path, 'hello', umask=0o277)
with open(dst_path, 'r') as fp:
self.assertEqual(fp.read(), 'hello')
mode = os.stat(dst_path).st_mode
self.assertEqual(mode & 0o277, 0)
finally:
os.unlink(dst_path)
@mock.patch.object(utils, 'execute')
def test_chown(self, mock_execute):
libvirt_utils.chown('/some/path', 'soren')
mock_execute.assert_called_once_with('chown', 'soren', '/some/path',
run_as_root=True)
@mock.patch.object(utils, 'execute')
def test_chown_for_id_maps(self, mock_execute):
id_maps = [vconfig.LibvirtConfigGuestUIDMap(),
vconfig.LibvirtConfigGuestUIDMap(),
vconfig.LibvirtConfigGuestGIDMap(),
vconfig.LibvirtConfigGuestGIDMap()]
id_maps[0].target = 10000
id_maps[0].count = 2000
id_maps[1].start = 2000
id_maps[1].target = 40000
id_maps[1].count = 2000
id_maps[2].target = 10000
id_maps[2].count = 2000
id_maps[3].start = 2000
id_maps[3].target = 40000
id_maps[3].count = 2000
libvirt_utils.chown_for_id_maps('/some/path', id_maps)
execute_args = ('nova-idmapshift', '-i',
'-u', '0:10000:2000,2000:40000:2000',
'-g', '0:10000:2000,2000:40000:2000',
'/some/path')
mock_execute.assert_called_once_with(*execute_args, run_as_root=True)
def _do_test_extract_snapshot(self, mock_execute,
dest_format='raw', out_format='raw'):
libvirt_utils.extract_snapshot('/path/to/disk/image', 'qcow2',
'/extracted/snap', dest_format)
mock_execute.assert_called_once_with(
'qemu-img', 'convert', '-f', 'qcow2', '-O', out_format,
'/path/to/disk/image', '/extracted/snap')
@mock.patch.object(utils, 'execute')
def test_extract_snapshot_raw(self, mock_execute):
self._do_test_extract_snapshot(mock_execute)
@mock.patch.object(utils, 'execute')
def test_extract_snapshot_iso(self, mock_execute):
self._do_test_extract_snapshot(mock_execute, dest_format='iso')
@mock.patch.object(utils, 'execute')
def test_extract_snapshot_qcow2(self, mock_execute):
self._do_test_extract_snapshot(mock_execute,
dest_format='qcow2', out_format='qcow2')
def test_load_file(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
# We have a test for write_to_file. If that is sound, this suffices
libvirt_utils.write_to_file(dst_path, 'hello')
self.assertEqual(libvirt_utils.load_file(dst_path), 'hello')
finally:
os.unlink(dst_path)
def test_file_open(self):
dst_fd, dst_path = tempfile.mkstemp()
try:
os.close(dst_fd)
# We have a test for write_to_file. If that is sound, this suffices
libvirt_utils.write_to_file(dst_path, 'hello')
with libvirt_utils.file_open(dst_path, 'r') as fp:
self.assertEqual(fp.read(), 'hello')
finally:
os.unlink(dst_path)
def test_get_fs_info(self):
class FakeStatResult(object):
def __init__(self):
self.f_bsize = 4096
self.f_frsize = 4096
self.f_blocks = 2000
self.f_bfree = 1000
self.f_bavail = 900
self.f_files = 2000
self.f_ffree = 1000
self.f_favail = 900
self.f_flag = 4096
self.f_namemax = 255
self.path = None
def fake_statvfs(path):
self.path = path
return FakeStatResult()
self.stubs.Set(os, 'statvfs', fake_statvfs)
fs_info = libvirt_utils.get_fs_info('/some/file/path')
self.assertEqual('/some/file/path', self.path)
self.assertEqual(8192000, fs_info['total'])
self.assertEqual(3686400, fs_info['free'])
self.assertEqual(4096000, fs_info['used'])
@mock.patch('nova.virt.images.fetch_to_raw')
def test_fetch_image(self, mock_images):
context = 'opaque context'
target = '/tmp/targetfile'
image_id = '4'
user_id = 'fake'
project_id = 'fake'
libvirt_utils.fetch_image(context, target, image_id,
user_id, project_id)
mock_images.assert_called_once_with(
context, image_id, target, user_id, project_id,
max_size=0)
def test_fetch_raw_image(self):
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
def fake_rename(old, new):
self.executes.append(('mv', old, new))
def fake_unlink(path):
self.executes.append(('rm', path))
def fake_rm_on_error(path, remove=None):
self.executes.append(('rm', '-f', path))
def fake_qemu_img_info(path):
class FakeImgInfo(object):
pass
file_format = path.split('.')[-1]
if file_format == 'part':
file_format = path.split('.')[-2]
elif file_format == 'converted':
file_format = 'raw'
if 'backing' in path:
backing_file = 'backing'
else:
backing_file = None
if 'big' in path:
virtual_size = 2
else:
virtual_size = 1
FakeImgInfo.file_format = file_format
FakeImgInfo.backing_file = backing_file
FakeImgInfo.virtual_size = virtual_size
return FakeImgInfo()
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(os, 'rename', fake_rename)
self.stubs.Set(os, 'unlink', fake_unlink)
self.stubs.Set(images, 'fetch', lambda *_, **__: None)
self.stubs.Set(images, 'qemu_img_info', fake_qemu_img_info)
self.stubs.Set(fileutils, 'delete_if_exists', fake_rm_on_error)
# Since the remove param of fileutils.remove_path_on_error()
# is initialized at load time, we must provide a wrapper
# that explicitly resets it to our fake delete_if_exists()
old_rm_path_on_error = fileutils.remove_path_on_error
f = functools.partial(old_rm_path_on_error, remove=fake_rm_on_error)
self.stubs.Set(fileutils, 'remove_path_on_error', f)
context = 'opaque context'
image_id = '4'
user_id = 'fake'
project_id = 'fake'
target = 't.qcow2'
self.executes = []
expected_commands = [('qemu-img', 'convert', '-O', 'raw',
't.qcow2.part', 't.qcow2.converted'),
('rm', 't.qcow2.part'),
('mv', 't.qcow2.converted', 't.qcow2')]
images.fetch_to_raw(context, image_id, target, user_id, project_id,
max_size=1)
self.assertEqual(self.executes, expected_commands)
target = 't.raw'
self.executes = []
expected_commands = [('mv', 't.raw.part', 't.raw')]
images.fetch_to_raw(context, image_id, target, user_id, project_id)
self.assertEqual(self.executes, expected_commands)
target = 'backing.qcow2'
self.executes = []
expected_commands = [('rm', '-f', 'backing.qcow2.part')]
self.assertRaises(exception.ImageUnacceptable,
images.fetch_to_raw,
context, image_id, target, user_id, project_id)
self.assertEqual(self.executes, expected_commands)
target = 'big.qcow2'
self.executes = []
expected_commands = [('rm', '-f', 'big.qcow2.part')]
self.assertRaises(exception.FlavorDiskTooSmall,
images.fetch_to_raw,
context, image_id, target, user_id, project_id,
max_size=1)
self.assertEqual(self.executes, expected_commands)
del self.executes
def test_get_disk_backing_file(self):
with_actual_path = False
def fake_execute(*args, **kwargs):
if with_actual_path:
return ("some: output\n"
"backing file: /foo/bar/baz (actual path: /a/b/c)\n"
"...: ...\n"), ''
else:
return ("some: output\n"
"backing file: /foo/bar/baz\n"
"...: ...\n"), ''
def return_true(*args, **kwargs):
return True
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(os.path, 'exists', return_true)
out = libvirt_utils.get_disk_backing_file('')
self.assertEqual(out, 'baz')
with_actual_path = True
out = libvirt_utils.get_disk_backing_file('')
self.assertEqual(out, 'c')
| apache-2.0 | 1,146,003,465,233,312,800 | 38.231595 | 79 | 0.564135 | false |
intelligent-agent/redeem | redeem/gcodes/M561.py | 1 | 1479 | """
GCode M561
Example: M561
This cancels any bed-plane fitting as the result of probing (or anything else) and returns the machine to moving in the user's coordinate system.
Author: Elias Bakken
License: CC BY-SA: http://creativecommons.org/licenses/by-sa/2.0/
"""
from __future__ import absolute_import
import json
import numpy as np
from .GCodeCommand import GCodeCommand
from redeem.BedCompensation import BedCompensation
class M561(GCodeCommand):
def execute(self, g):
# Show matrix
if g.has_letter("S"):
self.printer.send_message(
g.prot, "Current bed compensation matrix: {}".format(
json.dumps(self.printer.matrix_bed_comp.tolist())))
# Update matrix
elif g.has_letter("U"):
mat = BedCompensation.create_rotation_matrix(self.printer.probe_points,
self.printer.probe_heights)
self.printer.matrix_bed_comp = mat
# Reset matrix
else:
self.printer.matrix_bed_comp = np.identity(3)
def get_description(self):
return "Show, update or reset bed level matrix to identity"
def get_long_description(self):
return ("This cancels any bed-plane fitting as the result of probing"
" (or anything else) and returns the machine "
"to moving in the user's coordinate system.\n"
"Add 'S' to show the marix instead of resetting it.\n"
"Add 'U' to update the current matrix based on probe data")
| gpl-3.0 | -6,170,218,593,551,022,000 | 31.866667 | 145 | 0.665314 | false |
StochasticNumerics/mimclib | tests/matern/echo_test_cmd.py | 1 | 1481 | #!/usr/bin/python
import numpy as np
import argparse
parser = argparse.ArgumentParser(add_help=True)
parser.register('type', 'bool',
lambda v: v.lower() in ("yes", "true", "t", "1"))
parser.add_argument("-db", type="bool", action="store", default=False)
parser.add_argument("-qoi_dim", type=int, action="store",
default=1, help="MIMC dim")
parser.add_argument("-qoi_df_nu", type=float, action="store",
default=3.5, help="MIMC dim")
args, unknowns = parser.parse_known_args()
if args.qoi_dim:
base = "\
mimc_run.py -mimc_TOL {TOL} -qoi_seed 0 \
-qoi_problem 0 -qoi_sigma 0.2 \
-mimc_min_dim {qoi_dim} -qoi_dim {qoi_dim} -qoi_df_nu {qoi_df_nu} \
-qoi_x0 0.3 0.4 0.6 -ksp_rtol 1e-25 -ksp_type gmres \
-qoi_a0 0 -qoi_f0 1 \
-qoi_scale 10 -qoi_df_sig 0.5 -mimc_M0 1 \
-mimc_beta {beta} -mimc_gamma {gamma} -mimc_h0inv 3 \
-mimc_bayes_fit_lvls 3 -mimc_moments 1 -mimc_bayesian False \
".format(TOL="{TOL}", qoi_df_nu=args.qoi_df_nu, qoi_dim=args.qoi_dim,
beta=" ".join([str("2")]*args.qoi_dim),
gamma=" ".join([str("1")]*args.qoi_dim))
else:
assert False
base += " ".join(unknowns)
if not args.db:
cmd_single = "python " + base + " -mimc_verbose 10 -db False "
print(cmd_single.format(TOL=0.001))
else:
cmd_multi = "python " + base + " -mimc_verbose 0 -db True -db_tag {tag} "
    print(cmd_multi.format(tag="misc_matern_d{:d}_nu{:.2g}".format(args.qoi_dim, args.qoi_df_nu), TOL=1e-10))
| gpl-2.0 | -6,952,863,911,539,885,000 | 36.974359 | 108 | 0.6158 | false |
UAVCAN/pyuavcan | pyuavcan/transport/_data_specifier.py | 1 | 1424 | # Copyright (c) 2019 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <[email protected]>
from __future__ import annotations
import enum
import dataclasses
@dataclasses.dataclass(frozen=True)
class DataSpecifier:
"""
The data specifier defines what category and type of data is exchanged over a transport session.
See the abstract transport model for details.
"""
@dataclasses.dataclass(frozen=True)
class MessageDataSpecifier(DataSpecifier):
SUBJECT_ID_MASK = 2 ** 13 - 1
subject_id: int
def __post_init__(self) -> None:
if not (0 <= self.subject_id <= self.SUBJECT_ID_MASK):
raise ValueError(f"Invalid subject ID: {self.subject_id}")
@dataclasses.dataclass(frozen=True)
class ServiceDataSpecifier(DataSpecifier):
class Role(enum.Enum):
REQUEST = enum.auto()
"""
Request output role is for clients.
Request input role is for servers.
"""
RESPONSE = enum.auto()
"""
Response output role is for servers.
Response input role is for clients.
"""
SERVICE_ID_MASK = 2 ** 9 - 1
service_id: int
role: Role
def __post_init__(self) -> None:
assert self.role in self.Role
if not (0 <= self.service_id <= self.SERVICE_ID_MASK):
raise ValueError(f"Invalid service ID: {self.service_id}")
| mit | -7,368,559,777,367,193,000 | 26.921569 | 100 | 0.649579 | false |
adbuerger/PECas | test/test_systems.py | 1 | 3895 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 Adrian Bürger
#
# This file is part of PECas.
#
# PECas is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PECas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with PECas. If not, see <http://www.gnu.org/licenses/>.
# Test the classes for system definitions
import pecas
import casadi as ca
import unittest
class TestSystemsInit(unittest.TestCase):
def test_basic_system_init(self):
self.t = ca.MX.sym("t", 1)
self.u = ca.MX.sym("u", 1)
self.p = ca.MX.sym("p", 1)
self.phi = ca.MX.sym("phi", 1)
self.g = ca.MX.sym("g", 1)
sys = pecas.systems.BasicSystem(p = self.p, phi = self.phi)
sys.show_system_information(showEquations = True)
sys = pecas.systems.BasicSystem(t = self.t, p = self.p, phi = self.phi)
sys.show_system_information(showEquations = True)
sys = pecas.systems.BasicSystem(t = self.t, u = self.u, p = self.p, \
phi = self.phi)
sys.show_system_information(showEquations = True)
sys = pecas.systems.BasicSystem(t = self.t, u = self.u, p = self.p, \
phi = self.phi, g = self.g)
sys.show_system_information(showEquations = True)
self.assertRaises(TypeError, pecas.systems.BasicSystem)
self.assertRaises(TypeError, pecas.systems.BasicSystem, p = None)
self.assertRaises(TypeError, pecas.systems.BasicSystem, phi = None)
def test_explode_system_init(self):
self.t = ca.MX.sym("t", 1)
self.u = ca.MX.sym("u", 1)
self.x = ca.MX.sym("x", 1)
self.p = ca.MX.sym("p", 1)
self.eps_e = ca.MX.sym("eps_e", 1)
self.eps_u = ca.MX.sym("eps_u", 1)
self.phi = ca.MX.sym("phi", 1)
self.f = ca.MX.sym("f", 1)
sys = pecas.systems.ExplODE(x = self.x, p = self.p, \
eps_e = self.eps_e, phi = self.phi, f = self.f)
sys.show_system_information(showEquations = True)
sys = pecas.systems.ExplODE(t = self.t, x = self.x, p = self.p, \
eps_e = self.eps_e, phi = self.phi, f = self.f)
sys.show_system_information(showEquations = True)
sys = pecas.systems.ExplODE(t = self.t, u = self.u, x = self.x, \
p = self.p, eps_e = self.eps_e, phi = self.phi, f = self.f)
sys.show_system_information(showEquations = True)
sys = pecas.systems.ExplODE(t = self.t, u = self.u, x = self.x,\
p = self.p, eps_e = self.eps_e, eps_u = self.eps_u, \
phi = self.phi, f = self.f)
sys.show_system_information(showEquations = True)
self.assertRaises(TypeError, pecas.systems.ExplODE)
self.assertRaises(TypeError, pecas.systems.ExplODE, x = None)
self.assertRaises(TypeError, pecas.systems.ExplODE, p = None)
self.assertRaises(TypeError, pecas.systems.ExplODE, w = None)
self.assertRaises(TypeError, pecas.systems.ExplODE, phi = None)
self.assertRaises(TypeError, pecas.systems.ExplODE, f = None)
        # while explicit time dependency is not allowed:
self.assertRaises(NotImplementedError, pecas.systems.ExplODE, \
t = self.t, u = self.u, x = self.x, \
p = self.p, eps_e = self.eps_e, phi = self.phi, f = self.t)
def test_implade_system_init(self):
self.assertRaises(NotImplementedError, pecas.systems.ImplDAE)
| lgpl-3.0 | -6,181,232,457,055,610,000 | 37.186275 | 79 | 0.621983 | false |
EdDev/vdsm | lib/vdsm/storage/mailbox.py | 1 | 33976 | #
# Copyright 2009-2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
import os
import errno
import time
import threading
import struct
import logging
import uuid
from six.moves import queue
from vdsm.config import config
from vdsm.storage import misc
from vdsm.storage import task
from vdsm.storage.exception import InvalidParameterException
from vdsm.storage.threadPool import ThreadPool
from vdsm import concurrent
from vdsm import constants
__author__ = "ayalb"
__date__ = "$Mar 9, 2009 5:25:07 PM$"
CHECKSUM_BYTES = 4
MAILBOX_SIZE = 4096
PACKED_UUID_SIZE = 16
VOLUME_MAX_SIZE = 0xFFFFFFFF # 32 bit unsigned max size
SIZE_CHARS = 16
MESSAGE_VERSION = "1"
MESSAGE_SIZE = 64
CLEAN_MESSAGE = "\1" * MESSAGE_SIZE
EXTEND_CODE = "xtnd"
BLOCK_SIZE = 512
REPLY_OK = 1
EMPTYMAILBOX = MAILBOX_SIZE * "\0"
SLOTS_PER_MAILBOX = int(MAILBOX_SIZE / MESSAGE_SIZE)
# Last message slot is reserved for metadata (checksum, extendable mailbox,
# etc)
MESSAGES_PER_MAILBOX = SLOTS_PER_MAILBOX - 1
_zeroCheck = misc.checksum(EMPTYMAILBOX, CHECKSUM_BYTES)
# Assumes CHECKSUM_BYTES equals 4!!!
pZeroChecksum = struct.pack('<l', _zeroCheck)
def dec2hex(n):
return "%x" % n
def runTask(args):
if type(args) == tuple:
cmd = args[0]
args = args[1:]
else:
cmd = args
args = None
ctask = task.Task(id=None, name=cmd)
ctask.prepare(cmd, *args)
def _mboxExecCmd(*args, **kwargs):
return misc.execCmd(*args, **kwargs)
class SPM_Extend_Message:
log = logging.getLogger('storage.SPM.Messages.Extend')
def __init__(self, volumeData, newSize, callbackFunction=None):
if ('poolID' not in volumeData) or \
('domainID' not in volumeData) or \
('volumeID' not in volumeData):
self.log.error('create extend msg failed for volume: %s, size:'
' %d', '-'.join(volumeData.values()), newSize)
raise InvalidParameterException('volumeData dictionary',
volumeData)
if (newSize < 0) or (newSize > VOLUME_MAX_SIZE):
raise InvalidParameterException('volumeSize', newSize)
misc.validateUUID(volumeData['domainID'], 'domainID')
misc.validateUUID(volumeData['volumeID'], 'volumeID')
self.pool = volumeData['poolID']
self.volumeData = volumeData
self.newSize = str(dec2hex(newSize))
self.callback = callbackFunction
# Message structure is rigid (order must be kept and is relied upon):
# Version (1 byte), OpCode (4 bytes), Domain UUID (16 bytes), Volume
# UUID (16 bytes), Requested size (16 bytes), Padding to 64 bytes (14
# bytes)
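        # An illustrative payload (hypothetical UUIDs and a hypothetical
        # size of 100000 == 0x186a0), sketching the layout described above:
        #   "1" + "xtnd" + <16 packed domain UUID bytes>
        #   + <16 packed volume UUID bytes> + "00000000000186a0",
        #   then "0"-padded to 64 bytes.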
domain = misc.packUuid(volumeData['domainID'])
volume = misc.packUuid(volumeData['volumeID'])
# Build base payload
payload = MESSAGE_VERSION + EXTEND_CODE + domain + volume + \
self.newSize.rjust(SIZE_CHARS, "0")
# Pad payload with zeros
self.payload = payload.ljust(MESSAGE_SIZE, "0")
self.log.debug('new extend msg created: domain: %s, volume: %s',
volumeData['domainID'], volumeData['volumeID'])
def __getitem__(self, index):
return self.payload[index]
def checkReply(self, reply):
# Sanity check - Make sure reply is for current message
sizeOffset = 5 + 2 * PACKED_UUID_SIZE
if (self.payload[0:sizeOffset] != reply[0:sizeOffset]):
self.log.error("SPM_Extend_Message: Reply message volume data "
"(domainID + volumeID) differs from request "
"message, reply : %s, orig: %s", reply,
self.payload)
raise RuntimeError('Incorrect reply')
# if self.payload[sizeOffset:sizeOffset + PACKED_UUID_SIZE] > \
# reply[sizeOffset:sizeOffset + PACKED_UUID_SIZE]):
# self.log.error("SPM_Extend_Message: New size is smaller than "
# "requested size")
# raise RuntimeError('Request failed')
return REPLY_OK
@classmethod
def processRequest(cls, pool, msgID, payload):
cls.log.debug("processRequest, payload:" + repr(payload))
sdOffset = 5
volumeOffset = sdOffset + PACKED_UUID_SIZE
sizeOffset = volumeOffset + PACKED_UUID_SIZE
volume = {}
volume['poolID'] = pool.spUUID
volume['domainID'] = misc.unpackUuid(
payload[sdOffset:sdOffset + PACKED_UUID_SIZE])
volume['volumeID'] = misc.unpackUuid(
payload[volumeOffset:volumeOffset + PACKED_UUID_SIZE])
size = int(payload[sizeOffset:sizeOffset + SIZE_CHARS], 16)
cls.log.info("processRequest: extending volume %s "
"in domain %s (pool %s) to size %d", volume['volumeID'],
volume['domainID'], volume['poolID'], size)
msg = None
try:
try:
pool.extendVolume(volume['domainID'], volume['volumeID'], size)
msg = SPM_Extend_Message(volume, size)
except:
cls.log.error("processRequest: Exception caught while trying "
"to extend volume: %s in domain: %s",
volume['volumeID'], volume['domainID'],
exc_info=True)
msg = SPM_Extend_Message(volume, 0)
finally:
pool.spmMailer.sendReply(msgID, msg)
return {'status': {'code': 0, 'message': 'Done'}}
class HSM_Mailbox:
log = logging.getLogger('storage.Mailbox.HSM')
def __init__(self, hostID, poolID, inbox, outbox, monitorInterval=2):
self._hostID = str(hostID)
self._poolID = str(poolID)
self._monitorInterval = monitorInterval
self._queue = queue.Queue(-1)
self._inbox = inbox
if not os.path.exists(self._inbox):
self.log.error("HSM_Mailbox create failed - inbox %s does not "
"exist" % repr(self._inbox))
raise RuntimeError("HSM_Mailbox create failed - inbox %s does not "
"exist" % repr(self._inbox))
self._outbox = outbox
if not os.path.exists(self._outbox):
self.log.error("HSM_Mailbox create failed - outbox %s does not "
"exist" % repr(self._outbox))
raise RuntimeError("HSM_Mailbox create failed - outbox %s does "
"not exist" % repr(self._outbox))
self._mailman = HSM_MailMonitor(self._inbox, self._outbox, hostID,
self._queue, monitorInterval)
self.log.debug('HSM_MailboxMonitor created for pool %s' % self._poolID)
def sendExtendMsg(self, volumeData, newSize, callbackFunction=None):
msg = SPM_Extend_Message(volumeData, newSize, callbackFunction)
if str(msg.pool) != self._poolID:
raise ValueError('PoolID does not correspond to Mailbox pool')
self._queue.put(msg)
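        # Illustrative call (hypothetical UUIDs and size, not part of the
        # original code):
        #   mailbox.sendExtendMsg({'poolID': spUUID,
        #                          'domainID': sdUUID,
        #                          'volumeID': volUUID},
        #                         newSize=20 * 1024 ** 3)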
def stop(self):
if self._mailman:
self._mailman.immStop()
self._mailman.tp.joinAll(waitForTasks=False)
else:
self.log.warning("HSM_MailboxMonitor - No mail monitor object "
"available to stop")
def wait(self, timeout=None):
return self._mailman.wait(timeout)
def flushMessages(self):
if self._mailman:
self._mailman.immFlush()
else:
self.log.warning("HSM_MailboxMonitor - No mail monitor object "
"available to flush")
class HSM_MailMonitor(object):
log = logging.getLogger('storage.MailBox.HsmMailMonitor')
def __init__(self, inbox, outbox, hostID, queue, monitorInterval):
# Save arguments
tpSize = config.getint('irs', 'thread_pool_size') / 2
waitTimeout = wait_timeout(monitorInterval)
maxTasks = config.getint('irs', 'max_tasks')
self.tp = ThreadPool("mailbox-hsm", tpSize, waitTimeout, maxTasks)
self._stop = False
self._flush = False
self._queue = queue
self._activeMessages = {}
self._monitorInterval = monitorInterval
self._hostID = int(hostID)
self._used_slots_array = [0] * MESSAGES_PER_MAILBOX
self._outgoingMail = EMPTYMAILBOX
self._incomingMail = EMPTYMAILBOX
# TODO: add support for multiple paths (multiple mailboxes)
self._inCmd = [constants.EXT_DD,
'if=' + str(inbox),
'iflag=direct,fullblock',
'bs=' + str(MAILBOX_SIZE),
'count=1',
'skip=' + str(self._hostID)
]
self._outCmd = [constants.EXT_DD,
'of=' + str(outbox),
'iflag=fullblock',
'oflag=direct',
'conv=notrunc',
'bs=' + str(MAILBOX_SIZE),
'count=1',
'seek=' + str(self._hostID)
]
self._init = False
self._initMailbox() # Read initial mailbox state
self._msgCounter = 0
self._sendMail() # Clear outgoing mailbox
self._thread = concurrent.thread(self.run, name="mailbox-hsm",
log=self.log)
self._thread.start()
def _initMailbox(self):
# Sync initial incoming mail state with storage view
(rc, out, err) = _mboxExecCmd(self._inCmd, raw=True)
if rc == 0:
self._incomingMail = out
self._init = True
else:
self.log.warning("HSM_MailboxMonitor - Could not initialize "
"mailbox, will not accept requests until init "
"succeeds")
def immStop(self):
self._stop = True
def immFlush(self):
self._flush = True
def wait(self, timeout=None):
self._thread.join(timeout=timeout)
return not self._thread.is_alive()
def _handleResponses(self, newMsgs):
rc = False
for i in range(0, MESSAGES_PER_MAILBOX):
# Skip checking non used slots
if self._used_slots_array[i] == 0:
continue
# Skip empty return messages (messages with version 0)
start = i * MESSAGE_SIZE
# First byte of message is message version.
# Check return message version, if 0 then message is empty
if newMsgs[start] in ['\0', '0']:
continue
for j in range(start, start + MESSAGE_SIZE):
if newMsgs[j] != self._incomingMail[j]:
break
# If search exhausted then message hasn't changed since last read
# and can be skipped
if j == (start + MESSAGE_SIZE - 1):
continue
#
# We only get here if there is a novel reply so we can remove the
# message from the active list and the outgoing mail and handle the
# reply
#
rc = True
newMsg = newMsgs[start:start + MESSAGE_SIZE]
if newMsg == CLEAN_MESSAGE:
del self._activeMessages[i]
self._used_slots_array[i] = 0
self._msgCounter -= 1
self._outgoingMail = self._outgoingMail[0:start] + \
MESSAGE_SIZE * "\0" + self._outgoingMail[start +
MESSAGE_SIZE:]
continue
msg = self._activeMessages[i]
self._activeMessages[i] = CLEAN_MESSAGE
self._outgoingMail = self._outgoingMail[0:start] + \
CLEAN_MESSAGE + self._outgoingMail[start + MESSAGE_SIZE:]
try:
self.log.debug("HSM_MailboxMonitor(%s/%s) - Checking reply: "
"%s", self._msgCounter, MESSAGES_PER_MAILBOX,
repr(newMsg))
msg.checkReply(newMsg)
if msg.callback:
try:
id = str(uuid.uuid4())
if not self.tp.queueTask(id, runTask, (msg.callback,
msg.volumeData)):
raise Exception()
except:
self.log.error("HSM_MailMonitor: exception caught "
"while running msg callback, for "
"message: %s, callback function: %s",
repr(msg.payload), msg.callback,
exc_info=True)
except RuntimeError as e:
self.log.error("HSM_MailMonitor: exception: %s caught while "
"checking reply for message: %s, reply: %s",
str(e), repr(msg.payload), repr(newMsg))
except:
self.log.error("HSM_MailMonitor: exception caught while "
"checking reply from SPM, request was: %s "
"reply: %s", repr(msg.payload), repr(newMsg),
exc_info=True)
# Finished processing incoming mail, now save mail to compare against
# next batch
self._incomingMail = newMsgs
return rc
def _checkForMail(self):
# self.log.debug("HSM_MailMonitor - checking for mail")
# self.log.debug("Running command: " + str(self._inCmd))
(rc, in_mail, err) = misc.execCmd(self._inCmd, raw=True)
if rc:
raise RuntimeError("_handleResponses.Could not read mailbox - rc "
"%s" % rc)
if (len(in_mail) != MAILBOX_SIZE):
raise RuntimeError("_handleResponses.Could not read mailbox - len "
"%s != %s" % (len(in_mail), MAILBOX_SIZE))
# self.log.debug("Parsing inbox content: %s", in_mail)
return self._handleResponses(in_mail)
def _sendMail(self):
self.log.info("HSM_MailMonitor sending mail to SPM - " +
str(self._outCmd))
chk = misc.checksum(
self._outgoingMail[0:MAILBOX_SIZE - CHECKSUM_BYTES],
CHECKSUM_BYTES)
pChk = struct.pack('<l', chk) # Assumes CHECKSUM_BYTES equals 4!!!
self._outgoingMail = \
self._outgoingMail[0:MAILBOX_SIZE - CHECKSUM_BYTES] + pChk
_mboxExecCmd(self._outCmd, data=self._outgoingMail)
def _handleMessage(self, message):
# TODO: add support for multiple mailboxes
freeSlot = False
for i in range(0, MESSAGES_PER_MAILBOX):
if self._used_slots_array[i] == 0:
if not freeSlot:
freeSlot = i
continue
duplicate = True
for j in range(0, MESSAGE_SIZE):
if message[j] != self._activeMessages[i][j]:
duplicate = False
break
if duplicate:
self.log.debug("HSM_MailMonitor - ignoring duplicate message "
"%s" % (repr(message)))
return
if not freeSlot:
raise RuntimeError("HSM_MailMonitor - Active messages list full, "
"cannot add new message")
self._msgCounter += 1
self._used_slots_array[freeSlot] = 1
self._activeMessages[freeSlot] = message
start = freeSlot * MESSAGE_SIZE
end = start + MESSAGE_SIZE
self._outgoingMail = self._outgoingMail[0:start] + message.payload + \
self._outgoingMail[end:]
self.log.debug("HSM_MailMonitor - start: %s, end: %s, len: %s, "
"message(%s/%s): %s" %
(start, end, len(self._outgoingMail), self._msgCounter,
MESSAGES_PER_MAILBOX,
repr(self._outgoingMail[start:end])))
def run(self):
try:
failures = 0
# Do not start processing requests before incoming mailbox is
# initialized
while not self._init and not self._stop:
try:
time.sleep(2)
self._initMailbox() # Read initial mailbox state
except:
pass
while not self._stop:
try:
message = None
sendMail = False
# If no message is pending, block_wait until a new message
# or stop command arrives
while not self._stop and not message and \
not self._activeMessages:
try:
# self.log.debug("No requests in queue, going to "
# "sleep until new requests arrive")
# Check if a new message is waiting to be sent
message = self._queue.get(
block=True, timeout=self._monitorInterval)
self._handleMessage(message)
message = None
sendMail = True
except queue.Empty:
pass
if self._stop:
break
# If pending messages available, check if there are new
# messages waiting in queue as well
empty = False
while (not empty) and \
(len(self._activeMessages) < MESSAGES_PER_MAILBOX):
# TODO: Remove single mailbox limitation
try:
message = self._queue.get(block=False)
self._handleMessage(message)
message = None
sendMail = True
except queue.Empty:
empty = True
if self._flush:
self._flush = False
sendMail = True
try:
sendMail |= self._checkForMail()
failures = 0
except:
self.log.error("HSM_MailboxMonitor - Exception caught "
"while checking for mail",
exc_info=True)
failures += 1
if sendMail:
self._sendMail()
# If there are active messages waiting for SPM reply, wait
# a few seconds before performing another IO op
if self._activeMessages and not self._stop:
# If recurring failures then sleep for one minute
# before retrying
if (failures > 9):
time.sleep(60)
else:
time.sleep(self._monitorInterval)
except:
self.log.error("HSM_MailboxMonitor - Incoming mail"
"monitoring thread caught exception; "
"will try to recover", exc_info=True)
finally:
self.log.info("HSM_MailboxMonitor - Incoming mail monitoring "
"thread stopped, clearing outgoing mail")
self._outgoingMail = EMPTYMAILBOX
self._sendMail() # Clear outgoing mailbox
class SPM_MailMonitor:
log = logging.getLogger('storage.MailBox.SpmMailMonitor')
def registerMessageType(self, messageType, callback):
self._messageTypes[messageType] = callback
def unregisterMessageType(self, messageType):
del self._messageTypes[messageType]
def __init__(self, poolID, maxHostID, inbox, outbox, monitorInterval=2):
"""
        Note: inbox parameter here should point to the HSM's outbox
mailbox file, and vice versa.
"""
self._messageTypes = {}
# Save arguments
self._stop = False
self._stopped = False
self._poolID = poolID
tpSize = config.getint('irs', 'thread_pool_size') / 2
waitTimeout = wait_timeout(monitorInterval)
maxTasks = config.getint('irs', 'max_tasks')
self.tp = ThreadPool("mailbox-spm", tpSize, waitTimeout, maxTasks)
self._inbox = inbox
if not os.path.exists(self._inbox):
self.log.error("SPM_MailMonitor create failed - inbox %s does not "
"exist" % repr(self._inbox))
raise RuntimeError("SPM_MailMonitor create failed - inbox %s does "
"not exist" % repr(self._inbox))
self._outbox = outbox
if not os.path.exists(self._outbox):
self.log.error("SPM_MailMonitor create failed - outbox %s does "
"not exist" % repr(self._outbox))
raise RuntimeError("SPM_MailMonitor create failed - outbox %s "
"does not exist" % repr(self._outbox))
self._numHosts = int(maxHostID)
self._outMailLen = MAILBOX_SIZE * self._numHosts
self._monitorInterval = monitorInterval
# TODO: add support for multiple paths (multiple mailboxes)
self._outgoingMail = self._outMailLen * "\0"
self._incomingMail = self._outgoingMail
self._inCmd = ['dd',
'if=' + str(self._inbox),
'iflag=direct,fullblock',
'count=1'
]
self._outCmd = ['dd',
'of=' + str(self._outbox),
'oflag=direct',
'iflag=fullblock',
'conv=notrunc',
'count=1'
]
self._outLock = threading.Lock()
self._inLock = threading.Lock()
# Clear outgoing mail
self.log.debug("SPM_MailMonitor - clearing outgoing mail, command is: "
"%s", self._outCmd)
cmd = self._outCmd + ['bs=' + str(self._outMailLen)]
(rc, out, err) = _mboxExecCmd(cmd, data=self._outgoingMail)
if rc:
self.log.warning("SPM_MailMonitor couldn't clear outgoing mail, "
"dd failed")
self._thread = concurrent.thread(
self.run, name="mailbox-spm", log=self.log)
self._thread.start()
self.log.debug('SPM_MailMonitor created for pool %s' % self._poolID)
def wait(self, timeout=None):
self._thread.join(timeout=timeout)
return not self._thread.is_alive()
def stop(self):
self._stop = True
def isStopped(self):
return self._stopped
def getMaxHostID(self):
return self._numHosts
def setMaxHostID(self, newMaxId):
with self._inLock:
with self._outLock:
diff = newMaxId - self._numHosts
if diff > 0:
delta = MAILBOX_SIZE * diff * "\0"
self._outgoingMail += delta
self._incomingMail += delta
elif diff < 0:
delta = MAILBOX_SIZE * diff
self._outgoingMail = self._outgoingMail[:-delta]
self._incomingMail = self._incomingMail[:-delta]
self._numHosts = newMaxId
self._outMailLen = MAILBOX_SIZE * self._numHosts
@classmethod
def validateMailbox(self, mailbox, mailboxIndex):
"""
Return True if mailbox has a valid checksum, and is not an empty
mailbox, False otherwise.
"""
assert len(mailbox) == MAILBOX_SIZE
data = mailbox[:-CHECKSUM_BYTES]
checksum = mailbox[-CHECKSUM_BYTES:]
n = misc.checksum(data, CHECKSUM_BYTES)
expected = struct.pack('<l', n) # Assumes CHECKSUM_BYTES equals 4!!!
if checksum != expected:
self.log.error(
"mailbox %s checksum failed, not clearing mailbox, clearing "
"new mail (data=%r, checksum=%r, expected=%r)",
mailboxIndex, data, checksum, expected)
return False
elif expected == pZeroChecksum:
return False # Ignore messages of empty mailbox
return True
def _handleRequests(self, newMail):
send = False
# run through all messages and check if new messages have arrived
# (since last read)
for host in range(0, self._numHosts):
# Check mailbox checksum
mailboxStart = host * MAILBOX_SIZE
isMailboxValidated = False
for i in range(0, MESSAGES_PER_MAILBOX):
msgId = host * SLOTS_PER_MAILBOX + i
msgStart = msgId * MESSAGE_SIZE
# First byte of message is message version. Check message
# version, if 0 then message is empty and can be skipped
if newMail[msgStart] in ['\0', '0']:
continue
# Most mailboxes are probably empty so it costs less to check
# that all messages start with 0 than to validate the mailbox,
                # therefore this is done after we find a non-empty message in
                # the mailbox
if not isMailboxValidated:
if not self.validateMailbox(
newMail[mailboxStart:mailboxStart + MAILBOX_SIZE],
host):
# Cleaning invalid mbx in newMail
newMail = newMail[:mailboxStart] + EMPTYMAILBOX + \
newMail[mailboxStart + MAILBOX_SIZE:]
break
self.log.debug("SPM_MailMonitor: Mailbox %s validated, "
"checking mail", host)
isMailboxValidated = True
newMsg = newMail[msgStart:msgStart + MESSAGE_SIZE]
msgOffset = msgId * MESSAGE_SIZE
if newMsg == CLEAN_MESSAGE:
# Should probably put a setter on outgoingMail which would
# take the lock
self._outLock.acquire()
try:
self._outgoingMail = \
self._outgoingMail[0:msgOffset] + CLEAN_MESSAGE + \
self._outgoingMail[msgOffset + MESSAGE_SIZE:
self._outMailLen]
finally:
self._outLock.release()
send = True
continue
# Message isn't empty, check if its new
isMessageNew = False
for j in range(msgStart, msgStart + MESSAGE_SIZE):
if newMail[j] != self._incomingMail[j]:
isMessageNew = True
break
# If search exhausted, i.e. message hasn't changed since last
# read, it can be skipped
if not isMessageNew:
continue
# We only get here if there is a novel request
try:
msgType = newMail[msgStart + 1:msgStart + 5]
if msgType in self._messageTypes:
# Use message class to process request according to
# message specific logic
id = str(uuid.uuid4())
self.log.debug("SPM_MailMonitor: processing request: "
"%s" % repr(newMail[
msgStart:msgStart + MESSAGE_SIZE]))
res = self.tp.queueTask(
id, runTask, (self._messageTypes[msgType], msgId,
newMail[msgStart:
msgStart + MESSAGE_SIZE])
)
if not res:
raise Exception()
else:
self.log.error("SPM_MailMonitor: unknown message type "
"encountered: %s", msgType)
except RuntimeError as e:
self.log.error("SPM_MailMonitor: exception: %s caught "
"while handling message: %s", str(e),
newMail[msgStart:msgStart + MESSAGE_SIZE])
except:
self.log.error("SPM_MailMonitor: exception caught while "
"handling message: %s",
newMail[msgStart:msgStart + MESSAGE_SIZE],
exc_info=True)
self._incomingMail = newMail
return send
def _checkForMail(self):
# Lock is acquired in order to make sure that neither _numHosts nor
# incomingMail are changed during checkForMail
self._inLock.acquire()
try:
# self.log.debug("SPM_MailMonitor -_checking for mail")
cmd = self._inCmd + ['bs=' + str(self._outMailLen)]
# self.log.debug("SPM_MailMonitor - reading incoming mail, "
# "command: " + str(cmd))
(rc, in_mail, err) = misc.execCmd(cmd, raw=True)
if rc:
raise IOError(errno.EIO, "_handleRequests._checkForMail - "
"Could not read mailbox: %s" % self._inbox)
if (len(in_mail) != (self._outMailLen)):
self.log.error('SPM_MailMonitor: _checkForMail - dd succeeded '
'but read %d bytes instead of %d, cannot check '
'mail. Read mail contains: %s', len(in_mail),
self._outMailLen, repr(in_mail[:80]))
raise RuntimeError("_handleRequests._checkForMail - Could not "
"read mailbox")
# self.log.debug("Parsing inbox content: %s", in_mail)
if self._handleRequests(in_mail):
self._outLock.acquire()
try:
cmd = self._outCmd + ['bs=' + str(self._outMailLen)]
(rc, out, err) = _mboxExecCmd(cmd,
data=self._outgoingMail)
if rc:
self.log.warning("SPM_MailMonitor couldn't write "
"outgoing mail, dd failed")
finally:
self._outLock.release()
finally:
self._inLock.release()
def sendReply(self, msgID, msg):
# Lock is acquired in order to make sure that neither _numHosts nor
# outgoingMail are changed while used
self._outLock.acquire()
try:
msgOffset = msgID * MESSAGE_SIZE
self._outgoingMail = \
self._outgoingMail[0:msgOffset] + msg.payload + \
self._outgoingMail[msgOffset + MESSAGE_SIZE:self._outMailLen]
mailboxOffset = (msgID / SLOTS_PER_MAILBOX) * MAILBOX_SIZE
mailbox = self._outgoingMail[mailboxOffset:
mailboxOffset + MAILBOX_SIZE]
cmd = self._outCmd + ['bs=' + str(MAILBOX_SIZE),
'seek=' + str(mailboxOffset / MAILBOX_SIZE)]
# self.log.debug("Running command: %s, for message id: %s",
# str(cmd), str(msgID))
(rc, out, err) = _mboxExecCmd(cmd, data=mailbox)
if rc:
self.log.error("SPM_MailMonitor: sendReply - couldn't send "
"reply, dd failed")
finally:
self._outLock.release()
def run(self):
try:
while not self._stop:
try:
self._checkForMail()
except:
self.log.error("Error checking for mail", exc_info=True)
time.sleep(self._monitorInterval)
finally:
self._stopped = True
self.tp.joinAll(waitForTasks=False)
self.log.info("SPM_MailMonitor - Incoming mail monitoring thread "
"stopped")
def wait_timeout(monitor_interval):
"""
    Designed to return a 3 second wait timeout for a monitor interval of 2
    seconds, keeping the runtime behavior the same as it has been for the
    last 8 years, while allowing shorter times for testing.
"""
return monitor_interval * 3 / 2
| gpl-2.0 | 5,307,111,074,492,991,000 | 40.688344 | 79 | 0.515629 | false |
butala/pyrsss | pyrsss/mag/fgm2iaga.py | 1 | 5144 | import sys
import logging
import os
from datetime import datetime
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as NP
import pandas as PD
logger = logging.getLogger('pyrsss.mag.fgm2iaga')
HEADER_TEMPLATE = """\
Format IAGA-2002 |
Source of Data CARISMA |
IAGA CODE {stn} |
Geodetic Latitude {lat:<8.3f} |
Geodetic Longitude {lon:<8.3f} |
Elevation {el:<8.3f} |
Reported XYZF |
DATE TIME DOY {stn}X {stn}Y {stn}Z {stn}F |
"""
def parse(fname):
"""
Parse FGM format data *fname*. Return :class:`DataFrame`
containing all information found in the file.
The FGM file format is used by CARISMA to store data and is
described here:
http://www.carisma.ca/carisma-data/fgm-data-format.
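
    The parser below expects a one-line header followed by one record per
    sample, roughly like this (hypothetical values, shown only to
    illustrate the layout assumed by the parsing code, not taken from a
    real CARISMA file):

        GILL 56.376 265.360 20140101 GEO nT 1
        20140101000000 8974.321 -321.123 57654.234 .
        20140101000001 8974.330 -321.119 57654.241 x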
"""
with open(fname) as fid:
siteid, lat, lon, date, pos_format, units, sample_rate = fid.next().split()
dt = []
x = []
y = []
z = []
flag = []
for line in fid:
cols = line.split()
dt.append(datetime.strptime(cols[0], '%Y%m%d%H%M%S'))
x.append(float(cols[1]))
y.append(float(cols[2]))
z.append(float(cols[3]))
if cols[4] == '.':
flag.append(False)
elif cols[4] == 'x':
flag.append(True)
else:
raise ValueError('unknown flag value {} encountered in {}'.format(cols[4], fname))
f = NP.hypot(x, NP.hypot(y, z))
df = PD.DataFrame(data={'x': x, 'y': y, 'z': z, 'f': f, 'flag': flag},
index=dt)
df.siteid = siteid
df.lat = float(lat)
df.lon = float(lon)
df.date = datetime.strptime(date, '%Y%m%d')
df.pos_format = pos_format
df.units = units
df.sample_rate = sample_rate
return df
def fgm2iaga(path,
fgm_fname,
ftype='v',
output_template='{stn}{date:%Y%m%d}{ftype}{interval}.{interval}'):
"""
Parse FGM format file *fgm_fname* and reformat it to IAGA2002 and
    save at *path* (using *output_template* to form the file
name). Return the file name. The *ftype* denotes the file type: p
- provisional, d - definitive, q - quasi-definitive, or v -
variation.
"""
df = parse(fgm_fname)
delta = (df.index[1] - df.index[0]).total_seconds()
if delta == 1.0:
interval = 'sec'
elif delta == 60.0:
interval = 'min'
else:
raise ValueError('unknown data interval found in {}'.format(fgm_fname))
stn = df.siteid[:3].upper()
out_fname = os.path.join(path,
output_template.format(stn=stn.lower(),
date=df.date,
ftype=ftype,
interval=interval))
with open(out_fname, 'w') as fid:
fid.write(HEADER_TEMPLATE.format(stn=stn.upper(),
lat=df.lat,
lon=df.lon,
el=0))
for row in df.itertuples():
dt = row.Index
if row.flag:
X = Y = Z = F = 99999
else:
X = row.x
Y = row.y
Z = row.z
F = NP.linalg.norm([X, Y, Z])
fid.write('{date:%Y-%m-%d %H:%M:%S.000} {date:%j}'
' {X:>9.2f} {Y:>9.2f} {Z:>9.2f} {F:>9.2f}\n'.format(date=dt,
X=X,
Y=Y,
Z=Z,
F=F))
return out_fname
def main(argv=None):
if argv is None:
argv = sys.argv
    parser = ArgumentParser(description='Convert FGM format data (CARISMA) to IAGA2002 format.',
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('output_path',
type=str,
help='path to store daily IAGA2002 format files')
parser.add_argument('fgm_fnames',
type=str,
nargs='+',
metavar='fgm_fname',
help='FGM format file')
args = parser.parse_args(argv[1:])
for fgm_fname in args.fgm_fnames:
iaga_fname = fgm2iaga(args.output_path, fgm_fname)
logger.info('{} -> {}'.format(fgm_fname, iaga_fname))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
sys.exit(main())
| mit | -107,705,631,235,652,930 | 35.48227 | 98 | 0.437597 | false |
yoneken/train_tf | compressImg2TrainData.py | 1 | 4562 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Directory structure
TRAIN_DIR:
label0:
img0001.png
img0002.png
img0003.png
label1:
img0001.png
img0002.png
.
.
.
label9:
img0001.png
'''
import cv2, os, gzip, random
import numpy as np
from itertools import chain
class MakeMnistData:
'''This class makes a train data set and a test data set for MNIST'''
def __init__(self):
self.LABEL_MAGIC_NUMBER = 2049
self.IMAGE_MAGIC_NUMBER = 2051
    self.data_label = [] # length equals the number of samples
    self.img_data = [] # length equals the number of samples
    self.data_size = [] # length equals the number of classes
    self.label_name = [] # length equals the number of classes
def _make32(self, val):
# Big endian
return [val >> i & 0xff for i in [24,16,8,0]]
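    # Worked example (illustrative): _make32(2049) returns
    # [0x00, 0x00, 0x08, 0x01], i.e. the IDX label-file magic number as
    # four big-endian bytes.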
def load(self, dirname):
for i,dname in enumerate(sorted(next(os.walk(dirname))[1])):
files = next(os.walk(dirname + "/" + dname))[2]
self.data_label.append([i]*len(files))
self.data_size.append(len(files))
self.label_name.append(dname)
for filename in files:
img_file = dirname + "/" + dname + "/" + filename
img = cv2.imread(img_file)
img = cv2.resize(img, (28, 28))
imgg = cv2.cvtColor(img, cv2.cv.CV_BGR2GRAY)
self.img_data.append(imgg[:,:].reshape(imgg.size))
self.data_label = list(chain.from_iterable(self.data_label))
# Shuffle the data
tmp_dl = list(self.data_label)
tmp_id = list(self.img_data)
indices = np.arange(len(self.data_label))
np.random.shuffle(indices)
for i in range(len(self.data_label)):
self.data_label[i] = tmp_dl[indices[i]]
self.img_data[i] = tmp_id[indices[i]]
def write(self, dirname, valid_size=0):
if valid_size == 0:
valid_size = int(len(self.data_label) * 0.05)
# make test data
test_data_label = self.data_label[:valid_size]
test_img_data = self.img_data[:valid_size]
self.data_label = self.data_label[valid_size:]
self.img_data = self.img_data[valid_size:]
test_data_size = [0]*len(self.data_size)
for i in range(valid_size):
ind = test_data_label[i]
self.data_size[ind] = self.data_size[ind] - 1
test_data_size[ind] = test_data_size[ind] + 1
# make a train label data
# make header
ldata = self._make32(self.LABEL_MAGIC_NUMBER)
ldata = np.r_[ldata, self._make32(sum(self.data_size))]
ldata = np.r_[ldata, self.data_label]
with gzip.open(dirname + "/train-labels-idx1-ubyte.gz",'wb') as f:
f.write(np.array(ldata, dtype=np.uint8))
# make a test label data
# make header
tldata = self._make32(self.LABEL_MAGIC_NUMBER)
tldata = np.r_[tldata, self._make32(sum(test_data_size))]
tldata = np.r_[tldata, test_data_label]
with gzip.open(dirname + "/t10k-labels-idx1-ubyte.gz",'wb') as f:
f.write(np.array(tldata, dtype=np.uint8))
# make a train image data
# make header
idata = self._make32(self.IMAGE_MAGIC_NUMBER)
idata = np.r_[idata, self._make32(sum(self.data_size))]
idata = np.r_[idata, self._make32(28)]
idata = np.r_[idata, self._make32(28)]
idata = np.r_[idata, list(chain.from_iterable(self.img_data))]
# write value
with gzip.open(dirname + "/train-images-idx3-ubyte.gz",'wb') as f:
f.write(np.array(idata, dtype=np.uint8))
# make a test image data
# make header
tidata = self._make32(self.IMAGE_MAGIC_NUMBER)
tidata = np.r_[tidata, self._make32(sum(test_data_size))]
tidata = np.r_[tidata, self._make32(28)]
tidata = np.r_[tidata, self._make32(28)]
tidata = np.r_[tidata, list(chain.from_iterable(test_img_data))]
# write value
with gzip.open(dirname + "/t10k-images-idx3-ubyte.gz",'wb') as f:
f.write(np.array(tidata, dtype=np.uint8))
s = ",".join(["\"" + x + "\"" for x in self.label_name])
print(s)
with open(dirname + "/label_name.txt", 'w') as f:
f.write(s)
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser(description="This script makes a train and a validation dataset")
parser.add_argument("--in_dir", dest="indir", type=str, default="data")
parser.add_argument("--out_dir", dest="outdir", type=str, default="data")
parser.add_argument("--valid_size", dest="valsize", type=int, default=0, help="Default size is 5% of all data")
args = parser.parse_args()
mmd = MakeMnistData()
mmd.load(args.indir)
mmd.write(args.outdir, args.valsize)
| apache-2.0 | 1,497,630,365,127,481,300 | 31.126761 | 113 | 0.629329 | false |
e2thenegpii/EnphaseInterface | setup.py | 1 | 1063 |
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
long_description=f.read()
setup(
name='pyEnFace',
version='0.9.6',
description='A python interface to the Enphase Developer API',
long_description=long_description,
url='https://github.com/e2thenegpii/EnphaseInterface',
author='Eldon Allred',
author_email='[email protected]',
license='Gnu Public License version 3',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules',
],
keywords='enphase development',
packages=find_packages(),
package_data={'':['DESCRIPTION.rst']},
include_package_data=True,
install_requires=['pandas',
'sqlalchemy',
'lxml',]
)
| gpl-3.0 | 1,655,293,226,129,300,500 | 28.527778 | 75 | 0.650047 | false |
Thylossus/tud-movie-character-insights | Server/Tools/Parser/show_stats.py | 1 | 1183 | #!/usr/bin/python3
stats_version="0.11"
# Include custom libs
import sys
sys.path.append( '../../include/python' )
import serverutils.config as config
import serverutils.mongohelper as mongohelper
import re
from pymongo import MongoClient
print("Word stats v.", stats_version)
print("================================")
print()
mongoClient, mongoDb = mongohelper.getMongoClient()
characterWords = []
movies = mongoDb.rawMovies.find({})
for movie in movies:
if len(sys.argv) > 1 and not movie['_id'] in sys.argv:
continue
print("Scanning " + movie['_id'])
counters = {}
for quote in mongoDb.rawQuotes.find({'_id.movie': movie['_id']}):
c = len(re.findall(r'\w+',quote['text']))
if not quote['character'] in counters:
counters[quote['character']] = 0
counters[quote['character']] = counters[quote['character']] + c
for character, count in counters.items():
characterWords += [(movie['_id'], character, count)]
characterWords = sorted(characterWords, key=lambda a: -a[2])
for i in range(200 if len(characterWords) > 200 else len(characterWords)):
print(str(characterWords[i][2]) + " words: " + characterWords[i][1] + " (" + characterWords[i][0] + ")")
| apache-2.0 | 7,304,283,256,309,693,000 | 26.511628 | 105 | 0.668639 | false |
little-dude/pyroute2 | pyroute2/ipdb/route.py | 1 | 15960 | import logging
import threading
from socket import AF_UNSPEC
from pyroute2.common import basestring
from pyroute2.netlink import nlmsg
from pyroute2.netlink.rtnl.rtmsg import rtmsg
from pyroute2.netlink.rtnl.req import IPRouteRequest
from pyroute2.ipdb.transactional import Transactional
from pyroute2.ipdb.linkedset import LinkedSet
class Metrics(Transactional):
_fields = [rtmsg.metrics.nla2name(i[0]) for i in rtmsg.metrics.nla_map]
class NextHopSet(LinkedSet):
def __init__(self, prime=None):
super(NextHopSet, self).__init__()
prime = prime or []
for v in prime:
self.add(v)
def __sub__(self, vs):
ret = type(self)()
sub = set(self.raw.keys()) - set(vs.raw.keys())
for v in sub:
ret.add(self[v], raw=self.raw[v])
return ret
def __make_nh(self, prime):
return (prime.get('flags', 0),
prime.get('hops', 0),
prime.get('ifindex', 0),
prime.get('gateway'))
def __getitem__(self, key):
return dict(zip(('flags', 'hops', 'ifindex', 'gateway'), key))
def __iter__(self):
def NHIterator():
for x in tuple(self.raw.keys()):
yield self[x]
return NHIterator()
def add(self, prime, raw=None):
return super(NextHopSet, self).add(self.__make_nh(prime))
def remove(self, prime, raw=None):
hit = False
for nh in self:
for key in prime:
if prime[key] != nh.get(key):
break
else:
hit = True
super(NextHopSet, self).remove(self.__make_nh(nh))
if not hit:
raise KeyError('nexthop not found')
class WatchdogKey(dict):
'''
Construct from a route a dictionary that could be used as
a match for IPDB watchdogs.
'''
def __init__(self, route):
dict.__init__(self, [x for x in IPRouteRequest(route).items()
if x[0] in ('dst', 'dst_len', 'oif',
'iif', 'table')])
def RouteKey(msg):
'''
Construct from a netlink message a key that can be used
to locate the route in the table
'''
if isinstance(msg, nlmsg):
src = None
# calculate dst
if msg.get_attr('RTA_DST', None) is not None:
dst = '%s/%s' % (msg.get_attr('RTA_DST'),
msg['dst_len'])
else:
dst = 'default'
# use output | input interfaces as key also
iif = msg.get_attr(msg.name2nla('iif'))
oif = msg.get_attr(msg.name2nla('oif'))
elif isinstance(msg, Transactional):
src = None
dst = msg.get('dst')
iif = msg.get('iif')
oif = msg.get('oif')
else:
raise TypeError('prime not supported')
# key: src, dst, iif, oif
return (src, dst, iif, oif)
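    # Illustrative result (hypothetical message): a route to 10.0.0.0/24
    # leaving via ifindex 2 maps to the key (None, '10.0.0.0/24', None, 2).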
class Route(Transactional):
'''
Persistent transactional route object
'''
_fields = [rtmsg.nla2name(i[0]) for i in rtmsg.nla_map]
_fields.append('flags')
_fields.append('src_len')
_fields.append('dst_len')
_fields.append('table')
_fields.append('removal')
_virtual_fields = ['ipdb_scope', 'ipdb_priority']
_fields.extend(_virtual_fields)
_linked_sets = ['multipath', ]
cleanup = ('attrs',
'header',
'event',
'cacheinfo')
def __init__(self, ipdb, mode=None, parent=None, uid=None):
Transactional.__init__(self, ipdb, mode, parent, uid)
self._load_event = threading.Event()
with self._direct_state:
for i in self._fields:
self[i] = None
self['metrics'] = Metrics(parent=self)
self['multipath'] = NextHopSet()
self['ipdb_priority'] = 0
def add_nh(self, prime):
with self._write_lock:
tx = self.get_tx()
with tx._direct_state:
tx['multipath'].add(prime)
def del_nh(self, prime):
with self._write_lock:
tx = self.get_tx()
with tx._direct_state:
tx['multipath'].remove(prime)
def load_netlink(self, msg):
with self._direct_state:
if self['ipdb_scope'] == 'locked':
# do not touch locked interfaces
return
self['ipdb_scope'] = 'system'
self.update(msg)
# re-init metrics
metrics = self.get('metrics', Metrics(parent=self))
with metrics._direct_state:
for metric in tuple(metrics.keys()):
del metrics[metric]
self['metrics'] = metrics
# merge key
for (name, value) in msg['attrs']:
norm = rtmsg.nla2name(name)
# normalize RTAX
if norm == 'metrics':
with self['metrics']._direct_state:
for (rtax, rtax_value) in value['attrs']:
rtax_norm = rtmsg.metrics.nla2name(rtax)
self['metrics'][rtax_norm] = rtax_value
elif norm == 'multipath':
self['multipath'] = NextHopSet()
for v in value:
nh = {}
for name in [x[0] for x in rtmsg.nh.fields]:
nh[name] = v[name]
for (rta, rta_value) in v['attrs']:
rta_norm = rtmsg.nla2name(rta)
nh[rta_norm] = rta_value
self['multipath'].add(nh)
else:
self[norm] = value
if msg.get_attr('RTA_DST', None) is not None:
dst = '%s/%s' % (msg.get_attr('RTA_DST'),
msg['dst_len'])
else:
dst = 'default'
self['dst'] = dst
# finally, cleanup all not needed
for item in self.cleanup:
if item in self:
del self[item]
self.sync()
def sync(self):
self._load_event.set()
def reload(self):
# do NOT call get_routes() here, it can cause race condition
# self._load_event.wait()
return self
def commit(self, tid=None, transaction=None, rollback=False):
self._load_event.clear()
error = None
drop = True
if tid:
transaction = self._transactions[tid]
else:
if transaction:
drop = False
else:
transaction = self.last()
# create a new route
if self['ipdb_scope'] != 'system':
try:
self.ipdb.update_routes(
self.nl.route('add', **IPRouteRequest(transaction)))
except Exception:
self.nl = None
self.ipdb.routes.remove(self)
raise
# work on existing route
snapshot = self.pick()
try:
# route set
request = IPRouteRequest(transaction - snapshot)
if any([request[x] not in (None, {'attrs': []}) for x in request]):
self.ipdb.update_routes(
self.nl.route('set', **IPRouteRequest(transaction)))
# route removal
if (transaction['ipdb_scope'] in ('shadow', 'remove')) or\
((transaction['ipdb_scope'] == 'create') and rollback):
if transaction['ipdb_scope'] == 'shadow':
self.set_item('ipdb_scope', 'locked')
self.ipdb.update_routes(
self.nl.route('delete', **IPRouteRequest(snapshot)))
if transaction['ipdb_scope'] == 'shadow':
self.set_item('ipdb_scope', 'shadow')
except Exception as e:
if not rollback:
ret = self.commit(transaction=snapshot, rollback=True)
if isinstance(ret, Exception):
error = ret
else:
error = e
else:
if drop:
self.drop()
x = RuntimeError()
x.cause = e
raise x
if drop and not rollback:
self.drop()
if error is not None:
error.transaction = transaction
raise error
if not rollback:
with self._direct_state:
self['multipath'] = transaction['multipath']
self.reload()
return self
def remove(self):
self['ipdb_scope'] = 'remove'
return self
def shadow(self):
self['ipdb_scope'] = 'shadow'
return self
class RoutingTable(object):
def __init__(self, ipdb, prime=None):
self.ipdb = ipdb
self.lock = threading.Lock()
self.idx = {}
self.kdx = {}
def __repr__(self):
return repr([x['route'] for x in self.idx.values()])
def __len__(self):
return len(self.keys())
def __iter__(self):
for record in tuple(self.idx.values()):
yield record['route']
def keys(self, key='dst'):
with self.lock:
return [x['route'][key] for x in self.idx.values()]
def describe(self, target, forward=True):
# match the route by index -- a bit meaningless,
# but for compatibility
if isinstance(target, int):
keys = tuple(self.idx.keys())
return self.idx[keys[target]]
# match the route by key
if isinstance(target, (tuple, list)):
try:
# full match
return self.idx[target]
except KeyError:
# match w/o iif/oif
return self.idx[target[:2] + (None, None)]
# match the route by string
if isinstance(target, basestring):
target = {'dst': target}
# match the route by dict spec
if not isinstance(target, dict):
raise TypeError('unsupported key type')
for record in self.idx.values():
for key in target:
# skip non-existing keys
#
# it's a hack, but newly-created routes
# don't contain all the fields that are
# in the netlink message
if record['route'].get(key) is None:
continue
# if any key doesn't match
if target[key] != record['route'][key]:
break
else:
# if all keys match
return record
if not forward:
raise KeyError('route not found')
# split masks
if target.get('dst', '').find('/') >= 0:
dst = target['dst'].split('/')
target['dst'] = dst[0]
target['dst_len'] = int(dst[1])
if target.get('src', '').find('/') >= 0:
src = target['src'].split('/')
target['src'] = src[0]
target['src_len'] = int(src[1])
# load and return the route, if exists
route = Route(self.ipdb)
route.load_netlink(self.ipdb.nl.get_routes(**target)[0])
return {'route': route,
'key': None}
def __delitem__(self, key):
with self.lock:
item = self.describe(key, forward=False)
del self.idx[RouteKey(item['route'])]
def __setitem__(self, key, value):
with self.lock:
try:
record = self.describe(key, forward=False)
except KeyError:
record = {'route': Route(self.ipdb),
'key': None}
if isinstance(value, nlmsg):
record['route'].load_netlink(value)
elif isinstance(value, Route):
record['route'] = value
elif isinstance(value, dict):
with record['route']._direct_state:
record['route'].update(value)
key = RouteKey(record['route'])
if record['key'] is None:
self.idx[key] = {'route': record['route'],
'key': key}
else:
self.idx[key] = record
if record['key'] != key:
del self.idx[record['key']]
record['key'] = key
def __getitem__(self, key):
with self.lock:
return self.describe(key, forward=True)['route']
def __contains__(self, key):
try:
with self.lock:
self.describe(key, forward=False)
return True
except KeyError:
return False
class RoutingTableSet(object):
def __init__(self, ipdb, ignore_rtables=None):
self.ipdb = ipdb
self.ignore_rtables = ignore_rtables or []
self.tables = {254: RoutingTable(self.ipdb)}
def add(self, spec=None, **kwarg):
'''
Create a route from a dictionary
'''
spec = spec or kwarg
table = spec.get('table', 254)
assert 'dst' in spec
if table not in self.tables:
self.tables[table] = RoutingTable(self.ipdb)
route = Route(self.ipdb)
metrics = spec.pop('metrics', {})
multipath = spec.pop('multipath', [])
route.update(spec)
route.metrics.update(metrics)
route.set_item('ipdb_scope', 'create')
self.tables[table][route['dst']] = route
route.begin()
for nh in multipath:
route.add_nh(nh)
return route
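        # Illustrative use from the IPDB side (hypothetical addresses,
        # shown only as a usage sketch):
        #   ipdb.routes.add({'dst': '10.0.0.0/24',
        #                    'gateway': '192.168.0.1'}).commit()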
def load_netlink(self, msg):
'''
Loads an existing route from a rtmsg
'''
table = msg.get('table', 254)
if table in self.ignore_rtables:
return
if not isinstance(msg, rtmsg):
return
# construct a key
# FIXME: temporary solution
# FIXME: can `Route()` be used as a key?
key = RouteKey(msg)
# RTM_DELROUTE
if msg['event'] == 'RTM_DELROUTE':
try:
# locate the record
record = self.tables[table][key]
# delete the record
if record['ipdb_scope'] not in ('locked', 'shadow'):
del self.tables[table][key]
record.set_item('ipdb_scope', 'detached')
# sync ???
record.sync()
except Exception as e:
logging.debug(e)
logging.debug(msg)
return
# RTM_NEWROUTE
if table not in self.tables:
self.tables[table] = RoutingTable(self.ipdb)
self.tables[table][key] = msg
return self.tables[table][key]
def remove(self, route, table=None):
if isinstance(route, Route):
table = route.get('table', 254) or 254
route = route.get('dst', 'default')
else:
table = table or 254
del self.tables[table][route]
def describe(self, spec, table=254):
return self.tables[table].describe(spec)
def get(self, dst, table=None):
table = table or 254
return self.tables[table][dst]
def keys(self, table=254, family=AF_UNSPEC):
return [x['dst'] for x in self.tables[table]
if (x.get('family') == family)
or (family == AF_UNSPEC)]
def has_key(self, key, table=254):
return key in self.tables[table]
def __contains__(self, key):
return key in self.tables[254]
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
assert key == value['dst']
return self.add(value)
def __delitem__(self, key):
return self.remove(key)
def __repr__(self):
return repr(self.tables[254])
| apache-2.0 | 165,606,072,297,621,380 | 30.417323 | 79 | 0.497368 | false |
gjhiggins/sprox | sprox/fillerbase.py | 1 | 13428 | """
fillerbase Module
Classes to help fill widgets with data
Copyright (c) 2008-10 Christopher Perkins
Original Version by Christopher Perkins 2008
Released under MIT license.
"""
from .configbase import ConfigBase, ConfigBaseError
from .metadata import FieldsMetadata
import inspect
from datetime import datetime
encoding = 'utf-8'
class FillerBase(ConfigBase):
"""
:Modifiers:
see :mod:`sprox.configbase`.
The base filler class.
:Arguments:
values
pass through of values. This is typically a set of default values that is updated by the
filler. This is useful when updating an existing form.
kw
Set of keyword arguments for assisting the fill. This is for instance information like offset
and limit for a TableFiller.
:Usage:
>>> filler = FillerBase()
>>> filler.get_value()
{}
"""
def get_value(self, values=None, **kw):
"""
The main function for getting data to fill widgets,
"""
if values is None:
values = {}
return values
class ModelsFiller(FillerBase):
pass
class ModelDefFiller(FillerBase):
pass
class FormFiller(FillerBase):
__metadata_type__ = FieldsMetadata
def get_value(self, values=None, **kw):
values = super(FormFiller, self).get_value(values)
values['sprox_id'] = self.__sprox_id__
return values
class TableFiller(FillerBase):
"""
This is the base class for generating table data for use in table widgets. The TableFiller uses
    its provider to obtain a dictionary of information about the __entity__ this Filler defines.
This class is especially useful when you need to return a json stream, because it allows for
customization of attributes. A package which has similar functionality to this is TurboJson,
    but TurboJson is rules-based, whereas here the semantics for generating dictionaries follow the
    same :mod:`sprox.configbase` methodology.
Modifiers defined in this class
+-----------------------------------+--------------------------------------------+------------------------------+
| Name | Description | Default |
+===================================+============================================+==============================+
| __actions__ | An overridable function to define how to | a function that creates an |
| | display action links in the form. | edit and delete link. |
+-----------------------------------+--------------------------------------------+------------------------------+
| __metadata_type__ | How should we get data from the provider. | FieldsMetadata |
+-----------------------------------+--------------------------------------------+------------------------------+
| __possible_field_names__ | list or dict of names to use for discovery | None |
| | of field names for relationship columns. | (See below.) |
| | (None uses the default list from | |
| | :class:`sprox.configbase:ConfigBase`.) | |
| | A dict provides field-level granularity | |
| | (See also explanation below.) | |
+-----------------------------------+--------------------------------------------+------------------------------+
| __datetime_formatstr__ | format string for the strftime function of | '%Y-%m-%d %H:%M:%S' |
| | datetime objects. | ("simplified" ISO-8601) |
| | Classical american format would be | |
| | '%m/%d/%Y %H:%M%p'. | |
+-----------------------------------+--------------------------------------------+------------------------------+
see modifiers also in :mod:`sprox.configbase`.
:Relations:
By default, TableFiller will populate relations (join or foreign_key) with either the value
from the related table, or a comma-separated list of values. These values are derived from
the related object given the field names provided by the __possible_field_names__ modifier.
For instance, if you have a User class which is related to Groups, the groups item in the result
dictionaries will be populated with Group.group_name. The default field names are specified
in __possible_field_name_defaults__: _name, name, description, title.
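      For example, field-level granularity can be requested by providing a
      dict instead of a list. A minimal sketch (the ``User`` model and the
      ``group_name`` attribute are illustrative, not required by sprox)::

          class UsersFiller(TableFiller):
              __model__ = User
              __possible_field_names__ = {'groups': 'group_name'}

      Here only the ``groups`` relation is rendered via ``group_name``; all
      other relations fall back to the default field name list.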
:RESTful Actions:
      By default, TableFiller provides an "__actions__" item in the resultant dictionary list. This provides
      an edit link and a (javascript) delete link, which provide edit and DELETE functionality as HTML verbs in REST.
For more information on developing RESTful URLs, please visit `http://microformats.org/wiki/rest/urls <http://microformats.org/wiki/rest/urls/>`_ .
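      The rendered links can be customized by overriding ``__actions__`` in a
      subclass. A minimal sketch (the ``User`` model and the markup are
      illustrative only)::

          class UsersFiller(TableFiller):
              __model__ = User

              def __actions__(self, obj):
                  # render only an edit link, skipping the delete form
                  return '<a href="%s/edit">edit</a>' % obj.user_id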
:Usage:
Here is how we would get the values to fill up a user's table, minus the action column, and created date.
>>> class UsersFiller(TableFiller):
... __model__ = User
... __actions__ = False
... __omit_fields__ = ['created']
>>> users_filler = UsersFiller(session)
>>> value = users_filler.get_value(values={}, limit=20, offset=0)
>>> print(value) #doctest: +IGNORE_WHITESPACE
[{'town': u'Arvada', 'user_id': u'1', 'user_name': u'asdf',
'town_id': u'1', 'groups': u'4', '_password': '******', 'password': '******',
'email_address': u'[email protected]', 'display_name': u'None'}]
"""
__actions__ = True
__metadata_type__ = FieldsMetadata
__possible_field_names__ = None
__datetime_formatstr__ = '%Y-%m-%d %H:%M:%S'
def _do_init_attrs(self):
super(TableFiller, self)._do_init_attrs()
if self.__possible_field_names__ is None:
self.__possible_field_names__ = self.__possible_field_name_defaults__
def _get_list_data_value(self, field, values):
l = []
if isinstance(self.__possible_field_names__, dict) and field in self.__possible_field_names__:
view_names = self.__possible_field_names__[field]
if not isinstance(view_names, list):
view_names = [view_names]
elif isinstance(self.__possible_field_names__, list):
view_names = self.__possible_field_names__
else:
view_names = self.__possible_field_name_defaults__
for value in values:
if not isinstance(value, str):
                name = self.__provider__.get_view_field_name(value.__class__, view_names)
l.append(str(getattr(value, name)))
else:
#this is needed for postgres to see array values
return values
return ', '.join(l)
def _get_relation_value(self, field, value):
#this may be needed for catwalk, but I am not sure what conditions cause it to be needed
#if value is None:
# return None
name = self.__provider__.get_view_field_name(value.__class__, self.__possible_field_names__)
return getattr(value, name)
def get_count(self):
"""Returns the total number of items possible for retrieval. This can only be
executed after a get_value() call. This call is useful for creating pagination in the context
of a user interface.
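        A minimal pagination sketch (names follow the usage example of this
        class and are illustrative)::

            rows = users_filler.get_value(limit=20, offset=0)
            total = users_filler.get_count()
            pages = total // 20 + (1 if total % 20 else 0)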
"""
if not hasattr(self, '__count__'):
raise ConfigBaseError('Count not yet set for filler. try calling get_value() first.')
return self.__count__
def _do_get_fields(self):
fields = super(TableFiller, self)._do_get_fields()
if '__actions__' not in self.__omit_fields__ and '__actions__' not in fields:
fields.insert(0, '__actions__')
return fields
def __actions__(self, obj):
"""Override this function to define how action links should be displayed for the given record."""
primary_fields = self.__provider__.get_primary_fields(self.__entity__)
pklist = '/'.join([str(getattr(obj, x)) for x in primary_fields])
value = '<div><div> <a href="'+pklist+'/edit" style="text-decoration:none">edit</a>'\
'</div><div>'\
'<form method="POST" action="'+pklist+'" class="button-to">'\
'<input type="hidden" name="_method" value="DELETE" />'\
'<input class="delete-button" onclick="return confirm(\'Are you sure?\');" value="delete" type="submit" '\
'style="background-color: transparent; float:left; border:0; color: #286571; display: inline; margin: 0; padding: 0;"/>'\
'</form>'\
'</div></div>'
return value
def _do_get_provider_count_and_objs(self, **kw):
limit = kw.pop('limit', None)
offset = kw.pop('offset', None)
order_by = kw.pop('order_by', None)
desc = kw.pop('desc', False)
substring_filters = kw.pop('substring_filters', [])
count, objs = self.__provider__.query(self.__entity__, limit, offset, self.__limit_fields__,
order_by, desc, substring_filters=substring_filters,
filters=kw)
self.__count__ = count
return count, objs
def get_value(self, values=None, **kw):
"""
Get the values to fill a form widget.
:Arguments:
offset
offset into the records
limit
number of records to return
order_by
name of the column to the return values ordered by
desc
order the columns in descending order
All the other arguments will be used to filter the result
"""
count, objs = self._do_get_provider_count_and_objs(**kw)
self.__count__ = count
rows = []
for obj in objs:
row = {}
for field in self.__fields__:
field_method = getattr(self, field, None)
if inspect.ismethod(field_method):
argspec = inspect.getargspec(field_method)
if argspec and (len(argspec[0])-2>=len(kw) or argspec[2]):
value = getattr(self, field)(obj, **kw)
else:
value = getattr(self, field)(obj)
else:
value = getattr(obj, field)
if 'password' in field.lower():
row[field] = '******'
continue
elif isinstance(value, list) or self.__provider__.is_query(self.__entity__, value):
value = self._get_list_data_value(field, value)
elif isinstance(value, datetime):
value = value.strftime(self.__datetime_formatstr__)
elif self.__provider__.is_relation(self.__entity__, field) and value is not None:
value = self._get_relation_value(field, value)
elif self.__provider__.is_binary(self.__entity__, field) and value is not None:
value = '<file>'
                if isinstance(value, bytes):
                    # Decode raw byte values so the rendered row is always text.
                    try:
                        value = value.decode('utf-8')
                    except UnicodeDecodeError:
                        pass
row[field] = str(value)
rows.append(row)
return rows
class EditFormFiller(FormFiller):
"""
This class will help to return a single record for use within a form or otherwise.
The values are returned in dictionary form.
:Modifiers:
see :mod:`sprox.configbase`.
:Usage:
>>> class UserFiller(EditFormFiller):
... __model__ = User
>>> users_filler = UsersFiller(session)
>>> value = users_filler.get_value(values={'user_id':'1'})
>>> value # doctest: +SKIP
{'town': u'Arvada', 'user_id': u'1', 'created': u'2008-12-28 17:33:11.078931',
'user_name': u'asdf', 'town_id': u'1', 'groups': u'4', '_password': '******',
'password': '******', 'email_address': u'[email protected]', 'display_name': u'None'}
"""
def get_value(self, values=None, **kw):
obj = self.__provider__.get_obj(self.__entity__, params=values, fields=self.__fields__)
values = self.__provider__.dictify(obj, self.__fields__, self.__omit_fields__)
for key in self.__fields__:
method = getattr(self, key, None)
if method:
if inspect.ismethod(method):
values[key] = method(obj, **kw)
return values
class RecordFiller(EditFormFiller):pass
class AddFormFiller(FormFiller):
def get_value(self, values=None, **kw):
"""xxx: get the server/entity defaults."""
kw = super(AddFormFiller, self).get_value(values, **kw)
return self.__provider__.get_default_values(self.__entity__, params=values)
| mit | 4,328,173,973,949,016,000 | 44.364865 | 151 | 0.525767 | false |
emschorsch/projector-hangman | digits-training.py | 1 | 1169 | import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('digits.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
cv2.imshow('pre-gray', img)
cv2.waitKey()
cv2.imshow('gray', gray)
cv2.waitKey()
# Now we split the image into 5000 cells, each 20x20 in size
cells = [np.hsplit(row,100) for row in np.vsplit(gray,50)]
# Make it into a Numpy array. Its size will be (50,100,20,20)
x = np.array(cells)
# Now we prepare train_data and test_data.
train = x[:,:50].reshape(-1,400).astype(np.float32) # Size = (2500,400)
test = x[:,50:100].reshape(-1,400).astype(np.float32) # Size = (2500,400)
# Create labels for train and test data
k = np.arange(10)
train_labels = np.repeat(k,250)[:,np.newaxis]
test_labels = train_labels.copy()
# Initiate kNN, train the data, then test it with test data for k=1
knn = cv2.KNearest()
knn.train(train,train_labels)
ret,result,neighbours,dist = knn.find_nearest(test,k=5)
# Now we check the accuracy of classification
# For that, compare the result with test_labels and check which are wrong
matches = result==test_labels
correct = np.count_nonzero(matches)
accuracy = correct*100.0/result.size
print(accuracy)
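# As a quick sanity check, classify a single sample from the test set with
# the trained kNN and compare the prediction against its known label.
single_sample = test[:1]  # one flattened 20x20 digit, shape (1, 400)
ret, result, neighbours, dist = knn.find_nearest(single_sample, k=5)
print(result[0][0])       # predicted digit
print(test_labels[0][0])  # expected digit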
| mit | -2,089,103,687,261,399,300 | 28.225 | 73 | 0.723695 | false |
Weasyl/weasyl | weasyl/test/test_comment.py | 1 | 7400 | import pytest
import unittest
from libweasyl import staff
from libweasyl.models import site
from weasyl import define as d
from weasyl import comment
from weasyl import shout
from weasyl.error import WeasylError
from weasyl.test import db_utils
@pytest.mark.usefixtures('db')
class TestRemoveComment(object):
generation_parameters = [
("submit", db_utils.create_submission_comment, comment.remove,
db_utils.create_submission),
("journal", db_utils.create_journal_comment, comment.remove,
db_utils.create_journal),
("char", db_utils.create_character_comment, comment.remove,
db_utils.create_character),
(None, db_utils.create_shout, shout.remove, db_utils.create_shout),
]
@pytest.fixture(autouse=True, params=generation_parameters)
def setUp(self, request, monkeypatch):
# userid of owner of the journal/submission/character
self.owner = db_utils.create_user()
# userid of the comment poster
self.commenter = db_utils.create_user()
# userid of a moderator
self.moderator = db_utils.create_user()
# userid of another user who isn't a moderator
self.another_user = db_utils.create_user()
# mock out staff.MODS
monkeypatch.setattr(staff, 'MODS', {self.moderator})
(self.feature, self.create_function, self.remove_function, call) = request.param
self.target = call(self.owner) if self.feature is not None else self.owner
self.commentid = self.create_function(self.commenter, self.target)
self.args = {'commentid': self.commentid}
if self.feature is not None:
self.args['feature'] = self.feature
def test_commenter_can_remove(self):
assert self.target == self.remove_function(self.commenter, **self.args)
def test_commenter_can_not_remove_with_replies(self):
# reply to the existing comment
self.create_function(self.another_user, self.target, parentid=self.commentid)
pytest.raises(WeasylError, self.remove_function, self.commenter, **self.args)
def test_owner_can_remove(self):
assert self.target == self.remove_function(self.owner, **self.args)
def test_mod_can_remove(self):
assert self.target == self.remove_function(self.moderator, **self.args)
def test_other_user_can_not_remove(self):
pytest.raises(
WeasylError, self.remove_function, self.another_user, **self.args)
@pytest.mark.usefixtures("db")
class CheckNotificationsTestCase(unittest.TestCase):
def setUp(self):
self.owner = db_utils.create_user()
self.commenter1 = db_utils.create_user()
self.commenter2 = db_utils.create_user()
def count_notifications(self, user):
return (
d.connect().query(site.SavedNotification)
.filter(site.SavedNotification.userid == user)
.count())
def add_and_remove_comments(self, feature, **kwargs):
kwargs['content'] = 'hello'
# commenter1 posts a comment c1 on submission s
c1 = comment.insert(self.commenter1, **kwargs)
self.assertEqual(1, self.count_notifications(self.owner))
# commenter2 posts a reply to c1
c2 = comment.insert(self.commenter2, parentid=c1, **kwargs)
self.assertEqual(1, self.count_notifications(self.commenter1))
# owner posts a reply to c2
c3 = comment.insert(self.owner, parentid=c2, **kwargs)
self.assertEqual(1, self.count_notifications(self.commenter2))
# commenter1 responds to owner
comment.insert(self.commenter1, parentid=c3, **kwargs)
self.assertEqual(2, self.count_notifications(self.owner))
# owner deletes comment thread
comment.remove(self.owner, feature=feature, commentid=c1)
self.assertEqual(0, self.count_notifications(self.owner))
self.assertEqual(0, self.count_notifications(self.commenter1))
self.assertEqual(0, self.count_notifications(self.commenter2))
def test_add_and_remove_submission(self):
s = db_utils.create_submission(self.owner)
self.add_and_remove_comments('submit', submitid=s)
def test_add_and_remove_journal(self):
j = db_utils.create_journal(self.owner)
self.add_and_remove_comments('journal', journalid=j)
def test_add_and_remove_character(self):
c = db_utils.create_character(self.owner)
self.add_and_remove_comments('char', charid=c)
def test_add_and_remove_shout(self):
# commenter1 posts a shout on owner's page
c1 = shout.insert(self.commenter1, target_user=self.owner, parentid=None, content="hello", staffnotes=False)
self.assertEqual(1, self.count_notifications(self.owner))
shouts = shout.select(0, self.owner)
self.assertEqual(len(shouts), 1)
self.assertTrue(shouts[0].items() >= {"content": "hello"}.items())
# commenter2 posts a reply to c1
c2 = shout.insert(self.commenter2, target_user=self.owner, parentid=c1, content="reply", staffnotes=False)
self.assertEqual(1, self.count_notifications(self.commenter1))
shouts = shout.select(0, self.owner)
self.assertEqual(len(shouts), 2)
self.assertTrue(shouts[1].items() >= {"content": "reply"}.items())
# owner posts a reply to c2
c3 = shout.insert(self.owner, target_user=self.owner, parentid=c2, content="reply 2", staffnotes=False)
self.assertEqual(1, self.count_notifications(self.commenter2))
shouts = shout.select(0, self.owner)
self.assertEqual(len(shouts), 3)
self.assertTrue(shouts[2].items() >= {"content": "reply 2"}.items())
# commenter1 responds to owner
shout.insert(self.commenter1, target_user=self.owner, parentid=c3, content="reply 3", staffnotes=False)
self.assertEqual(2, self.count_notifications(self.owner))
shouts = shout.select(0, self.owner)
self.assertEqual(len(shouts), 4)
self.assertTrue(shouts[3].items() >= {"content": "reply 3"}.items())
# commenter1 posts a new root shout
shout.insert(self.commenter1, target_user=self.owner, parentid=None, content="root 2", staffnotes=False)
self.assertEqual(3, self.count_notifications(self.owner))
shouts = shout.select(0, self.owner)
self.assertEqual(len(shouts), 5)
self.assertTrue(shouts[0].items() >= {"content": "root 2"}.items())
self.assertTrue(shouts[4].items() >= {"content": "reply 3"}.items())
# commenter2 posts another reply to c1
shout.insert(self.commenter2, target_user=self.owner, parentid=c1, content="reply 4", staffnotes=False)
self.assertEqual(2, self.count_notifications(self.commenter1))
shouts = shout.select(0, self.owner)
self.assertEqual(len(shouts), 6)
self.assertTrue(shouts[5].items() >= {"content": "reply 4"}.items())
# owner deletes comment thread
shout.remove(self.owner, commentid=c1)
self.assertEqual(1, self.count_notifications(self.owner))
self.assertEqual(0, self.count_notifications(self.commenter1))
self.assertEqual(0, self.count_notifications(self.commenter2))
shouts = shout.select(0, self.owner)
self.assertEqual(len(shouts), 1)
self.assertTrue(shouts[0].items() >= {"content": "root 2"}.items())
| apache-2.0 | 588,824,309,023,232,900 | 41.528736 | 116 | 0.666081 | false |
mcublocks/embeddecy | Embeddecy-project/docs/embcdocumentation/conf.py | 1 | 10287 | # -*- coding: utf-8 -*-
#
# Язык Embeddecy documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 29 10:45:50 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Язык Embeddecy'
copyright = u'2017, ООО "НПП САТЭК плюс"'
author = u'ООО "НПП САТЭК плюс"'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'ru'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Язык Embeddecy v1.0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Embeddecydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Embeddecy.tex', u'Язык Embeddecy Documentation',
u'ООО "НПП САТЭК плюс"', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'embeddecy', u'Язык Embeddecy Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Embeddecy', u'Язык Embeddecy Documentation',
author, 'Embeddecy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| apache-2.0 | -8,252,560,133,260,538,000 | 28.141593 | 80 | 0.669211 | false |
dcos/dcos | packages/adminrouter/extra/src/test-harness/tests/test_cache.py | 1 | 50902 | # Copyright (C) Mesosphere, Inc. See LICENSE file for details.
import copy
import logging
import time
import pytest
import requests
from generic_test_code.common import ping_mesos_agent, verify_header
from mocker.endpoints.marathon import (
SCHEDULER_APP_ALWAYSTHERE,
SCHEDULER_APP_ALWAYSTHERE_DIFFERENTPORT,
)
from mocker.endpoints.mesos import AGENT1_ID, EXTRA_AGENT_DICT
from runner.common import CACHE_FIRST_POLL_DELAY, Vegeta
from util import GuardedSubprocess, LineBufferFilter, SearchCriteria
log = logging.getLogger(__name__)
class TestCache:
def test_if_first_cache_refresh_occurs_earlier(
self, nginx_class, mocker, valid_user_header):
filter_regexp = {
'Executing cache refresh triggered by timer': SearchCriteria(1, False),
r'Cache `[\s\w]+` empty. Fetching.': SearchCriteria(3, True),
'Updated Mesos state cache': SearchCriteria(1, True),
'Updated Marathon apps cache': SearchCriteria(1, True),
'Updated marathon leader cache': SearchCriteria(1, True),
}
# Enable recording for marathon
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='record_requests')
# Enable recording for Mesos
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='record_requests')
# Make regular polling occur later than usual, so that we get clear
# results.
ar = nginx_class(cache_poll_period=60, cache_expiration=55)
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
timeout=(CACHE_FIRST_POLL_DELAY + 1),
line_buffer=ar.stderr_line_buffer)
lbf.scan_log_buffer()
# Do a request that uses cache so that we can verify that data was
# in fact cached and no more than one req to mesos/marathon
# backends were made
ping_mesos_agent(ar, valid_user_header)
mesos_requests = mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='get_recorded_requests')
marathon_requests = mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='get_recorded_requests')
assert lbf.extra_matches == {}
assert len(mesos_requests) == 1
assert len(marathon_requests) == 2
def test_if_cache_refresh_occurs_regularly(
self, nginx_class, mocker, valid_user_header):
filter_regexp = {
'Executing cache refresh triggered by timer': SearchCriteria(3, False),
r'Cache `[\s\w]+` expired. Refresh.': SearchCriteria(8, True),
'Updated Mesos state cache': SearchCriteria(3, True),
'Updated Marathon apps cache': SearchCriteria(3, True),
'Updated marathon leader cache': SearchCriteria(3, True),
}
cache_poll_period = 4
# Enable recording for marathon
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='record_requests')
# Enable recording for mesos
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='record_requests')
# Make regular polling occur faster than usual to speed up the tests.
ar = nginx_class(cache_poll_period=cache_poll_period, cache_expiration=3)
# In total, we should get three cache updates in given time frame plus
# one NOOP due to cache not being expired yet:
timeout = cache_poll_period * 3 + 1
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
timeout=timeout,
line_buffer=ar.stderr_line_buffer)
lbf.scan_log_buffer()
# Do a request that uses cache so that we can verify that data was
# in fact cached and no more than one req to mesos/marathon
# backends were made
ping_mesos_agent(ar, valid_user_header)
mesos_requests = mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='get_recorded_requests')
marathon_requests = mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='get_recorded_requests')
assert lbf.extra_matches == {}
assert len(mesos_requests) == 3
assert len(marathon_requests) == 6
def test_if_cache_refresh_is_triggered_by_request(
self, nginx_class, mocker, valid_user_header):
"""...right after Nginx has started."""
filter_regexp = {
'Executing cache refresh triggered by request': SearchCriteria(1, True),
r'Cache `[\s\w]+` empty. Fetching.': SearchCriteria(3, True),
'Updated Mesos state cache': SearchCriteria(1, True),
'Updated Marathon apps cache': SearchCriteria(1, True),
'Updated marathon leader cache': SearchCriteria(1, True),
}
# Enable recording for marathon
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='record_requests')
# Enable recording for mesos
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='record_requests')
# Make sure that timers will not interfere:
ar = nginx_class(cache_first_poll_delay=120,
cache_poll_period=120,
cache_expiration=115)
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
timeout=5,
line_buffer=ar.stderr_line_buffer)
ping_mesos_agent(ar, valid_user_header)
lbf.scan_log_buffer()
# Do an extra request so that we can verify that data was in fact
# cached and no more than one req to mesos/marathon backends were
# made
ping_mesos_agent(ar, valid_user_header)
mesos_requests = mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='get_recorded_requests')
marathon_requests = mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='get_recorded_requests')
assert lbf.extra_matches == {}
assert len(mesos_requests) == 1
assert len(marathon_requests) == 2
def test_if_broken_marathon_causes_marathon_cache_to_expire_and_requests_to_fail(
self, nginx_class, mocker, valid_user_header):
filter_regexp = {
'Marathon app request failed: invalid response status: 500':
SearchCriteria(1, False),
'Updated Mesos state cache':
SearchCriteria(2, False),
'Cache entry `svcapps` is too old, aborting request':
SearchCriteria(1, True),
}
ar = nginx_class(cache_max_age_soft_limit=3,
cache_max_age_hard_limit=4,
cache_expiration=2,
cache_poll_period=3,
)
url = ar.make_url_from_path('/service/scheduler-alwaysthere/foo/bar/')
with GuardedSubprocess(ar):
# Register Line buffer filter:
lbf = LineBufferFilter(filter_regexp,
timeout=5, # Just to give LBF enough time
line_buffer=ar.stderr_line_buffer)
# Trigger cache update by issuing request:
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
# Break marathon
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='always_bork',
aux_data=True)
# Wait for the cache to be old enough to be discarded by AR:
# cache_max_age_hard_limit + 1s for good measure
# must be more than cache_poll_period
time.sleep(4 + 1)
# Perform the main/test request:
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 503
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
def test_if_temp_marathon_borkage_does_not_disrupt_caching(
self, nginx_class, mocker, valid_user_header):
filter_regexp = {
'Marathon app request failed: invalid response status: 500':
SearchCriteria(1, False),
'Updated Mesos state cache':
SearchCriteria(2, False),
'Cache entry `svcapps` is stale':
SearchCriteria(1, True),
}
ar = nginx_class(cache_max_age_soft_limit=3,
cache_max_age_hard_limit=1200,
cache_expiration=2,
cache_poll_period=3,
)
url = ar.make_url_from_path('/service/scheduler-alwaysthere/foo/bar/')
with GuardedSubprocess(ar):
# Register Line buffer filter:
lbf = LineBufferFilter(filter_regexp,
timeout=5, # Just to give LBF enough time
line_buffer=ar.stderr_line_buffer)
# Trigger cache update by issuing request:
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
# Break marathon
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='always_bork',
aux_data=True)
# Wait for the cache to be old enough to be considered stale by AR:
# cache_max_age_soft_limit + 1s for a good measure
time.sleep(3 + 1)
# Perform the main/test request:
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
def test_if_broken_mesos_causes_mesos_cache_to_expire_and_requests_to_fail(
self, nginx_class, mocker, valid_user_header):
filter_regexp = {
'Mesos state request failed: invalid response status: 500':
SearchCriteria(1, False),
'Updated Marathon apps cache':
SearchCriteria(2, False),
'Cache entry `mesosstate` is too old, aborting request':
SearchCriteria(1, True),
}
ar = nginx_class(cache_poll_period=3,
cache_expiration=2,
cache_max_age_soft_limit=3,
cache_max_age_hard_limit=4,
)
with GuardedSubprocess(ar):
# Register Line buffer filter:
lbf = LineBufferFilter(filter_regexp,
timeout=5, # Just to give LBF enough time
line_buffer=ar.stderr_line_buffer)
# Trigger cache update using a request:
ping_mesos_agent(ar, valid_user_header)
# Break mesos
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='always_bork',
aux_data=True)
# Wait for the cache to be old enough to be discarded by AR:
# cache_max_age_hard_limit + 1s for good measure
# must be more than cache_poll_period
time.sleep(4 + 1)
# Perform the main/test request:
ping_mesos_agent(ar, valid_user_header, expect_status=503)
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
    def test_if_temp_mesos_borkage_does_not_disrupt_caching(
self, nginx_class, mocker, valid_user_header):
filter_regexp = {
'Mesos state request failed: invalid response status: 500':
SearchCriteria(1, False),
'Updated Marathon apps cache':
SearchCriteria(2, False),
'Cache entry `mesosstate` is stale':
SearchCriteria(1, True),
}
ar = nginx_class(cache_poll_period=3,
cache_expiration=2,
cache_max_age_soft_limit=3,
cache_max_age_hard_limit=1800,
)
with GuardedSubprocess(ar):
# Register Line buffer filter:
lbf = LineBufferFilter(filter_regexp,
timeout=5, # Just to give LBF enough time
line_buffer=ar.stderr_line_buffer)
# Trigger cache update using a request:
ping_mesos_agent(ar, valid_user_header)
# Break mesos
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='always_bork',
aux_data=True)
# Wait for the cache to be old enough to become stale:
# cache_max_age_soft_limit + 1s for good measure
time.sleep(3 + 1)
# Perform the main/test request:
ping_mesos_agent(ar, valid_user_header, expect_status=200)
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
def test_if_broken_marathon_does_not_break_mesos_cache(
self, nginx_class, mocker, valid_user_header):
filter_regexp = {
'Marathon app request failed: invalid response status: 500':
SearchCriteria(1, True),
'Updated Mesos state cache':
SearchCriteria(1, True),
}
# Break marathon
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='always_bork',
aux_data=True)
ar = nginx_class()
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
timeout=(CACHE_FIRST_POLL_DELAY + 1),
line_buffer=ar.stderr_line_buffer)
ping_mesos_agent(ar, valid_user_header)
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
def test_if_broken_mesos_does_not_break_marathon_cache(
self, nginx_class, mocker, valid_user_header):
filter_regexp = {
'Mesos state request failed: invalid response status: 500':
SearchCriteria(1, True),
'Updated Marathon apps cache': SearchCriteria(1, True),
}
# Break marathon
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='always_bork',
aux_data=True)
ar = nginx_class()
url = ar.make_url_from_path('/service/scheduler-alwaysthere/bar/baz')
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
timeout=(CACHE_FIRST_POLL_DELAY + 1),
line_buffer=ar.stderr_line_buffer)
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
lbf.scan_log_buffer()
assert resp.status_code == 200
req_data = resp.json()
assert req_data['endpoint_id'] == 'http://127.0.0.1:16000'
assert lbf.extra_matches == {}
def test_if_changing_marathon_apps_is_reflected_in_cache(
self, nginx_class, valid_user_header, mocker):
cache_poll_period = 4
ar = nginx_class(cache_poll_period=cache_poll_period, cache_expiration=3)
url = ar.make_url_from_path('/service/scheduler-alwaysthere/bar/baz')
with GuardedSubprocess(ar):
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
req_data = resp.json()
assert req_data['endpoint_id'] == 'http://127.0.0.1:16000'
new_apps = {"apps": [SCHEDULER_APP_ALWAYSTHERE_DIFFERENTPORT, ]}
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data=new_apps)
# First poll (2s) + normal poll interval(4s) < 2 * normal poll
# interval(4s)
time.sleep(cache_poll_period * 2)
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
req_data = resp.json()
assert req_data['endpoint_id'] == 'http://127.0.0.15:16001'
def test_if_changing_mesos_state_is_reflected_in_cache(
self, nginx_class, valid_user_header, mocker):
cache_poll_period = 4
ar = nginx_class(cache_poll_period=cache_poll_period, cache_expiration=3)
with GuardedSubprocess(ar):
ping_mesos_agent(ar,
valid_user_header,
agent_id=EXTRA_AGENT_DICT['id'],
expect_status=404)
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='enable_extra_agent')
# First poll (2s) + normal poll interval(4s) < 2 * normal poll
# interval(4s)
time.sleep(cache_poll_period * 2)
ping_mesos_agent(ar,
valid_user_header,
agent_id=EXTRA_AGENT_DICT['id'],
endpoint_id='http://127.0.0.4:15003')
def test_if_changing_marathon_leader_is_reflected_in_cache(
self, nginx_class, mocker, valid_user_header):
cache_poll_period = 4
ar = nginx_class(cache_poll_period=cache_poll_period, cache_expiration=3)
url = ar.make_url_from_path('/system/v1/leader/marathon/foo/bar/baz')
with GuardedSubprocess(ar):
# let's make sure that current leader is the default one
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
req_data = resp.json()
assert req_data['endpoint_id'] == 'http://127.0.0.2:80'
# change the leader and wait for cache to notice
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='change_leader',
aux_data="127.0.0.3:80")
# First poll (2s) + normal poll interval(4s) < 2 * normal poll
# interval(4s)
time.sleep(cache_poll_period * 2)
# now, let's see if the leader changed
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
req_data = resp.json()
assert req_data['endpoint_id'] == 'http://127.0.0.3:80'
def test_if_absence_of_marathon_leader_is_handled_by_cache(
self, nginx_class, mocker, valid_user_header):
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='remove_leader')
ar = nginx_class()
url = ar.make_url_from_path('/system/v1/leader/marathon/foo/bar/baz')
with GuardedSubprocess(ar):
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 503
def test_if_absence_of_agent_is_handled_by_cache(
self, nginx_class, mocker, valid_user_header):
ar = nginx_class()
with GuardedSubprocess(ar):
ping_mesos_agent(
ar,
valid_user_header,
agent_id='bdcd424a-b59e-4df4-b492-b54e38926bd8-S0',
expect_status=404)
def test_if_caching_works_for_mesos_state(
self, nginx_class, mocker, valid_user_header):
# Enable recording for mesos
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='record_requests')
ar = nginx_class()
with GuardedSubprocess(ar):
# Let the cache warm-up:
time.sleep(CACHE_FIRST_POLL_DELAY + 1)
for _ in range(3):
ping_mesos_agent(ar, valid_user_header)
mesos_requests = mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='get_recorded_requests')
# 3 requests + only one upstream request == cache works
assert len(mesos_requests) == 1
def test_if_caching_works_for_marathon_apps(
self, nginx_class, mocker, valid_user_header):
# Enable recording for marathon
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='record_requests')
# Enable recording for mesos
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='record_requests')
ar = nginx_class()
url = ar.make_url_from_path('/service/scheduler-alwaysthere/bar/baz')
with GuardedSubprocess(ar):
# Let the cache warm-up:
time.sleep(CACHE_FIRST_POLL_DELAY + 1)
for _ in range(5):
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
mesos_requests = mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='get_recorded_requests')
marathon_requests = mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='get_recorded_requests')
        # 5 requests, but only one upstream request to Mesos and two to
        # Marathon == cache works
assert len(mesos_requests) == 1
assert len(marathon_requests) == 2
def test_if_caching_works_for_marathon_leader(
self, nginx_class, mocker, valid_user_header):
# Enable recording for marathon
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='record_requests')
ar = nginx_class()
url = ar.make_url_from_path('/system/v1/leader/marathon/foo/bar/baz')
with GuardedSubprocess(ar):
# Let the cache warm-up:
time.sleep(CACHE_FIRST_POLL_DELAY + 1)
for _ in range(5):
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
req_data = resp.json()
assert req_data['endpoint_id'] == 'http://127.0.0.2:80'
marathon_requests = mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='get_recorded_requests')
        # 5 requests, but only two upstream requests to Marathon == cache works
assert len(marathon_requests) == 2
def test_if_broken_response_from_marathon_is_handled(
self, nginx_class, mocker, valid_user_header):
filter_regexp = {
'Cannot decode marathon leader JSON': SearchCriteria(1, True),
}
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='break_leader_reply')
ar = nginx_class()
url = ar.make_url_from_path('/system/v1/leader/marathon/foo/bar/baz')
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
timeout=(CACHE_FIRST_POLL_DELAY + 1),
line_buffer=ar.stderr_line_buffer)
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
lbf.scan_log_buffer()
assert resp.status_code == 503
assert lbf.extra_matches == {}
def test_if_failed_request_triggered_update_is_recovered_by_timers(
self, nginx_class, valid_user_header, mocker, log_catcher):
        # The idea here is to break Mesos so that the request-triggered cache
        # update fails, and then verify that the timer-based refresh recovers
        # the cache once Mesos is healed.
first_poll_delay = 3
poll_period = 3
cache_expiration = 2
# Take cache invalidation out of the picture
ar = nginx_class(cache_first_poll_delay=first_poll_delay,
cache_poll_period=poll_period,
cache_expiration=cache_expiration,
cache_max_age_soft_limit=1200,
cache_max_age_hard_limit=1800,
)
        # Break Mesos so that the request-triggered cache update fails:
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='always_bork',
aux_data=True)
with GuardedSubprocess(ar):
start = time.time()
# Let's break the cache by making it update against broken Mesos:
ping_mesos_agent(ar, valid_user_header, expect_status=503)
time.sleep(1)
# Let's make sure that the brokerage is still there
ping_mesos_agent(ar, valid_user_header, expect_status=503)
# Healing hands!
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='always_bork',
aux_data=False)
            # Let's wait for the first poll to refresh the cache
time.sleep(1 + (first_poll_delay - (time.time() - start)))
# Verify that the cache is OK now
ping_mesos_agent(ar, valid_user_header)
def test_if_early_boot_stage_can_recover_from_a_bit_slow_backend(
self, nginx_class, valid_user_header, mocker, log_catcher):
# The idea here is to make Backend a bit slow, so that AR is still able
# to update cache on first request.
refresh_lock_timeout = 10
backend_request_timeout = 5
ar = nginx_class(cache_first_poll_delay=1,
cache_poll_period=3,
cache_expiration=2,
cache_max_age_soft_limit=1200,
cache_max_age_hard_limit=1800,
cache_backend_request_timeout=backend_request_timeout,
cache_refresh_lock_timeout=refresh_lock_timeout,
)
agent_id = AGENT1_ID
url = ar.make_url_from_path('/agent/{}/blah/blah'.format(agent_id))
v = Vegeta(log_catcher, target=url, jwt=valid_user_header, rate=3)
        # Make mesos just a bit slow :)
        # It must still respond faster than backend_request_timeout
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='always_stall',
aux_data=backend_request_timeout * 0.3)
with GuardedSubprocess(ar):
with GuardedSubprocess(v):
time.sleep(backend_request_timeout * 0.3 + 1) # let it warm-up!
ping_mesos_agent(ar, valid_user_header)
    # This test can succeed 40-50% of the time if we remove the fix. Hence
    # we re-run it here 5 times.
@pytest.mark.parametrize('execution_number', range(5))
def test_if_mesos_leader_failover_is_followed_by_cache_http(
self,
nginx_class,
valid_user_header,
mocker,
dns_server_mock,
execution_number):
# Nginx resolver enforces 5s (grep for `resolver ... valid=Xs`), so it
# is VERY important to use cache pool period of >5s.
cache_poll_period = 8
ar = nginx_class(
cache_poll_period=cache_poll_period,
cache_expiration=cache_poll_period - 1,
upstream_mesos="http://leader.mesos:5050",
)
# Enable recording for Mesos mocks:
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='record_requests')
mocker.send_command(endpoint_id='http://127.0.0.3:5050',
func_name='record_requests')
dns_server_mock.set_dns_entry('leader.mesos.', ip="127.0.0.2", ttl=2)
with GuardedSubprocess(ar):
# Force cache refresh early, so that we do not have to wait too
# long
ping_mesos_agent(ar,
valid_user_header,
agent_id=EXTRA_AGENT_DICT['id'],
expect_status=404)
dns_server_mock.set_dns_entry('leader.mesos.', ip="127.0.0.3", ttl=2)
            # The request above already triggered a cache refresh (0s); now wait
            # one full poll interval (8s) plus 2s for the timer-based refresh.
time.sleep(cache_poll_period + 2)
mesosmock_pre_reqs = mocker.send_command(
endpoint_id='http://127.0.0.2:5050',
func_name='get_recorded_requests')
mesosmock_post_reqs = mocker.send_command(
endpoint_id='http://127.0.0.3:5050',
func_name='get_recorded_requests')
assert len(mesosmock_pre_reqs) == 1
assert len(mesosmock_post_reqs) == 1
class TestCacheMesosLeader:
def test_if_unset_hostip_var_is_handled(self, nginx_class, valid_user_header):
filter_regexp = {
('Private IP address of the host is unknown, '
'aborting cache-entry creation for mesos leader'):
SearchCriteria(1, True),
'Updated mesos leader cache':
SearchCriteria(1, True),
}
ar = nginx_class(host_ip=None)
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
line_buffer=ar.stderr_line_buffer)
# Just trigger the cache update:
ping_mesos_agent(ar, valid_user_header)
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
def test_if_missing_mesos_leader_entry_is_handled(
self, nginx_class, valid_user_header, dns_server_mock):
filter_regexp = {
'Failed to instantiate the resolver': SearchCriteria(0, True),
'DNS server returned error code': SearchCriteria(1, True),
'Updated mesos leader cache':
SearchCriteria(0, True),
}
ar = nginx_class()
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp,
line_buffer=ar.stderr_line_buffer)
            # Unfortunately there are upstreams that use `leader.mesos` and
# removing this entry too early will result in Nginx failing to start.
# So we need to do it right after nginx starts, but before first
# cache update.
time.sleep(1)
dns_server_mock.remove_dns_entry('leader.mesos.')
# Now let's trigger the cache update:
ping_mesos_agent(ar, valid_user_header)
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
def test_if_mesos_leader_locality_is_resolved(
self, nginx_class, valid_user_header, dns_server_mock):
cache_poll_period = 4
nonlocal_leader_ip = "127.0.0.3"
local_leader_ip = "127.0.0.2"
filter_regexp_pre = {
'Failed to instantiate the resolver': SearchCriteria(0, True),
'mesos leader is non-local: `{}`'.format(nonlocal_leader_ip):
SearchCriteria(1, True),
('Private IP address of the host is unknown, '
'aborting cache-entry creation for mesos leader'):
SearchCriteria(0, True),
'Updated mesos leader cache':
SearchCriteria(1, True),
}
filter_regexp_post = {
'Failed to instantiate the resolver': SearchCriteria(0, True),
'mesos leader is local': SearchCriteria(1, True),
('Private IP address of the host is unknown, '
'aborting cache-entry creation for mesos leader'):
SearchCriteria(0, True),
'Updated mesos leader cache':
SearchCriteria(1, True),
}
dns_server_mock.set_dns_entry('leader.mesos.', ip=nonlocal_leader_ip)
ar = nginx_class(cache_poll_period=cache_poll_period, cache_expiration=3)
with GuardedSubprocess(ar):
lbf = LineBufferFilter(filter_regexp_pre,
line_buffer=ar.stderr_line_buffer)
# Just trigger the cache update:
ping_mesos_agent(ar, valid_user_header)
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
dns_server_mock.set_dns_entry('leader.mesos.', ip=local_leader_ip)
# First poll (2s) + normal poll interval(4s) < 2 * normal poll
# interval(4s)
time.sleep(cache_poll_period * 2)
lbf = LineBufferFilter(filter_regexp_post,
line_buffer=ar.stderr_line_buffer)
# Just trigger the cache update:
ping_mesos_agent(ar, valid_user_header)
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
def test_if_backend_requests_have_useragent_set_correctly(
self, nginx_class, mocker, valid_user_header):
# Enable recording for marathon
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='record_requests')
# Enable recording for Mesos
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='record_requests')
# Make regular polling occur later than usual, so that we get a single
# cache refresh:
ar = nginx_class(cache_poll_period=60, cache_expiration=55)
with GuardedSubprocess(ar):
# Initiate cache refresh by issuing a request:
ping_mesos_agent(ar, valid_user_header)
mesos_requests = mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='get_recorded_requests')
marathon_requests = mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='get_recorded_requests')
assert len(mesos_requests) == 1
assert len(marathon_requests) == 2
# We could use a loop here, but let's make it a bit easier to debug:
verify_header(mesos_requests[0]['headers'],
'User-Agent',
'Master Admin Router')
verify_header(marathon_requests[0]['headers'],
'User-Agent',
'Master Admin Router')
verify_header(marathon_requests[1]['headers'],
'User-Agent',
'Master Admin Router')
class TestCacheMarathon:
@pytest.mark.parametrize('host_port', [12345, 0, None])
def test_app_with_container_networking_and_defined_container_port(
self, nginx_class, mocker, valid_user_header, host_port):
# Testing the case when a non-zero container port is specified
# in Marathon app definition with networking mode 'container'.
# It does not matter if the host port is fixed (non-zero),
# randomly assigned by Marathon (0) or is not present at all:
# Admin Router must route the request to the specified container port.
app = self._scheduler_alwaysthere_app()
app['networks'] = [{
'mode': 'container',
'name': 'samplenet'
}]
if host_port is not None:
app['container']['portMappings'] = [{'containerPort': 80, 'hostPort': host_port}]
else:
app['container']['portMappings'] = [{'containerPort': 80}]
app['tasks'][0]['ipAddresses'][0]['ipAddress'] = '127.0.0.2'
ar = nginx_class()
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": [app]})
url = ar.make_url_from_path('/service/scheduler-alwaysthere/foo/bar/')
with GuardedSubprocess(ar):
# Trigger cache update by issuing request:
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
req_data = resp.json()
assert req_data['endpoint_id'] == 'http://127.0.0.2:80'
@pytest.mark.parametrize('host_port', [12345, 0, None])
def test_app_with_container_networking_and_random_container_port(
self, nginx_class, mocker, valid_user_header, host_port):
# Testing the case when container port is specified as 0
# in Marathon app definition with networking mode 'container'.
# This means that the Marathon app container port is randomly assigned
# by Marathon. We are reusing port 16000 on 127.0.0.1 exposed by the
# mock server, as the one randomly chosen by Marathon.
# It does not matter if the host port is fixed (non-zero),
# randomly assigned by Marathon (0) or is not present at all:
# Admin Router must route the request to the specified container port.
app = self._scheduler_alwaysthere_app()
app['networks'] = [{
'mode': 'container',
'name': 'samplenet'
}]
if host_port is not None:
app['container']['portMappings'] = [{'containerPort': 0, 'hostPort': host_port}]
else:
app['container']['portMappings'] = [{'containerPort': 0}]
app['tasks'][0]['ipAddresses'][0]['ipAddress'] = '127.0.0.1'
app['tasks'][0]['ports'][0] = '16000'
ar = nginx_class()
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": [app]})
url = ar.make_url_from_path('/service/scheduler-alwaysthere/foo/bar/')
with GuardedSubprocess(ar):
# Trigger cache update by issuing request:
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
req_data = resp.json()
assert req_data['endpoint_id'] == 'http://127.0.0.1:16000'
@pytest.mark.parametrize(
'networking_mode, container_port',
[['container/bridge', '80'], ['container/bridge', '0'], ['host', '80'], ['host', '0']]
)
def test_app_with_bridge_and_host_networking(
self, nginx_class, mocker, valid_user_header, container_port, networking_mode):
# Testing the cases when networking mode is either 'container' or 'host'.
# The host port can be non-zero or 0. In the latter case Marathon will
# randomly choose the host port. For simplicity in this test we are
# reusing port 16000 on 127.0.0.1 exposed by the mock server, as both
# the fixed (non-zero) one and the one randomly chosen by Marathon.
# It does not matter if the container port is fixed (non-zero) or
# randomly assigned by Marathon (0) or: Admin Router must route the
# request to the host port.
app = self._scheduler_alwaysthere_app()
app['networks'] = [{
'mode': networking_mode
}]
app['container']['portMappings'] = [
{'containerPort': container_port, 'hostPort': 16000}]
app['tasks'][0]['ipAddresses'][0]['ipAddress'] = '127.0.0.1'
ar = nginx_class()
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": [app]})
url = ar.make_url_from_path('/service/scheduler-alwaysthere/foo/bar/')
with GuardedSubprocess(ar):
# Trigger cache update by issuing request:
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
req_data = resp.json()
assert req_data['endpoint_id'] == 'http://127.0.0.1:16000'
def test_upstream_wrong_json(
self, nginx_class, mocker, valid_user_header):
filter_regexp = {
"Cannot decode Marathon apps JSON: ": SearchCriteria(1, True),
}
ar = nginx_class()
# Set wrong non-json response content
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_encoded_response',
aux_data=b"wrong response")
url = ar.make_url_from_path('/service/scheduler-alwaysthere/foo/bar/')
with GuardedSubprocess(ar):
# Register Line buffer filter:
lbf = LineBufferFilter(filter_regexp,
timeout=5, # Just to give LBF enough time
line_buffer=ar.stderr_line_buffer)
# Trigger cache update by issuing request:
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
expected = "503 Service Unavailable: invalid Marathon svcapps cache"
assert expected == resp.content.decode('utf-8').strip()
assert resp.status_code == 503
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
def test_app_without_labels(
self, nginx_class, mocker, valid_user_header):
app = self._scheduler_alwaysthere_app()
app.pop("labels", None)
filter_regexp = {
"Labels not found in app '{}'".format(app["id"]): SearchCriteria(1, True),
}
self._assert_filter_regexp_for_invalid_app(
filter_regexp, app, nginx_class, mocker, valid_user_header)
def test_app_without_service_scheme_label(
self, nginx_class, mocker, valid_user_header):
app = self._scheduler_alwaysthere_app()
app["labels"].pop("DCOS_SERVICE_SCHEME", None)
filter_regexp = {
"Cannot find DCOS_SERVICE_SCHEME for app '{}'".format(app["id"]):
SearchCriteria(1, True),
}
self._assert_filter_regexp_for_invalid_app(
filter_regexp, app, nginx_class, mocker, valid_user_header)
def test_app_without_port_index_label(
self, nginx_class, mocker, valid_user_header):
app = self._scheduler_alwaysthere_app()
app["labels"].pop("DCOS_SERVICE_PORT_INDEX", None)
filter_regexp = {
"Cannot find DCOS_SERVICE_PORT_INDEX for app '{}'".format(app["id"]):
SearchCriteria(1, True),
}
self._assert_filter_regexp_for_invalid_app(
filter_regexp, app, nginx_class, mocker, valid_user_header)
def test_app_container_networking_with_invalid_port_mapping_index_label(
self, nginx_class, mocker, valid_user_header):
app = self._scheduler_alwaysthere_app()
app["labels"]["DCOS_SERVICE_PORT_INDEX"] = "1"
app['networks'] = [{'mode': 'container', 'name': 'samplenet'}]
app['container']['portMappings'] = [{'containerPort': 16000, 'hostPort': 16000}]
message = (
"Cannot find port in container portMappings at Marathon "
"port index '1' for app '{app_id}'"
).format(app_id=app["id"])
filter_regexp = {message: SearchCriteria(1, True)}
self._assert_filter_regexp_for_invalid_app(
filter_regexp, app, nginx_class, mocker, valid_user_header)
def test_app_container_networking_with_invalid_task_port_index_label(
self, nginx_class, mocker, valid_user_header):
app = self._scheduler_alwaysthere_app()
app["labels"]["DCOS_SERVICE_PORT_INDEX"] = "1"
app['networks'] = [{'mode': 'container', 'name': 'samplenet'}]
app['container']['portMappings'] = [
{'containerPort': 7777, 'hostPort': 16000},
{'containerPort': 0},
]
message = (
"Cannot find port in task ports at Marathon "
"port index '1' for app '{app_id}'"
).format(app_id=app["id"])
filter_regexp = {message: SearchCriteria(1, True)}
self._assert_filter_regexp_for_invalid_app(
filter_regexp, app, nginx_class, mocker, valid_user_header)
@pytest.mark.parametrize('networking_mode', ['container/bridge', 'host'])
def test_app_networking_with_invalid_task_port_index_label(
self, nginx_class, mocker, valid_user_header, networking_mode):
app = self._scheduler_alwaysthere_app()
app["labels"]["DCOS_SERVICE_PORT_INDEX"] = "1"
app['networks'] = [{'mode': networking_mode}]
message = (
"Cannot find port in task ports at Marathon "
"port index '1' for app '{app_id}'"
).format(app_id=app["id"])
filter_regexp = {message: SearchCriteria(1, True)}
self._assert_filter_regexp_for_invalid_app(
filter_regexp, app, nginx_class, mocker, valid_user_header)
def test_app_with_port_index_nan_label(
self, nginx_class, mocker, valid_user_header):
app = self._scheduler_alwaysthere_app()
app["labels"]["DCOS_SERVICE_PORT_INDEX"] = "not a number"
filter_regexp = {
"Cannot convert port to number for app '{}'".format(app["id"]):
SearchCriteria(1, True),
}
self._assert_filter_regexp_for_invalid_app(
filter_regexp, app, nginx_class, mocker, valid_user_header)
def test_app_without_mesos_tasks(
self, nginx_class, mocker, valid_user_header):
app = self._scheduler_alwaysthere_app()
app["tasks"] = []
filter_regexp = {
"No task in state TASK_RUNNING for app '{}'".format(app["id"]):
SearchCriteria(1, True),
}
self._assert_filter_regexp_for_invalid_app(
filter_regexp, app, nginx_class, mocker, valid_user_header)
def test_app_without_tasks_in_running_state(
self, nginx_class, mocker, valid_user_header):
app = self._scheduler_alwaysthere_app()
app["tasks"] = [{"state": "TASK_FAILED"}]
filter_regexp = {
"No task in state TASK_RUNNING for app '{}'".format(app["id"]):
SearchCriteria(1, True),
}
self._assert_filter_regexp_for_invalid_app(
filter_regexp, app, nginx_class, mocker, valid_user_header)
def test_app_without_task_host(
self, nginx_class, mocker, valid_user_header):
app = self._scheduler_alwaysthere_app()
app["tasks"][0].pop("host", None)
filter_regexp = {
"Cannot find host or ip for app '{}'".format(app["id"]):
SearchCriteria(1, True),
}
self._assert_filter_regexp_for_invalid_app(
filter_regexp, app, nginx_class, mocker, valid_user_header)
def _assert_filter_regexp_for_invalid_app(
self,
filter_regexp,
app,
nginx_class,
mocker,
auth_headers,
):
"""Helper method that will assert if provided regexp filter is found
in nginx logs for given apps response from Marathon upstream endpoint.
Arguments:
filter_regexp (dict): Filter definition where key is the message
looked up in logs and value is SearchCriteria definition
app (dict): App that upstream endpoint should respond with
nginx_class (Nginx): Nginx process fixture
mocker (Mocker): Mocker fixture
auth_header (dict): Headers that should be passed to Nginx in the
request
"""
ar = nginx_class()
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": [app]})
# Remove all entries for mesos frameworks and mesos_dns so that
# we test only the information in Marathon
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[])
mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=[])
url = ar.make_url_from_path('/service/scheduler-alwaysthere/foo/bar/')
with GuardedSubprocess(ar):
# Register Line buffer filter:
lbf = LineBufferFilter(filter_regexp,
timeout=5, # Just to give LBF enough time
line_buffer=ar.stderr_line_buffer)
# Trigger cache update by issuing request:
resp = requests.get(url,
allow_redirects=False,
headers=auth_headers)
assert resp.status_code == 404
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
def _scheduler_alwaysthere_app(self):
"""Returns a valid Marathon app with the '/scheduler-alwaysthere' id"""
return copy.deepcopy(SCHEDULER_APP_ALWAYSTHERE)
| apache-2.0 | 5,825,491,405,293,336,000 | 40.92916 | 94 | 0.54546 | false |
choeminjun/rasberryPI | MailSender/SendMail_ras.py | 1 | 6156 | import smtplib, logging, datetime, imaplib
import email
AUTH_EMAIL_SENDER = '[email protected]'
class MailSenderAPP(object):
def __init__(self, my_email, my_password):
self.myEmail = my_email
self.myPassword = my_password
self.mailSever = smtplib.SMTP("smtp.gmail.com", 587)
# Emailsuq = email
# self.Subject = subject
@staticmethod
def __loggerSetup__():
logging.basicConfig(filename='logging.txt', level=logging.DEBUG)
def connect_to_server(self):
try:
logging.info('Trying to connect to gmail sever...')
self.mailSever.login(self.myEmail, self.myPassword)
self.mailSever.ehlo()
self.mailSever.starttls()
logging.info('connect to sever:success')
except Exception as Error:
logging.error('Cant connect to gmail sever. Error messge:' + str(Error))
return 'Sever connect Error:' + str(Error)
def EmailSender(self, Emailsuq, subject):
logging.info('--------Program Starting at:%s.-------------' % (datetime.datetime.now()))
if type(Emailsuq).__name__ != 'list':
logging.error('Emailsuq have to be a list ,like this: ["[email protected]"]')
return 'Emailsuq have to be a list Error' + str(type(Emailsuq))
try:
logging.info('Trying to login With Email and password...')
logging.info('logining to gmail sever:success')
except Exception as Error:
logging.error('Cant login to gmail sever. Error messge:' + str(Error))
return 'Login Error:' + str(Error)
try:
logging.info('Sending mail to %s...' % (Emailsuq))
Email_number = 0
for email in Emailsuq:
self.mailSever.sendmail(self.myEmail, email, subject)
Email_number += 1
logging.info('Sending mail to %s:success' % (Emailsuq))
except Exception as Error:
logging.info('Cant Send mail to %s. Error messge:'+str(Error))
return 'Mail sending Error:' + str(Error)
return True
def end_connection(self):
self.mailSever.close()
logging.info('-----------Program Exited-------------')
#
#
# def main(my_email, my_password, email=[], subject='Subject:SE\nTyto alab'):
# MailSenderAPP.__loggerSetup__()
# status = MailSenderAPP(my_email, my_password, email, subject).EmailSender()
#
# return status
class MailReaderAPP(object):
def __init__(self, my_email, my_password):
self.myEmail = my_email
self.myPassword = my_password
self.mailSever = imaplib.IMAP4_SSL("smtp.gmail.com", 993)
@staticmethod
def __loggerSetup__():
logging.basicConfig(filename='logging.txt', level=logging.DEBUG)
logging.debug('--------------MailReaderAPP----------------')
def connect_to_server(self):
try:
logging.info('Trying to connect to gmail sever...')
# self.mailSever.starttls()
self.mailSever.login(self.myEmail, self.myPassword)
logging.info('connect to sever:success')
except Exception as Error:
logging.error('Cant connect to gmail sever. Error messge:' + str(Error))
return 'Sever connect Error:' + str(Error)
def read_latest_mail_and_command(self):
try:
logging.info('Trying to connect to gmail sever...')
logging.info('selecting inbox...')
self.mailSever.list()
self.mailSever.select('inbox')
unread_emails = []
logging.info('getting unseen emails...')
result, data = self.mailSever.uid('search', None, "UNSEEN") # (ALL/UNSEEN)
i = len(data[0].split())
for x in range(i):
logging.info('Decoding unseen email' + str(x))
latest_email_uid = data[0].split()[x]
result, email_data = self.mailSever.uid('fetch', latest_email_uid, '(RFC822)')
# result, email_data = conn.store(num,'-FLAGS','\\Seen')
# this might work to set flag to seen, if it doesn't already
raw_email = email_data[0][1]
raw_email_string = raw_email.decode('utf-8')
email_message = email.message_from_string(raw_email_string)
# Header Details
date_tuple = email.utils.parsedate_tz(email_message['Date'])
if date_tuple:
local_date = datetime.datetime.fromtimestamp(email.utils.mktime_tz(date_tuple))
local_message_date = "%s" % (str(local_date.strftime("%a, %d %b %Y %H:%M:%S")))
email_from = str(email.header.make_header(email.header.decode_header(email_message['From'])))
email_to = str(email.header.make_header(email.header.decode_header(email_message['To'])))
subject = str(email.header.make_header(email.header.decode_header(email_message['Subject'])))
# Body details
logging.info('getting body details...')
for part in email_message.walk():
if part.get_content_type() == "text/plain":
logging.info('getting body details of '+ str(part))
body = part.get_payload(decode=True)
unread_emails.append({'Body': body.decode('utf-8'), 'sender': email_from})
else:
continue
try:
logging.info('returning resaults...')
unread_email_ = []
for i in unread_emails:
if i['sender'] == '최민준 <[email protected]>':
unread_email_.append(i)
return unread_email_
except:
return None
except Exception as E:
logging.error('Error while finding latest email' + str(E))
return 'Sever email read error:' + str(E)
def end_connection(self):
self.mailSever.close()
logging.info('-----------Program Exited-------------')
| apache-2.0 | 941,600,670,764,131,200 | 39.728477 | 109 | 0.559024 | false |
Julian24816/lHelper | __main__.py | 1 | 1440 | # coding=utf-8
#
# Copyright (C) 2016 Julian Mueller
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
lHelper project: python application for helping me learn Latin
"""
import argparse
from sys import path
from os import chdir
from os.path import dirname, abspath
ENABLE_DATA_COMMANDS = False
__version__ = "1.4.7"
__author__ = "Julian Mueller"
path_to_main = abspath(__file__)
path.append(dirname(path_to_main))
chdir(dirname(path_to_main))
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--gui", action="store_const", const="gui", default="cli", dest="ui",
help="Run the program with a GUI instead of a CLI.")
ui_choice = parser.parse_args().ui
if ui_choice == "cli":
import cli
cli.main(__version__, enable_data_commands=ENABLE_DATA_COMMANDS)
elif ui_choice == "gui":
import main
main.main()
| gpl-3.0 | 7,290,843,507,566,273,000 | 31 | 95 | 0.717361 | false |
frank-u/elibrarian | migrations/versions/27c24cd72c1_user_personal_bookshelf.py | 1 | 1307 | """user personal bookshelf
Revision ID: 27c24cd72c1
Revises: 4a8674c1b8a
Create Date: 2015-04-20 20:46:20.702185
"""
# revision identifiers, used by Alembic.
revision = '27c24cd72c1'
down_revision = '4a8674c1b8a'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('users_personal_library',
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('literary_work_id', sa.Integer(), nullable=False),
sa.Column('plan_to_read', sa.Boolean(), nullable=False),
sa.Column('read_flag', sa.Boolean(), nullable=False),
sa.Column('read_progress', sa.Integer(), nullable=True),
sa.Column('read_date', sa.Date(), nullable=True),
sa.Column('rating', sa.Integer(), nullable=True),
sa.Column('comment', sa.Text(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['literary_work_id'], ['literary_works.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['auth_users.id'], ),
sa.PrimaryKeyConstraint('user_id', 'literary_work_id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('users_personal_library')
### end Alembic commands ###
| gpl-3.0 | 5,012,969,641,236,505,000 | 32.512821 | 75 | 0.674063 | false |
nansencenter/DAPPER | dapper/mods/KS/demo.py | 1 | 2208 | """Demonstrate the Kuramoto-Sivashinsky (KS) system."""
# The Kuramoto-Sivashinsky (KS) system:
# u_t = -u*u_x - u_xx - u_xxxx,
# where x ∈ [0, L], periodic BCs,
# is the simplest (?) PDE that admits chaos (requires L>=12?):
#
# Its numerical solution is best undertaken
# with Fourier decomposition for the spatial variable.
# According to kassam2005fourth:
# - The equation is stiff, due to higher-order linear terms:
# - the diffusion term acts as an energy source,
# causing instability of high-order (large-scale Fourier) modes.
# - the hyper-diffusion term yields stability of the low-order modes.
# - The nonlinear term induces mixing of the (Fourier) modes.
#
# bocquet2019consistency use it with DA because:
# "it is characterised by sharp density gradients
# so that it may be expected that local EnKFs are prone to imbalance"
#
# hickmann2017multiresolution use it with DA becaues:
# "[The mixing allows us to] investigate the effect of
# propagating scale-dependent information through the EnKF."
#
# www.encyclopediaofmath.org/index.php/Kuramoto-Sivashinsky_equation:
# Number of unstable modes almost directly proportional to L?
#
# Applications:
# - modeling hydrodynamic stability of laminar flame fronts
# - instabilities in thin films and the flow of a viscous fluid down a vertical plane
# - etc
#
# It can be observed in the plots that sharpness from ICs
# remain in the system for a long time (for ever?).
import numpy as np
from matplotlib import pyplot as plt
from dapper.mods.KS import Model
from dapper.tools.viz import amplitude_animation
model = Model()
# Time settings
T = 150
dt = model.dt
K = round(T/dt)
# IC
N = 3
tt = np.zeros((K+1,))
EE = np.zeros((K+1, N, model.Nx))
# x0 = x0_Kassam
EE[0] = model.x0 + 1e-3*np.random.randn(N, model.Nx)
# Integrate
for k in range(1, K+1):
EE[k] = model.step(EE[k-1], np.nan, dt)
tt[k] = k*dt
# Animate
ani = amplitude_animation(EE, dt, interval=20)
# Plot
plt.figure()
n = 0
plt.contourf(model.grid, tt, EE[:, n, :], 60)
plt.colorbar()
plt.set_cmap('seismic')
plt.axis('tight')
plt.title('Hovmoller for KS system, member %d' % n)
plt.ylabel('Time (t)')
plt.xlabel('Space (x)')
plt.show()
| mit | 7,997,371,697,744,379,000 | 28.413333 | 85 | 0.707616 | false |
projectcuracao/projectcuracao | actions/useCamera.py | 1 | 2300 | # Takes a single picture on System
# filename: takeSinglePicture.py
# Version 1.0 10/31/13
#
# takes a picture using the camera
#
#
import sys
import time
import RPi.GPIO as GPIO
import serial
#import picamera
import subprocess
import MySQLdb as mdb
sys.path.append('/home/pi/ProjectCuracao/main/hardware')
sys.path.append('/home/pi/ProjectCuracao/main/pclogging')
sys.path.append('/home/pi/ProjectCuracao/main/util')
import pclogging
import util
import hardwareactions
def sweepShutter(source, delay):
print("sweepShutter source:%s" % source)
GPIO.setmode(GPIO.BOARD)
time.sleep(delay)
# blink GPIO LED when it's run
GPIO.setup(22, GPIO.OUT)
GPIO.output(22, False)
time.sleep(0.5)
GPIO.output(22, True)
time.sleep(0.5)
hardwareactions.sweepshutter()
time.sleep(3.0)
pclogging.log(pclogging.INFO, __name__, "Sweep Shutter")
def takePicture(source):
try:
f = open("/home/pi/ProjectCuracao/main/state/exposure.txt", "r")
tempString = f.read()
f.close()
lowername = tempString
except IOError as e:
lowername = "auto"
exposuremode = lowername
# take picture
print "taking picture"
cameracommand = "raspistill -o /home/pi/RasPiConnectServer/static/picameraraw.jpg -rot 180 -t 750 -ex " + exposuremode
print cameracommand
output = subprocess.check_output (cameracommand,shell=True, stderr=subprocess.STDOUT )
output = subprocess.check_output("convert '/home/pi/RasPiConnectServer/static/picameraraw.jpg' -pointsize 72 -fill white -gravity SouthWest -annotate +50+100 'ProjectCuracao %[exif:DateTimeOriginal]' '/home/pi/RasPiConnectServer/static/picamera.jpg'", shell=True, stderr=subprocess.STDOUT)
pclogging.log(pclogging.INFO, __name__, source )
print "finished taking picture"
return
def takeSinglePicture(source, delay):
print("takeSinglePicture source:%s" % source)
GPIO.setmode(GPIO.BOARD)
time.sleep(delay)
# blink GPIO LED when it's run
GPIO.setup(22, GPIO.OUT)
GPIO.output(22, False)
time.sleep(0.5)
GPIO.output(22, True)
time.sleep(0.5)
print GPIO.VERSION
hardwareactions.openshutter()
time.sleep(3.0)
takePicture("Single Picture Taken With Shutter")
hardwareactions.closeshutter()
return
| gpl-3.0 | -7,709,868,329,935,272,000 | 22.232323 | 290 | 0.706957 | false |
heewei/ROSUnit00 | build/catkin_generated/generate_cached_setup.py | 1 | 1261 | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/indigo/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/indigo/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/opt/ros/indigo".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/hewei/rosWS/devel/env.sh')
output_filename = '/home/hewei/rosWS/build/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| gpl-3.0 | 3,957,741,896,794,669,000 | 41.033333 | 102 | 0.71134 | false |
hman523/QuickStocks | QuickStocks.py | 1 | 5274 | #Quick Stocks v.2
#Author: Hunter Barbella (aka hman523)
#Use: to have a command line interface for checking stocks
#This code uses the GPL licence while the API uses the MIT licience
#This code is provided AS IS and provides no warrenty
#Imports the libraries requests for making the GET request and
#JSON for parsing the request
#sys and argparse for command line options
import requests
import json
import sys
import argparse
#This is the API URL that is used
emptyUrl = "http://dev.markitondemand.com/MODApis/Api/v2/Quote/jsonp?symbol="
#A nice welcome screen to print
def welcomePrint():
print(" /$$$$$$ /$$ /$$ \n" +
" /$$__ $$ |__/ | $$ \n" +
"| $$ \ $$ /$$ /$$ /$$ /$$$$$$$| $$ /$$ \n" +
"| $$ | $$| $$ | $$| $$ /$$_____/| $$ /$$/ \n" +
"| $$ | $$| $$ | $$| $$| $$ | $$$$$$/ \n" +
"| $$/$$ $$| $$ | $$| $$| $$ | $$_ $$ \n" +
"| $$$$$$/| $$$$$$/| $$| $$$$$$$| $$ \ $$ \n" +
" \____ $$$ \______/ |__/ \_______/|__/ \__/ \n" +
" \__/ \n" +
" \n" +
" \n" +
" /$$$$$$ /$$ /$$ \n" +
" /$$__ $$ | $$ | $$ \n" +
"| $$ \__//$$$$$$ /$$$$$$ /$$$$$$$| $$ /$$ /$$$$$$$\n" +
"| $$$$$$|_ $$_/ /$$__ $$ /$$_____/| $$ /$$/ /$$_____/\n" +
" \____ $$ | $$ | $$ \ $$| $$ | $$$$$$/ | $$$$$$ \n" +
" /$$ \ $$ | $$ /$$| $$ | $$| $$ | $$_ $$ \____ $$\n" +
"| $$$$$$/ | $$$$/| $$$$$$/| $$$$$$$| $$ \ $$ /$$$$$$$/\n" +
" \______/ \___/ \______/ \_______/|__/ \__/|_______/ \n" +
"\n\nVersion: 2.0 Author: Hunter Barbella (AKA hman523)\n\n"
)
#Informs user how to leave program
print("To quit type quit or Control + \"c\"")
#calls the api and returns a nice string of the json
def callApi(stockSymbol):
stockSymbol.upper()
apiCall = requests.get(emptyUrl + stockSymbol)
apiCall = str(apiCall.content)
#Clean up the junk by gettin rid of the unneeded data
indexOfStatus = apiCall.find('\"Status\"')
length = len(apiCall)
apiCall = apiCall[(indexOfStatus-1):length-2]
return apiCall
#converts the string to a json file if it can, if not it returns none
def apiCallToJson(call):
if(len(call) > 0):
jsonOfCall = json.loads(call)
return jsonOfCall
else:
return None
#prints all metadata from a given json
def printAllInfo(jsonOfCall):
if(jsonOfCall is not None and jsonOfCall['Timestamp'] is not None):
print("Firm- " + jsonOfCall['Name'])
print("Symbol- " + jsonOfCall['Symbol'])
print("Last Price- " + str(jsonOfCall['LastPrice']))
print("Change- " + str(jsonOfCall['Change']))
print("Percent Change- " + str(jsonOfCall['ChangePercent']) + "%")
print("Time- " + str(jsonOfCall['Timestamp']))
print("Market Cap- " + str(jsonOfCall['MarketCap']))
print("Volume- " + str(jsonOfCall['Volume']))
print("High- " + str(jsonOfCall['High']))
print("Low- " + str(jsonOfCall['Low']))
print("Open- " + str(jsonOfCall['Open']))
print("Year To Date Change- " + str(jsonOfCall['ChangeYTD']))
print("Year To Date Percent Change- " + str(jsonOfCall['ChangePercentYTD']) + "%")
print("")
else:
error = "unknown error occured"
if(jsonOfCall is None):
error = "stock doesn't exist"
else:
if(jsonOfCall['LastPrice'] is 0 and jsonOfCall['MarketCap'] is 0):
error = ("server error with stock " + jsonOfCall['Symbol'])
print("Error occured: " + error + "\n")
#gets the user input and returns it, also checks if user quits program
def getUserInput():
print("Enter a ticket symbol for a firm or load file:")
userInput = input()
if(userInput.lower() == 'quit'):
quit()
userInput = userInput.replace(" ", "")
return userInput
#using a filename, this opens and returns stock info
def getStocksFromFile(stockFile):
with open(stockFile) as f:
listOfNames = f.readlines()
listOfNames = [i.strip() for i in listOfNames]
return listOfNames
#Main loop in the program
#Asks the user for a stock symbol and searches info based on that
def main():
welcomePrint()
descriptionString = """Arguments: -f -q
f: file name
q: quick lookup option
"""
parser = argparse.ArgumentParser(description=descriptionString)
parser.add_argument('-sparams', nargs=1, dest='sparams', required=False,
help="Use the argument -q or -f")
while(True):
#It gets the user inout, calls the api with it,
# converts it to a JSON then it prints the data.
userIn = getUserInput()
if (userIn.startswith('load')):
names = getStocksFromFile(userIn[4:])
print("Reading from " + userIn[4:])
for n in names:
print("Reading...")
printAllInfo(apiCallToJson(callApi(n)))
else:
printAllInfo(apiCallToJson(callApi(userIn)))
if __name__ == '__main__':
main() | gpl-3.0 | 71,577,834,545,399,464 | 31.392405 | 84 | 0.502465 | false |
sa2ajj/DistroTracker | pts/mail/tests/tests_mail_news.py | 1 | 5377 | # -*- coding: utf-8 -*-
# Copyright 2013 The Distro Tracker Developers
# See the COPYRIGHT file at the top-level directory of this distribution and
# at http://deb.li/DTAuthors
#
# This file is part of Distro Tracker. It is subject to the license terms
# in the LICENSE file found in the top-level directory of this
# distribution and at http://deb.li/DTLicense. No part of Distro Tracker,
# including this file, may be copied, modified, propagated, or distributed
# except according to the terms contained in the LICENSE file.
"""
Tests for the :mod:`pts.mail.mail_news` app.
"""
from __future__ import unicode_literals
from django.test import TestCase, SimpleTestCase
from django.utils import six
from django.utils.six.moves import mock
from django.utils.encoding import force_bytes
from pts.core.models import SourcePackageName, SourcePackage
from pts.core.models import News
from pts.core.tests.common import temporary_media_dir
from pts.mail.mail_news import process
from pts.mail.management.commands.pts_receive_news import (
Command as MailNewsCommand)
from email.message import Message
class BasicNewsGeneration(TestCase):
def setUp(self):
self.package_name = SourcePackageName.objects.create(name='dummy-package')
self.package = SourcePackage.objects.create(
source_package_name=self.package_name,
version='1.0.0')
self.message = Message()
def set_subject(self, subject):
if 'Subject' in self.message:
del self.message['Subject']
self.message['Subject'] = subject
def add_header(self, header_name, header_value):
self.message[header_name] = header_value
def set_message_content(self, content):
self.message.set_payload(content)
def process_mail(self):
process(force_bytes(self.message.as_string(), 'utf-8'))
@temporary_media_dir
def test_creates_news_from_email(self):
"""
Tets that a news is created from an email with the correct header
information.
"""
subject = 'Some message'
content = 'Some message content'
self.set_subject(subject)
self.add_header('X-PTS-Package', self.package.name)
self.set_message_content(content)
self.process_mail()
# A news item is created
self.assertEqual(1, News.objects.count())
news = News.objects.all()[0]
# The title of the news is set correctly.
self.assertEqual(subject, news.title)
self.assertIn(content, news.content)
# The content type is set to render email messages
self.assertEqual(news.content_type, 'message/rfc822')
@temporary_media_dir
def test_create_news_url_from_email(self):
"""
Tests that when an X-PTS-Url header is given the news content is the
URL, not the email message.
"""
subject = 'Some message'
content = 'Some message content'
self.set_subject(subject)
self.add_header('X-PTS-Package', self.package.name)
url = 'http://some-url.com'
self.add_header('X-PTS-Url', url)
self.set_message_content(content)
self.process_mail()
# A news item is created
self.assertEqual(1, News.objects.count())
news = News.objects.all()[0]
# The title of the news is set correctly.
self.assertEqual(url, news.title)
self.assertIn(url, news.content.strip())
@temporary_media_dir
def test_create_news_package_does_not_exist(self):
"""
Tests that when the package given in X-PTS-Package does not exist, no
news items are created.
"""
subject = 'Some message'
content = 'Some message content'
self.set_subject(subject)
self.add_header('X-PTS-Package', 'no-exist')
self.set_message_content(content)
# Sanity check - there are no news at the beginning
self.assertEqual(0, News.objects.count())
self.process_mail()
# There are still no news
self.assertEqual(0, News.objects.count())
@mock.patch('pts.mail.mail_news.vendor.call')
@temporary_media_dir
def test_create_news_calls_vendor_function(self, mock_vendor_call):
"""
Tests that the vendor-provided function is called during the processing
of the news.
"""
subject = 'Some message'
content = 'Some message content'
# Do not add any headers.
self.set_subject(subject)
self.set_message_content(content)
# Make it look like the vendor does not implement the function
mock_vendor_call.return_value = (None, False)
self.process_mail()
# The function was called?
self.assertTrue(mock_vendor_call.called)
# The correct vendor function was asked for?
self.assertEqual(mock_vendor_call.call_args[0][0], 'create_news_from_email_message')
class MailNewsManagementCommandTest(SimpleTestCase):
"""
Tests that the :mod:`pts.mail.management.commands.pts_receive_news`
management command calls the correct function.
"""
@mock.patch('pts.mail.management.commands.pts_receive_news.process')
def test_calls_process(self, mock_process):
cmd = MailNewsCommand()
cmd.input_file = mock.create_autospec(six.BytesIO)
mock_process.assert_called()
| gpl-2.0 | -2,727,537,387,692,635,000 | 34.609272 | 92 | 0.660963 | false |
dani-i/bachelor-project | graphics/gui/test/automated_test_sess_gui.py | 1 | 13343 | import tkinter as tk
import constants.gui_constants as const
from controllers.data_set_controller import DataSetController
from controllers.session_controller import SessionController
from file_experts.data_set.data_set_validator import DataSetValidator
from graphics.output.test_sess.test_sess_output_f import TestSessOutputF
from graphics.output.train_sess.train_sess_details_output_f import TrainSessDetailsOutputF
from graphics.widgets.browse_f import BrowseF
from graphics.widgets.scrollable_canvas_c import ScrollableCanvasC
from graphics.widgets.validated_browse_f import ValidatedBrowseF
from session.test_session import TestSession
from utils.charts.chart_entries import ChartEntries
from utils.charts.chart_entry import ChartEntry
from utils.charts.test_sess_charts import TestSessCharts
from utils.test_sess_overall_results import TestSessOverallResults
class AutomatedTestSessGUI(tk.Frame):
def __init__(self,
parent,
enable_test_sess_buttons,
disable_test_sess_buttons):
"""
:param parent:
:param enable_test_sess_buttons:
:param disable_test_sess_buttons:
"""
tk.Frame.__init__(self,
parent)
self._disable_test_sess_buttons = disable_test_sess_buttons
self._enable_test_sess_buttons = enable_test_sess_buttons
self._valid_data_set_selected = False
self._train_session_selected = False
self._test_data_set_path = ''
self._session_details = None
self._sc_scrollable = None
self._f_output = None
self._create_widgets()
self._place_widgets()
#########################################################################
# Widget creation and placement
def _create_and_place_output_frame_and_canvas(self):
if self._sc_scrollable:
self._sc_scrollable.destroy()
del self._sc_scrollable
if self._f_output:
self._f_output.destroy()
del self._f_output
self._f_output = tk.Frame(
self,
)
self._sc_scrollable = ScrollableCanvasC(
parent=self._f_output,
)
self._sc_scrollable.pack(side='top',
fill='both',
expand=True)
self._f_output.pack(side='top',
fill='both',
expand=True)
def _create_widgets(self):
self._create_and_place_output_frame_and_canvas()
self._f_start_btn = tk.Frame(
self._sc_scrollable.f_main_frame,
padx=const.ATS_SUBFRAME_PADX,
pady=const.ATS_SUBFRAME_PADY,
)
self._train_sess_browse = BrowseF(
self._sc_scrollable.f_main_frame,
no_selection_message=const.ATS_TSB_NO_SELECTION,
user_instruction=const.ATS_TSB_USER_INSTRUCTION,
browse_window_title=const.ATS_TSB_WINDOW_TITLE,
supported_files=const.ATS_TSB_SUPPORTED_FILES,
initial_path=const.ATS_TSB_INITIAL_DIRECTORY,
browse_button_eh=self._train_session_selected_eh,
directory=False,
disabled=False
)
self._data_set_browse = ValidatedBrowseF(
self._sc_scrollable.f_main_frame,
no_selection_message=const.ATS_DSB_NO_SELECTION,
user_instruction=const.ATS_DSB_USER_INSTRUCTION,
invalid_message=const.ATS_DSB_INVALID_MESSAGE,
browse_window_title=const.ATS_DSB_WINDOW_TITLE,
initial_directory=const.ATS_DSB_INITIAL_DIRECTORY,
supported_files=const.ATS_DSB_SUPPORTED_FILES,
validation_method=self._validate_data_set_selection,
browse_button_eh=self._data_set_selected_eh,
directory=False,
disabled=True
)
self._train_sess_details_output = TrainSessDetailsOutputF(
parent=self._sc_scrollable.f_main_frame,
disabled=True
)
self._start_button = tk.Button(
self._f_start_btn,
command=self._start_btn_eh,
text=const.ATS_BTN_TEXT,
font=const.ATS_FONT,
padx=const.ATS_START_BTN_PADX,
pady=const.ATS_START_BTN_PADY,
bd=const.ATS_START_BTN_BD,
state='disabled'
)
self._data_set_browse.config(
pady=const.ETS_FRAME_PADY * 2,
)
self._train_sess_browse.config(
pady=const.ETS_FRAME_PADY * 4,
)
def _place_widgets(self):
self._train_sess_browse.pack(side='top',
fill='both',
expand=True)
self._train_sess_details_output.pack(side='top',
fill='both',
expand=True)
self._data_set_browse.pack(side='top',
fill='both',
expand=True)
self._f_start_btn.pack(side='top',
fill='both',
expand=True)
self._start_button.pack(side='top')
#########################################################################
# Event handling
# ~~~~~~~~~~~~~~~~~~~~~Train session browse~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _train_session_selected_eh(
self,
selected_path):
self._train_session_selected = True
self._check_form_validity()
self._session_details = SessionController.read_main_session_file(
session_file_path=selected_path
)
self._data_set_details = DataSetController.read_main_data_set_file(
file_path=self._session_details.main_data_set_file
)
self._train_sess_details_output.update_session_details(
data_set_details=self._data_set_details,
session_details=self._session_details
)
# ~~~~~~~~~~~~~~~~~~~~~Data set browse~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _data_set_selected_eh(
self,
selected_path):
self._valid_data_set_selected = True
self._session_details.current_data_set_path = selected_path
self._check_form_validity()
# ~~~~~~~~~~~~~~~~~~~~~Session buttons~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _start_btn_eh(self):
self._create_and_place_output_frame_and_canvas()
self._test_sess_output = TestSessOutputF(
parent=self._sc_scrollable.f_main_frame
)
self._test_sess_output.confusion_matrix.set_classes(
data_set_classes=self._data_set_details.classes.get_data_set_classes()
)
self._test_sess_output.pack(side='top',
fill='both',
expand=True)
self._disable_test_sess_buttons()
self._train_sess_details_output.destroy()
self._train_sess_browse.destroy()
self._data_set_browse.destroy()
self._start_button.destroy()
self._f_start_btn.destroy()
self._test_session = TestSession(
test_data_set_path=self._test_data_set_path,
session_details=self._session_details,
testing_finished=self.session_ended,
automated_test_session=self
)
self._test_session.start_test()
#########################################################################
# Auxiliary methods
def _validate_data_set_selection(
self,
selected_path):
self._test_data_set_path = selected_path
data_set_details = DataSetController.read_main_data_set_file(
file_path=selected_path
)
return DataSetValidator.data_set_and_session_are_compatible(
session_details=self._session_details,
data_set_details=data_set_details
)
def _check_form_validity(self):
if self._train_session_selected:
self._train_sess_details_output.enable()
self._data_set_browse.enable()
if self._valid_data_set_selected:
self._start_button.config(state='norma')
else:
self._start_button.config(state='disabled')
else:
self._start_button.config(state='disabled')
self._train_sess_details_output.disable()
self._data_set_browse.disable()
def _update_progress(
self,
progress_text: str,
progress_value: int):
"""
- Updates the progress bar.
"""
if not isinstance(progress_value, int) \
or progress_value < 0:
raise ValueError('Value provided '
+ str(progress_value)
+ '. It must be an integer >=0 and <=100.')
if progress_value > 100:
progress_value = 100
self._test_sess_output.progress_bar.update_progress(
percent=progress_value,
text=progress_text
)
def _process_test_results(self, confusion_matrix):
self._test_sess_output.confusion_matrix.set_confusion_matrix(confusion_matrix)
classes = self._data_set_details.classes.get_data_set_classes()
overall_results = TestSessOverallResults()
test_charts = TestSessCharts()
ces_precision = ChartEntries()
ces_f_measure = ChartEntries()
ces_recall = ChartEntries()
avg_precision = 0.
avg_f_measure = 0.
avg_recall = 0.
f_measure = []
precision = []
recall = []
line = []
col = []
for i in range(len(confusion_matrix)):
line.append(0)
col.append(0)
for j in range(len(confusion_matrix)):
line[i] = line[i] + confusion_matrix[i][j]
col[i] = col[i] + confusion_matrix[j][i]
for i in range(len(confusion_matrix)):
recall.append(confusion_matrix[i][i] / col[i] * 100)
avg_recall += recall[i]
precision.append(confusion_matrix[i][i] / line[i] * 100)
avg_precision += precision[i]
if precision[i]+recall[i] != 0:
tmp_recall = 2 \
* (
(precision[i] * recall[i])
/ (precision[i] + recall[i])
)
else:
tmp_recall = 0
f_measure.append(tmp_recall)
avg_f_measure += f_measure[i]
avg_precision = float(avg_precision / len(confusion_matrix))
avg_f_measure = float(avg_f_measure / len(confusion_matrix))
avg_recall = float(avg_recall / len(confusion_matrix))
overall_results.recall = avg_recall
overall_results.f_measure = avg_f_measure
overall_results.precision = avg_precision
self._test_sess_output.overall_results.update_results(
overall_results=overall_results
)
for i in range(len(confusion_matrix)):
# Recall
tmp_recall = ChartEntry()
tmp_recall.identifier = classes[i].class_name
tmp_recall.x = 0
tmp_recall.y = recall[i]
tmp_recall.confidence_interval_95 = 0.
ces_recall.add(new_entry=tmp_recall)
# Precision
tmp_precision = ChartEntry()
tmp_precision.identifier = classes[i].class_name
tmp_precision.x = 0
tmp_precision.y = precision[i]
tmp_precision.confidence_interval_95 = 0.
ces_precision.add(new_entry=tmp_precision)
# F measure
tmp_f_measure = ChartEntry()
tmp_f_measure.identifier = classes[i].class_name
tmp_f_measure.x = 0
tmp_f_measure.y = f_measure[i]
tmp_f_measure.confidence_interval_95 = 0.
ces_f_measure.add(new_entry=tmp_f_measure)
test_charts.set_precision_chart(ces_precision)
test_charts.set_recall_chart(ces_recall)
test_charts.set_f_measure_chart(ces_f_measure)
self._test_sess_output.charts.update_chart_values(test_charts)
#########################################################################
# Public methods
def confusion_matrix_update_method(
self,
confusion_matrix):
self._process_test_results(confusion_matrix)
def session_ended(self):
self._enable_test_sess_buttons()
print('Session ended.')
def update_test_progress(
self,
progress: int):
"""
- Updates the progress bar with the current download percentage.
"""
if progress != 100:
text = 'Testing - ' \
+ str(progress) \
+ '% completed'
else:
text = 'Test session completed! You can check the results.'
self._update_progress(
progress_value=progress,
progress_text=text
)
#########################################################################
| apache-2.0 | 1,835,741,165,412,378,000 | 29.187783 | 90 | 0.534213 | false |
royharoush/rtools | dnmaps.py | 1 | 25943 | #! /usr/bin/env python
#
# DNmap Server - Edited by Justin Warner (@sixdub). Originally written by Sebastian Garcia
# Original Copyright and license (included below) apply.
#
# This is the server code to be used in conjunction with Minions, a collaborative distributed
# scanning solution.
#
#
# DNmap Version Modified: .6
# Copyright (C) 2009 Sebastian Garcia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
# Author:
# Sebastian Garcia [email protected]
#
# Based on code from Twisted examples.
# Copyright (c) Twisted Matrix Laboratories.
#
import logging
import logging.handlers
import datetime
import sqlite3
import os
import xml.etree.ElementTree as ET
try:
from twisted.internet.protocol import Factory, Protocol
from twisted.internet import ssl, reactor, task
from twisted.python import log
from twisted.python.logfile import DailyLogFile
except:
print 'You need twisted library. apt-get install python-twisted-bin python-twisted-core'
exit(-1)
import getopt, sys, time, os
try:
from OpenSSL import SSL
except:
print 'You need python openssl library. apt-get install python-openssl'
exit(-1)
# Global variables
vernum='0.6'
nmap_commands_file = ''
nmap_command = []
nmap_commands_sent = []
trace_file = ''
nmap_output_coming_back = False
XML_file= ''
GNmap_file=''
outputswitch=''
file_position = 0
clients = {}
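# Each entry in 'clients' is keyed by 'host:port' and holds the per-client record
# built in process_input_line(): Alias, FirstTime, LastTime, NbrCommands, Status,
# LastCommand, Version, IsRoot, RunCmdsxMin and AvrCmdsxMin.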
port=8001
clientes = {}
base_dir = os.path.dirname(os.path.abspath(__file__))
output_file=os.path.join(base_dir,"current_output")
log_file=os.path.join(base_dir, "log")
log_level='info'
sql_conn=''
sql_file=''
verbose_level = 2
# 0: quiet
# 1: info, normal
# 2: Statistical table
# 3: debug
# 4: ?
# 5: ?
# This is to assure that the first time we run, something is shown
temp = datetime.datetime.now()
delta = datetime.timedelta(seconds=5)
last_show_time = temp - delta
# Client timeout in seconds; defaults to 14400 (4 hours)
client_timeout = 14400
sort_type = 'Status'
# By default in the same directory
pemfile = os.path.join(base_dir,'server.pem')
cafile = os.path.join(base_dir,'ca.pem')
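# Note: server.pem must contain both the private key and the certificate.
# If you do not have one, a self-signed file can be created with OpenSSL, for
# example (file names and subject are only an illustration):
#   openssl req -x509 -newkey rsa:2048 -nodes -days 365 -keyout key.pem -out cert.pem -subj "/CN=dnmap-server"
#   cat key.pem cert.pem > server.pem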
# End of global variables
# Print version information and exit
def version():
print "+----------------------------------------------------------------------+"
print "| dnmap_server Version "+ vernum +" |"
print "| This program is free software; you can redistribute it and/or modify |"
print "| it under the terms of the GNU General Public License as published by |"
print "| the Free Software Foundation; either version 2 of the License, or |"
print "| (at your option) any later version. |"
print "| |"
print "| Author: Garcia Sebastian, [email protected] |"
print "| www.mateslab.com.ar |"
print "+----------------------------------------------------------------------+"
print
# Print help information and exit:
def usage():
print "+----------------------------------------------------------------------+"
print "| dnmap_server Version "+ vernum +" |"
print "| This program is free software; you can redistribute it and/or modify |"
print "| it under the terms of the GNU General Public License as published by |"
print "| the Free Software Foundation; either version 2 of the License, or |"
print "| (at your option) any later version. |"
print "| |"
print "| Author: Garcia Sebastian, [email protected] |"
print "| www.mateslab.com.ar |"
print "+----------------------------------------------------------------------+"
print "\nusage: %s <options>" % sys.argv[0]
print "options:"
print " -f, --nmap-commands Nmap commands file"
print " -p, --port TCP port where we listen for connections."
print " -L, --log-file Log file. Defaults to /var/log/dnmap_server.conf."
print " -l, --log-level Log level. Defaults to info."
print " -v, --verbose_level Verbose level. Give a number between 1 and 5. Defaults to 1. Level 0 means be quiet."
print " -t, --client-timeout How many time should we wait before marking a client Offline. We still remember its values just in case it cames back."
print " -s, --sort Field to sort the statical value. You can choose from: Alias, #Commands, UpTime, RunCmdXMin, AvrCmdXMin, Status"
print " -P, --pemfile pem file to use for TLS connection. By default we use the server.pem file provided with the server in the current directory."
print
print "dnmap_server uses a \'<nmap-commands-file-name>.dnmaptrace\' file to know where it must continue reading the nmap commands file. If you want to start over again,"
print "just delete the \'<nmap-commands-file-name>.dnmaptrace\' file"
print
sys.exit(1)
def timeout_idle_clients():
"""
	This function searches for idle clients and marks them as Offline, so we do not display them
"""
global mlog
global verbose_level
global clients
global client_timeout
try:
for client_id in clients:
now = datetime.datetime.now()
time_diff = now - clients[client_id]['LastTime']
if time_diff.seconds >= client_timeout:
clients[client_id]['Status']='Offline'
except Exception as inst:
if verbose_level > 2:
msgline = 'Problem in mark_as_idle function'
mlog.error(msgline)
print msgline
msgline = type(inst)
mlog.error(msgline)
print msgline
msgline = inst.args
mlog.error(msgline)
print msgline
msgline = inst
mlog.error(msgline)
print msgline
def read_file_and_fill_nmap_variable():
""" Here we fill the nmap_command with the lines of the txt file. Only the first time. Later this file should be filled automatically"""
global nmap_commands_file
global nmap_command
global trace_file
global file_position
global mlog
global verbose_level
global sql_conn
global sql_file
with open(nmap_commands_file,'r') as f:
jobs = f.readlines()
#make sure all jobs in file are in queue
for job in jobs:
if not job in nmap_command:
nmap_command.insert(0,job)
mlog.debug('New Job: {0}'.format(job))
#clear queue of things not in jobs file
for job in nmap_command:
if not job in jobs:
nmap_command.remove(job)
return
def verifyCallback(connection, x509, errnum, errdepth, ok):
if not ok:
print "Invalid cert from subject: ",x509.get_subject()
return False
else:
return True
class ServerContextFactory:
global mlog
global verbose_level
global pemfile
global cafile
""" Only to set up SSL"""
def getContext(self):
"""
Create an SSL context.
"""
try:
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.use_certificate_file(pemfile)
ctx.use_privatekey_file(pemfile)
except:
print "Unexpected error:", sys.exc_info()[0]
print 'You need to have a server.pem file for the server to work'
print pemfile
exit(-1)
# try:
# ctx.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT, verifyCallback)
# ctx.load_verify_locations(cafile)
# except:
# print "Unexpected error:", sys.exc_info()[0]
# print 'You need to have a ca.pem file for the server to work'
# print cafile
# exit(-1)
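		# The block above is intentionally commented out: enabling it makes the server
		# require client certificates and verify them against ca.pem via verifyCallback().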
return ctx
def show_info():
global verbose_level
global mlog
global clients
global last_show_time
global start_time
global sort_type
global output_file
of = open(output_file, "w")
try:
now = datetime.datetime.now()
diff_time = now - start_time
amount = 0
for j in clients:
if clients[j]['Status'] != 'Offline':
amount += 1
if verbose_level > 0:
line = '=| MET:{0} | Amount of Online clients: {1} |='.format(diff_time, amount)
print line
mlog.debug(line)
of.write(line+"\n")
if clients != {}:
if verbose_level > 1:
line = 'Clients connected'
print line
mlog.debug(line)
of.write(line+"\n")
line = '-----------------'
print line
mlog.debug(line)
of.write(line+"\n")
#line = 'Alias\t#Commands\tLast Time Seen\t\t\tVersion\tIsRoot\tStatus'
line = '{0:15}\t{1}\t{2}\t{3}\t{4}\t\t{5}\t{6}\t{7}\t{8}\t{9}'.format('Alias','#Commands','Last Time Seen', '(time ago)', 'UpTime', 'Version', 'IsRoot', 'RunCmdXMin', 'AvrCmdXMin', 'Status')
print line
mlog.debug(line)
of.write(line+"\n")
for i in clients:
if clients[i]['Status'] != 'Offline':
# Strip the name of the day and the year
temp = clients[i]['LastTime'].ctime().split(' ')[1:-1]
lasttime = ''
for j in temp:
lasttime = lasttime + str(j) + ' '
time_diff = datetime.datetime.now() - clients[i]['LastTime']
#time_diff_secs = int(time_diff.total_seconds() % 60)
#time_diff_secs = int(time_diff.seconds % 60)
time_diff_secs = int( (time_diff.seconds + (time_diff.microseconds / 1000000.0) ) % 60)
#time_diff_mins = int(time_diff.total_seconds() / 60)
#time_diff_mins = int(time_diff.seconds / 60)
time_diff_mins = int( (time_diff.seconds + (time_diff.microseconds / 1000000.0) ) / 60)
uptime_diff = datetime.datetime.now() - clients[i]['FirstTime']
#uptime_diff_hours = int(uptime_diff.total_seconds() / 3600)
#uptime_diff_hours = int(uptime_diff.seconds / 3600)
uptime_diff_hours = int( (uptime_diff.seconds + (uptime_diff.microseconds / 1000000.0)) / 3600)
#uptime_diff_mins = int(uptime_diff.total_seconds() % 3600 / 60)
#uptime_diff_mins = int(uptime_diff.seconds % 3600 / 60)
uptime_diff_mins = int( ((uptime_diff.seconds % 3600) + (uptime_diff.microseconds / 1000000.0)) / 60)
line = '{0:15}\t{1}\t\t{2}({3:2d}\'{4:2d}\")\t{5:2d}h{6:2d}m\t\t{7}\t{8}\t{9:10.1f}\t{10:9.1f}\t{11}'.format(clients[i]['Alias'], clients[i]['NbrCommands'], lasttime, time_diff_mins, time_diff_secs, uptime_diff_hours, uptime_diff_mins , clients[i]['Version'], clients[i]['IsRoot'], clients[i]['RunCmdsxMin'], clients[i]['AvrCmdsxMin'], clients[i]['Status'])
print line
mlog.debug(line)
of.write(line+"\n")
print
last_show_time = datetime.datetime.now()
of.close()
except Exception as inst:
if verbose_level > 2:
msgline = 'Problem in show_info function'
mlog.error(msgline)
print msgline
msgline = type(inst)
mlog.error(msgline)
print msgline
msgline = inst.args
mlog.error(msgline)
print msgline
msgline = inst
mlog.error(msgline)
print msgline
def send_one_more_command(ourtransport,client_id):
# Extract the next command to send.
global nmap_command
global verbose_level
global mlog
global clients
global nmap_commands_file
global trace_file
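	# Job lifecycle: a command popped from the in-memory queue is removed from the
	# pending commands file and appended to the trace file while a client executes it.
	# process_input_line() takes it out of the trace file on 'Nmap Output Finished',
	# and connectionLost() puts it back in the queue and commands file if the client drops.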
try:
alias = clients[client_id]['Alias']
command_to_send = nmap_command.pop()
line = 'Data sent to client ID '+client_id+' ('+alias+')'
log.msg(line, logLevel=logging.INFO)
if verbose_level > 2:
print line
line= '\t'+command_to_send.strip('\n')
log.msg(line, logLevel=logging.INFO)
if verbose_level > 2:
print line
ourtransport.transport.write(command_to_send)
#remove the cmd from the pending job file and add to trace file
with open(nmap_commands_file, "r") as f:
jobs = f.readlines()
jobs.remove(command_to_send)
with open(nmap_commands_file, "w") as f:
f.writelines(jobs)
#add to tracefile
with open(trace_file, "a+") as f:
f.writelines(command_to_send)
clients[client_id]['NbrCommands'] += 1
clients[client_id]['LastCommand'] = command_to_send
clients[client_id]['Status'] = 'Executing'
except IndexError:
# If the list of commands is empty, look for new commands
line = 'No more commands in queue.'
log.msg(line, logLevel=logging.DEBUG)
if verbose_level > 2:
print line
line = '\tMaking the client '+str(client_id)+' ('+str(alias)+')'+' wait 10 secs for new commands to arrive...'
log.msg(line, logLevel=logging.DEBUG)
if verbose_level > 2:
print line
ourtransport.transport.write('Wait:10')
except Exception as inst:
print 'Problem in Send More Commands'
print type(inst)
print inst.args
print inst
def process_input_line(data,ourtransport,client_id):
global mlog
global verbose_level
global clients
global trace_file
global nmap_command
global nmap_output_coming_back
global nmap_output_file
global xml_output_file
global gnmap_output_file
global outputswitch
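	# Messages expected from a client (fields are ':' separated, parsed below):
	#   'Starts the Client ID:<id>:Alias:<alias>:...'  -> registers the client (version and root flag if sent)
	#   'Send more commands'                           -> the client asks for another nmap command
	#   'Nmap Output File:<name>:...'                  -> output for scan <name> starts coming back
	#   '#XMLOUTPUT#' / '#GNMAPOUTPUT#'                -> switch storage to the .xml / .gnmap file
	#   'Nmap Output Finished'                         -> output ended; the command is marked as done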
try:
# What to do. Send another command or store the nmap output?
if 'Starts the Client ID:' in data:
# No more nmap lines coming back
if nmap_output_coming_back:
nmap_output_coming_back = False
alias = data.split(':')[3].strip('\n').strip('\r').strip(' ')
try:
client_version = data.split(':')[5].strip('\n').strip('\r').strip(' ')
				client_isroot = 'False' if data.split(':')[7].strip('\n').strip('\r').strip(' ') == '0' else 'True' # the client reports 0 or 1 as a string
except IndexError:
# It is an old version and it is not sending these data
client_version = '0.1?'
client_isroot = '?'
try:
# Do we have it yet?
value = clients[client_id]['Alias']
# Yes
except KeyError:
# No
clients[client_id] = {}
clients[client_id]['Alias'] = alias
clients[client_id]['FirstTime'] = datetime.datetime.now()
clients[client_id]['LastTime'] = datetime.datetime.now()
clients[client_id]['NbrCommands'] = 0
clients[client_id]['Status'] = 'Online'
clients[client_id]['LastCommand'] = ''
clients[client_id]['Version'] = client_version
clients[client_id]['IsRoot'] = client_isroot
clients[client_id]['RunCmdsxMin'] = 0
clients[client_id]['AvrCmdsxMin'] = 0
msgline = 'Client ID connected: {0} ({1})'.format(str(client_id),str(alias))
log.msg(msgline, logLevel=logging.INFO)
if verbose_level > 1:
print '+ '+msgline
elif 'Send more commands' in data:
alias = clients[client_id]['Alias']
clients[client_id]['Status'] = 'Online'
#nowtime = datetime.datetime.now().ctime()
nowtime = datetime.datetime.now()
clients[client_id]['LastTime'] = nowtime
# No more nmap lines coming back
if nmap_output_coming_back:
nmap_output_coming_back = False
send_one_more_command(ourtransport,client_id)
elif 'Nmap Output File' in data and not nmap_output_coming_back:
# Nmap output start to come back...
nmap_output_coming_back = True
outputswitch=0
alias = clients[client_id]['Alias']
clients[client_id]['Status'] = 'Online'
			# compute the commands per minute for this client
# 1 more command. Time is between lasttimeseen and now
time_since_cmd_start = datetime.datetime.now() - clients[client_id]['LastTime']
			# Cumulative average
prev_ca = clients[client_id]['AvrCmdsxMin']
#commandsXsec = ( time_since_cmd_start.total_seconds() + (clients[client_id]['NbrCommands'] * prev_ca) ) / ( clients[client_id]['NbrCommands'] + 1 )
#clients[client_id]['RunCmdsxMin'] = cmds_per_min = 60 / time_since_cmd_start.total_seconds()
clients[client_id]['RunCmdsxMin'] = 60 / ( time_since_cmd_start.seconds + ( time_since_cmd_start.microseconds / 1000000.0))
clients[client_id]['AvrCmdsxMin'] = ( clients[client_id]['RunCmdsxMin'] + (clients[client_id]['NbrCommands'] * prev_ca) ) / ( clients[client_id]['NbrCommands'] + 1 )
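			# Running (cumulative) average of the rate:
			#   new_avg = (x + n * old_avg) / (n + 1)
			# where x is this command's commands-per-minute value and n is the
			# client's current NbrCommands counter.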
# update the lasttime
nowtime = datetime.datetime.now()
clients[client_id]['LastTime'] = nowtime
# Create the dir
os.system('mkdir %s/nmap_results > /dev/null 2>&1'%base_dir)
# Get the output file from the data
# We strip \n.
filename = data.split(':')[1].strip('\n')
xml_output_file = "%s/nmap_results/%s.xml"%(base_dir, filename)
nmap_output_file = "%s/nmap_results/%s.nmap"%(base_dir, filename)
gnmap_output_file = "%s/nmap_results/%s.gnmap"%(base_dir, filename)
if verbose_level > 2:
log.msg('\tNmap output file is: {0}'.format(nmap_output_file), logLevel=logging.DEBUG)
clientline = 'Client ID:'+client_id+':Alias:'+alias+"\n"
with open(nmap_output_file, 'a+') as f:
f.writelines(clientline)
with open(xml_output_file, 'a+') as f:
f.writelines(clientline)
with open(gnmap_output_file, 'a+') as f:
f.writelines(clientline)
elif nmap_output_coming_back and 'Nmap Output Finished' not in data:
# Store the output to a file.
alias = clients[client_id]['Alias']
clients[client_id]['Status'] = 'Storing'
#nowtime = datetime.datetime.now().ctime()
nowtime = datetime.datetime.now()
clients[client_id]['LastTime'] = nowtime
#print data
if "#XMLOUTPUT#" in data:
outputswitch=1
elif "#GNMAPOUTPUT#" in data:
outputswitch=2
else:
if outputswitch==0:
with open(nmap_output_file, 'a+') as f:
f.writelines(data+'\n')
elif outputswitch==1:
with open(xml_output_file, 'a+') as f:
f.writelines(data+'\n')
elif outputswitch==2:
with open(gnmap_output_file, 'a+') as f:
f.writelines(data+'\n')
log.msg('\tStoring nmap output for client {0} ({1}).'.format(client_id, alias), logLevel=logging.DEBUG)
elif 'Nmap Output Finished' in data and nmap_output_coming_back:
# Nmap output finished
nmap_output_coming_back = False
alias = clients[client_id]['Alias']
clients[client_id]['Status'] = 'Online'
#nowtime = datetime.datetime.now().ctime()
nowtime = datetime.datetime.now()
clients[client_id]['LastTime'] = nowtime
# Store the finished nmap command in the file, so we can retrieve it if we need...
finished_nmap_command = clients[client_id]['LastCommand']
clients[client_id]['LastCommand'] = ''
#clear out the trace file
with open(trace_file, 'r') as f:
running_jobs = f.readlines()
running_jobs.remove(finished_nmap_command)
with open(trace_file, 'w') as f:
f.writelines(running_jobs)
if verbose_level > 2:
print '+ Storing command {0} in trace file.'.format(finished_nmap_command.strip('\n').strip('\r'))
outputswitch=0
except Exception as inst:
print 'Problem in process input lines'
print type(inst)
print inst.args
print inst
class NmapServerProtocol(Protocol):
""" This is the function that communicates with the client """
global mlog
global verbose_level
global clients
global nmap_command
global mlog
def connectionMade(self):
if verbose_level > 0:
pass
def connectionLost(self, reason):
peerHost = self.transport.getPeer().host
peerPort = str(self.transport.getPeer().port)
client_id = peerHost+':'+peerPort
try:
alias = clients[client_id]['Alias']
except:
msgline = 'No client found in list with id {0}. Moving on...'.format(client_id)
log.msg(msgline, logLevel=logging.INFO)
return 0
clients[client_id]['Status'] = 'Offline'
command_to_redo = clients[client_id]['LastCommand']
if command_to_redo != '':
#readd to job file and queue
nmap_command.append(command_to_redo)
with open(nmap_commands_file, "a+") as f:
f.writelines(command_to_redo)
#clear out the trace file
with open(trace_file, 'r') as f:
running_jobs = f.readlines()
running_jobs.remove(command_to_redo)
with open(trace_file, 'w') as f:
f.writelines(running_jobs)
if verbose_level > 1:
msgline = 'Connection lost in the protocol. Reason:{0}'.format(reason)
msgline2 = '+ Connection lost for {0} ({1}).'.format(alias, client_id)
log.msg(msgline, logLevel=logging.DEBUG)
print msgline2
if verbose_level > 2:
print 'Re-inserting command: {0}'.format(command_to_redo)
def dataReceived(self, newdata):
#global client_id
data = newdata.strip('\r').strip('\n').split('\r\n')
peerHost = self.transport.getPeer().host
peerPort = str(self.transport.getPeer().port)
client_id = peerHost+':'+peerPort
# If you need to debug
if verbose_level > 2:
log.msg('Data received', logLevel=logging.DEBUG)
log.msg(data, logLevel=logging.DEBUG)
print '+ Data received: {0}'.format(data)
for line in data:
process_input_line(line,self,client_id)
def sql_import_loop():
global sql_file
global sql_conn
global mlog
tree=""
#Process all files in the nmap_results directory
path = "%s/nmap_results/"%base_dir
newpath="%s/nmap_results/processed/"%base_dir
try:
os.stat(path)
os.stat(newpath)
except:
os.mkdir(path)
os.mkdir(newpath)
output_files = os.listdir("%s/nmap_results/"%base_dir)
scan_id=""
for ofile in output_files:
complete=path+ofile
if os.path.isfile(complete):
if ofile.endswith(".xml"):
try:
scan_id=ofile.split(".xml")[0]
log.msg("XML File Found: %s"%scan_id, logLevel=logging.INFO)
#take off the first line first, then pass to parser
xmlf = open(complete, "r")
data = xmlf.read()
xmlf.close()
lines = data.split("\n")
log.msg("Importing %s.xml from: %s"%(scan_id,lines[0]), logLevel=logging.INFO)
xmldata = "".join(lines[1:])
tree = ET.fromstring(xmldata)
except:
log.msg(sys.exc_info()[0], logLevel=logging.DEBUG)
raise
os.rename(complete, newpath+ofile)
#connect the DB
sql_conn=sqlite3.connect(sql_file)
c = sql_conn.cursor()
if len(tree)>0:
#get info about the scan
s_version = tree.get("version")
s_summary=""
if not tree.find("runstats").find("finished") == None:
s_summary = tree.find("runstats").find("finished").get("summary")
i=(s_version, s_summary,True,scan_id,)
c.execute('UPDATE scans_scan SET version=?, summary=?, finished=? WHERE id=?', i)
sql_conn.commit()
sql_conn.close()
def process_nmap_commands(logger_name):
""" Main function. Here we set up the environment, factory and port """
global nmap_commands_file
global nmap_command
global port
global mlog
global verbose_level
global client_timeout
observer = log.PythonLoggingObserver(logger_name)
observer.start()
# Create the factory
factory = Factory()
factory.protocol = NmapServerProtocol
# Create the time based print
loop = task.LoopingCall(show_info)
loop.start(5)
# Create the time based file read
loop2 = task.LoopingCall(read_file_and_fill_nmap_variable)
loop2.start(1)
# To mark idle clients as on hold
loop3 = task.LoopingCall(timeout_idle_clients)
loop3.start(client_timeout) # call every client_timeout seconds
if not sql_file =="":
loop4 = task.LoopingCall(sql_import_loop)
loop4.start(5)
# Create the reactor
reactor.listenSSL(port, factory, ServerContextFactory())
reactor.run()
def main():
global nmap_commands_file
global port
global log_file
global log_level
global mlog
global verbose_level
global start_time
global client_timeout
global sort_type
global pemfile
global cafile
global sql_file
global output_file
global trace_file
start_time = datetime.datetime.now()
try:
opts, args = getopt.getopt(sys.argv[1:], "f:l:L:p:P:c:s:t:v:S:o:", ["nmap-commands=","log-level=","log-file=","port=","pemfile=", "ca-file=","sort-type=","client-timeout=","verbose-level=", "sqlite-file=", "output-file="])
except getopt.GetoptError: usage()
for opt, arg in opts:
if opt in ("-f", "--nmap-commands"): nmap_commands_file=str(arg)
if opt in ("-p", "--port"): port=int(arg)
if opt in ("-l", "--log-level"): log_level=arg
if opt in ("-L", "--log-file"): log_file=arg
if opt in ("-v", "--verbose-level"): verbose_level=int(arg)
if opt in ("-t", "--client-timeout"): client_timeout=int(arg)
if opt in ("-s", "--sort-type"): sort_type=str(arg)
if opt in ("-P", "--pemfile"): pemfile=str(arg)
if opt in ("-c", "--ca-file"): cafile=str(arg)
if opt in ("-S", "--sqlite-file"): sql_file=str(arg)
if opt in ("-o", "--output-file"): output_file=str(arg)
print "Base Dir: %s"%base_dir
try:
# Verify that we have a pem file
try:
temp = os.stat(pemfile)
temp2 = os.stat(cafile)
except OSError:
print 'No pem or cert file given. Use -P or -c'
exit(-1)
if nmap_commands_file != '':
if verbose_level > 0:
version()
# Set up logger
# Set up a specific logger with our desired output level
logger_name = 'MyLogger'
mlog = logging.getLogger(logger_name)
# Set up the log level
numeric_level = getattr(logging, log_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % log_level)
mlog.setLevel(numeric_level)
# Add the log message handler to the logger
handler = logging.handlers.RotatingFileHandler(log_file, backupCount=5)
formater = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formater)
mlog.addHandler(handler)
# End logger
#Get any leftover jobs and populate into jobs/queue
trace_file = nmap_commands_file+'.dnmaptrace'
with open(trace_file,'r') as f:
leftover=f.readlines()
with open(nmap_commands_file, 'r') as f:
curjobs=f.readlines()
for ljob in leftover:
if ljob not in curjobs:
with open(nmap_commands_file, 'a+') as f:
f.writelines(ljob)
#clear trace file
with open(trace_file,'w') as f:
f.write("")
# First fill the variable from the file
read_file_and_fill_nmap_variable()
# Start processing clients
process_nmap_commands(logger_name)
else:
usage()
except KeyboardInterrupt:
# Handle CTRL-C gracefully.
print "Keyboard interruption. Exiting."
sys.exit(1)
if __name__ == '__main__':
main()
| gpl-2.0 | 8,240,667,989,885,825,000 | 29.995221 | 363 | 0.653394 | false |
versusvoid/ugly-nlp-magic | learning.py | 1 | 1270 | #!/usr/bin/env python
parts = {}
partition_types = {}
max_len = 4
import sys
filename = 'partitions'
if len(sys.argv) > 1:
filename = sys.argv[1]
α = 1.0
partitions_count = α
f = open(filename)
for l in f:
partition_parts = l.strip().split()
for i, part in enumerate(partition_parts):
if '_' not in part:
print(l, part)
exit(1)
type, value = part.split('_')
value = value.replace('ь', '').replace('ъ', '')
if value == '': continue
parts.setdefault(value, set()).add(type)
if type != 'корень':
max_len = max(len(value), max_len)
else:
partition_parts[i] = type + '_'
partition_parts = tuple(partition_parts)
partition_types[partition_parts] = partition_types.get(partition_parts, 0.0) + 1.0
partitions_count += 1
f.close()
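# A minimal sketch of the expected 'partitions' input (inferred from the
# parsing above, not an excerpt from real data): each line is a
# whitespace-separated list of "<type>_<value>" tokens, for example
#
#   приставка_по корень_езд суффикс_к окончание_а
#
# Values are stripped of 'ь'/'ъ' before being stored, and tokens whose
# type is not 'корень' also update max_len.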
partitions_count += (len(partition_types) + 1)*α
for partition, count in partition_types.items():
partition_types[partition] = (count + α) / partitions_count
partition_types['__unknown__'] = α / partitions_count
import pickle
f = open('morfology.pick', 'wb')
pickle.dump((parts, partition_types, max_len), f)
f.close()
if '' in parts:
print('aaaaaaaaaaaaa')
exit(100500)
| apache-2.0 | -7,207,247,101,435,700,000 | 24.14 | 86 | 0.603023 | false |
projectshift/shift-schema | shiftschema/validators/multichoice.py | 1 | 1718 | from shiftschema.validators.abstract_validator import AbstractValidator
from shiftschema.result import Error
from shiftschema.exceptions import InvalidOption
class MultiChoice(AbstractValidator):
"""
MultiChoice validator
Accepts a list of values and checks if every item is a valid choice.
"""
invalid_multichoice = '%invalid_multichoice%'
def __init__(self, valid_choices=None, message=None):
"""
Initialize validator
Accepts an iterable of valid choices to check against.
:param valid_choices: iterable, allowed choices to validate against
:param message: str, custom error message
:return: None
"""
if message is not None:
self.invalid_multichoice = message
try:
iter(valid_choices)
except TypeError:
raise InvalidOption('Choices must be an iterable')
self.choices = valid_choices
def validate(self, value, model=None, context=None):
"""
Validate
Perform value validation against validation settings and return
error object.
:param value: list, value to check
:param model: parent model being validated
:param context: object or None, validation context
:return: shiftschema.result.Error
"""
invalid = [item for item in value if item not in self.choices]
if len(invalid):
return Error(
self.invalid_multichoice,
dict(items=', '.join(invalid))
)
# success otherwise
return Error()
| mit | -8,644,545,313,967,999,000 | 30.236364 | 72 | 0.592549 | false |
jzempel/flask-exceptional | setup.py | 1 | 1495 | """
Flask-Exceptional
-----------------
Adds `Exceptional`_ support to Flask applications.
Links
`````
* `documentation <http://packages.python.org/Flask-Exceptional>`_
* `development version
<http://github.com/jzempel/flask-exceptional/zipball/master#egg=Flask-Exceptional-dev>`_
.. _Exceptional: http://www.exceptional.io/
"""
from setuptools import setup
from sys import argv, version_info
if version_info < (2, 6):
install_requires = ['Flask', 'simplejson >= 1.9.1']
else:
install_requires = ['Flask']
if "develop" in argv:
install_requires.append('Sphinx')
install_requires.append('Sphinx-PyPI-upload')
setup(
name='Flask-Exceptional',
version='0.5.4',
url='http://github.com/jzempel/flask-exceptional',
license='BSD',
author='Jonathan Zempel',
author_email='[email protected]',
description='Adds Exceptional support to Flask applications',
long_description=__doc__,
py_modules=['flask_exceptional'],
zip_safe=False,
platforms='any',
install_requires=install_requires,
test_suite='tests',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| bsd-3-clause | 7,622,954,091,689,739,000 | 26.685185 | 90 | 0.651505 | false |
hackers-terabit/portage | pym/portage/tests/resolver/ResolverPlayground.py | 1 | 27366 | # Copyright 2010-2015 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from itertools import permutations
import fnmatch
import sys
import tempfile
import portage
from portage import os
from portage import shutil
from portage.const import (GLOBAL_CONFIG_PATH, PORTAGE_BASE_PATH,
USER_CONFIG_PATH)
from portage.dep import Atom, _repo_separator
from portage.package.ebuild.config import config
from portage.package.ebuild.digestgen import digestgen
from portage._sets import load_default_config
from portage._sets.base import InternalPackageSet
from portage.tests import cnf_path
from portage.util import ensure_dirs, normalize_path
from portage.versions import catsplit
import _emerge
from _emerge.actions import calc_depclean
from _emerge.Blocker import Blocker
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.depgraph import backtrack_depgraph
from _emerge.RootConfig import RootConfig
if sys.hexversion >= 0x3000000:
# pylint: disable=W0622
basestring = str
class ResolverPlayground(object):
"""
This class helps to create the necessary files on disk and
the needed settings instances, etc. for the resolver to do
its work.
"""
config_files = frozenset(("eapi", "layout.conf", "make.conf", "package.accept_keywords",
"package.keywords", "package.license", "package.mask", "package.properties",
"package.provided", "packages",
"package.unmask", "package.use", "package.use.aliases", "package.use.stable.mask",
"soname.provided",
"unpack_dependencies", "use.aliases", "use.force", "use.mask", "layout.conf"))
metadata_xml_template = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
<pkgmetadata>
<maintainer type="person">
<email>[email protected]</email>
<description>Description of the maintainership</description>
</maintainer>
<longdescription>Long description of the package</longdescription>
<use>
%(flags)s
</use>
</pkgmetadata>
"""
def __init__(self, ebuilds={}, binpkgs={}, installed={}, profile={}, repo_configs={}, \
user_config={}, sets={}, world=[], world_sets=[], distfiles={},
eprefix=None, targetroot=False, debug=False):
"""
ebuilds: cpv -> metadata mapping simulating available ebuilds.
installed: cpv -> metadata mapping simulating installed packages.
If a metadata key is missing, it gets a default value.
profile: settings defined by the profile.
"""
self.debug = debug
if eprefix is None:
self.eprefix = normalize_path(tempfile.mkdtemp())
else:
self.eprefix = normalize_path(eprefix)
# Tests may override portage.const.EPREFIX in order to
# simulate a prefix installation. It's reasonable to do
# this because tests should be self-contained such that
# the "real" value of portage.const.EPREFIX is entirely
# irrelevant (see bug #492932).
portage.const.EPREFIX = self.eprefix.rstrip(os.sep)
self.eroot = self.eprefix + os.sep
if targetroot:
self.target_root = os.path.join(self.eroot, 'target_root')
else:
self.target_root = os.sep
self.distdir = os.path.join(self.eroot, "var", "portage", "distfiles")
self.pkgdir = os.path.join(self.eprefix, "pkgdir")
self.vdbdir = os.path.join(self.eroot, "var/db/pkg")
os.makedirs(self.vdbdir)
if not debug:
portage.util.noiselimit = -2
self._repositories = {}
#Make sure the main repo is always created
self._get_repo_dir("test_repo")
self._create_distfiles(distfiles)
self._create_ebuilds(ebuilds)
self._create_binpkgs(binpkgs)
self._create_installed(installed)
self._create_profile(ebuilds, installed, profile, repo_configs, user_config, sets)
self._create_world(world, world_sets)
self.settings, self.trees = self._load_config()
self._create_ebuild_manifests(ebuilds)
portage.util.noiselimit = 0
def reload_config(self):
"""
Reload configuration from disk, which is useful if it has
been modified after the constructor has been called.
"""
for eroot in self.trees:
portdb = self.trees[eroot]["porttree"].dbapi
portdb.close_caches()
self.settings, self.trees = self._load_config()
def _get_repo_dir(self, repo):
"""
Create the repo directory if needed.
"""
if repo not in self._repositories:
if repo == "test_repo":
self._repositories["DEFAULT"] = {"main-repo": repo}
repo_path = os.path.join(self.eroot, "var", "repositories", repo)
self._repositories[repo] = {"location": repo_path}
profile_path = os.path.join(repo_path, "profiles")
try:
os.makedirs(profile_path)
except os.error:
pass
repo_name_file = os.path.join(profile_path, "repo_name")
with open(repo_name_file, "w") as f:
f.write("%s\n" % repo)
return self._repositories[repo]["location"]
def _create_distfiles(self, distfiles):
os.makedirs(self.distdir)
for k, v in distfiles.items():
with open(os.path.join(self.distdir, k), 'wb') as f:
f.write(v)
def _create_ebuilds(self, ebuilds):
for cpv in ebuilds:
a = Atom("=" + cpv, allow_repo=True)
repo = a.repo
if repo is None:
repo = "test_repo"
metadata = ebuilds[cpv].copy()
copyright_header = metadata.pop("COPYRIGHT_HEADER", None)
eapi = metadata.pop("EAPI", "0")
misc_content = metadata.pop("MISC_CONTENT", None)
metadata.setdefault("DEPEND", "")
metadata.setdefault("SLOT", "0")
metadata.setdefault("KEYWORDS", "x86")
metadata.setdefault("IUSE", "")
unknown_keys = set(metadata).difference(
portage.dbapi.dbapi._known_keys)
if unknown_keys:
raise ValueError("metadata of ebuild '%s' contains unknown keys: %s" %
(cpv, sorted(unknown_keys)))
repo_dir = self._get_repo_dir(repo)
ebuild_dir = os.path.join(repo_dir, a.cp)
ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
try:
os.makedirs(ebuild_dir)
except os.error:
pass
with open(ebuild_path, "w") as f:
if copyright_header is not None:
f.write(copyright_header)
f.write('EAPI="%s"\n' % eapi)
for k, v in metadata.items():
f.write('%s="%s"\n' % (k, v))
if misc_content is not None:
f.write(misc_content)
def _create_ebuild_manifests(self, ebuilds):
tmpsettings = config(clone=self.settings)
tmpsettings['PORTAGE_QUIET'] = '1'
for cpv in ebuilds:
a = Atom("=" + cpv, allow_repo=True)
repo = a.repo
if repo is None:
repo = "test_repo"
repo_dir = self._get_repo_dir(repo)
ebuild_dir = os.path.join(repo_dir, a.cp)
ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
portdb = self.trees[self.eroot]["porttree"].dbapi
tmpsettings['O'] = ebuild_dir
if not digestgen(mysettings=tmpsettings, myportdb=portdb):
raise AssertionError('digest creation failed for %s' % ebuild_path)
def _create_binpkgs(self, binpkgs):
# When using BUILD_ID, there can be multiple instances for the
# same cpv. Therefore, binpkgs may be an iterable instead of
# a dict.
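# For example (hypothetical data), both of these forms are accepted:
#   binpkgs={'dev-libs/A-1': {'EAPI': '5'}}
#   binpkgs=[('dev-libs/A-1', {'EAPI': '5', 'BUILD_ID': '1'}),
#            ('dev-libs/A-1', {'EAPI': '5', 'BUILD_ID': '2'})]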
items = getattr(binpkgs, 'items', None)
items = items() if items is not None else binpkgs
for cpv, metadata in items:
a = Atom("=" + cpv, allow_repo=True)
repo = a.repo
if repo is None:
repo = "test_repo"
pn = catsplit(a.cp)[1]
cat, pf = catsplit(a.cpv)
metadata = metadata.copy()
metadata.setdefault("SLOT", "0")
metadata.setdefault("KEYWORDS", "x86")
metadata.setdefault("BUILD_TIME", "0")
metadata["repository"] = repo
metadata["CATEGORY"] = cat
metadata["PF"] = pf
repo_dir = self.pkgdir
category_dir = os.path.join(repo_dir, cat)
if "BUILD_ID" in metadata:
binpkg_path = os.path.join(category_dir, pn,
"%s-%s.xpak"% (pf, metadata["BUILD_ID"]))
else:
binpkg_path = os.path.join(category_dir, pf + ".tbz2")
ensure_dirs(os.path.dirname(binpkg_path))
t = portage.xpak.tbz2(binpkg_path)
t.recompose_mem(portage.xpak.xpak_mem(metadata))
def _create_installed(self, installed):
for cpv in installed:
a = Atom("=" + cpv, allow_repo=True)
repo = a.repo
if repo is None:
repo = "test_repo"
vdb_pkg_dir = os.path.join(self.vdbdir, a.cpv)
try:
os.makedirs(vdb_pkg_dir)
except os.error:
pass
metadata = installed[cpv].copy()
metadata.setdefault("SLOT", "0")
metadata.setdefault("BUILD_TIME", "0")
metadata.setdefault("COUNTER", "0")
metadata.setdefault("KEYWORDS", "~x86")
unknown_keys = set(metadata).difference(
portage.dbapi.dbapi._known_keys)
unknown_keys.discard("BUILD_TIME")
unknown_keys.discard("BUILD_ID")
unknown_keys.discard("COUNTER")
unknown_keys.discard("repository")
unknown_keys.discard("USE")
unknown_keys.discard("PROVIDES")
unknown_keys.discard("REQUIRES")
if unknown_keys:
raise ValueError("metadata of installed '%s' contains unknown keys: %s" %
(cpv, sorted(unknown_keys)))
metadata["repository"] = repo
for k, v in metadata.items():
with open(os.path.join(vdb_pkg_dir, k), "w") as f:
f.write("%s\n" % v)
def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets):
user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH)
try:
os.makedirs(user_config_dir)
except os.error:
pass
for repo in self._repositories:
if repo == "DEFAULT":
continue
repo_dir = self._get_repo_dir(repo)
profile_dir = os.path.join(repo_dir, "profiles")
metadata_dir = os.path.join(repo_dir, "metadata")
os.makedirs(metadata_dir)
#Create $REPO/profiles/categories
categories = set()
for cpv in ebuilds:
ebuilds_repo = Atom("="+cpv, allow_repo=True).repo
if ebuilds_repo is None:
ebuilds_repo = "test_repo"
if ebuilds_repo == repo:
categories.add(catsplit(cpv)[0])
categories_file = os.path.join(profile_dir, "categories")
with open(categories_file, "w") as f:
for cat in categories:
f.write(cat + "\n")
#Create $REPO/profiles/license_groups
license_file = os.path.join(profile_dir, "license_groups")
with open(license_file, "w") as f:
f.write("EULA TEST\n")
repo_config = repo_configs.get(repo)
if repo_config:
for config_file, lines in repo_config.items():
if config_file not in self.config_files and not any(fnmatch.fnmatch(config_file, os.path.join(x, "*")) for x in self.config_files):
raise ValueError("Unknown config file: '%s'" % config_file)
if config_file in ("layout.conf",):
file_name = os.path.join(repo_dir, "metadata", config_file)
else:
file_name = os.path.join(profile_dir, config_file)
if "/" in config_file and not os.path.isdir(os.path.dirname(file_name)):
os.makedirs(os.path.dirname(file_name))
with open(file_name, "w") as f:
for line in lines:
f.write("%s\n" % line)
# Temporarily write empty value of masters until it becomes default.
# TODO: Delete all references to "# use implicit masters" when empty value becomes default.
if config_file == "layout.conf" and not any(line.startswith(("masters =", "# use implicit masters")) for line in lines):
f.write("masters =\n")
#Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
os.makedirs(os.path.join(repo_dir, "eclass"))
# Temporarily write empty value of masters until it becomes default.
if not repo_config or "layout.conf" not in repo_config:
layout_conf_path = os.path.join(repo_dir, "metadata", "layout.conf")
with open(layout_conf_path, "w") as f:
f.write("masters =\n")
if repo == "test_repo":
#Create a minimal profile in /usr/portage
sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile")
os.makedirs(sub_profile_dir)
if not (profile and "eapi" in profile):
eapi_file = os.path.join(sub_profile_dir, "eapi")
with open(eapi_file, "w") as f:
f.write("0\n")
make_defaults_file = os.path.join(sub_profile_dir, "make.defaults")
with open(make_defaults_file, "w") as f:
f.write("ARCH=\"x86\"\n")
f.write("ACCEPT_KEYWORDS=\"x86\"\n")
use_force_file = os.path.join(sub_profile_dir, "use.force")
with open(use_force_file, "w") as f:
f.write("x86\n")
parent_file = os.path.join(sub_profile_dir, "parent")
with open(parent_file, "w") as f:
f.write("..\n")
if profile:
for config_file, lines in profile.items():
if config_file not in self.config_files:
raise ValueError("Unknown config file: '%s'" % config_file)
file_name = os.path.join(sub_profile_dir, config_file)
with open(file_name, "w") as f:
for line in lines:
f.write("%s\n" % line)
#Create profile symlink
os.symlink(sub_profile_dir, os.path.join(user_config_dir, "make.profile"))
make_conf = {
"ACCEPT_KEYWORDS": "x86",
"CLEAN_DELAY": "0",
"DISTDIR" : self.distdir,
"EMERGE_WARNING_DELAY": "0",
"PKGDIR": self.pkgdir,
"PORTAGE_INST_GID": str(portage.data.portage_gid),
"PORTAGE_INST_UID": str(portage.data.portage_uid),
"PORTAGE_TMPDIR": os.path.join(self.eroot, 'var/tmp'),
}
if os.environ.get("NOCOLOR"):
make_conf["NOCOLOR"] = os.environ["NOCOLOR"]
# Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
# need to be inherited by ebuild subprocesses.
if 'PORTAGE_USERNAME' in os.environ:
make_conf['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
if 'PORTAGE_GRPNAME' in os.environ:
make_conf['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']
make_conf_lines = []
for k_v in make_conf.items():
make_conf_lines.append('%s="%s"' % k_v)
if "make.conf" in user_config:
make_conf_lines.extend(user_config["make.conf"])
if not portage.process.sandbox_capable or \
os.environ.get("SANDBOX_ON") == "1":
# avoid problems from nested sandbox instances
make_conf_lines.append('FEATURES="${FEATURES} -sandbox -usersandbox"')
configs = user_config.copy()
configs["make.conf"] = make_conf_lines
for config_file, lines in configs.items():
if config_file not in self.config_files:
raise ValueError("Unknown config file: '%s'" % config_file)
file_name = os.path.join(user_config_dir, config_file)
with open(file_name, "w") as f:
for line in lines:
f.write("%s\n" % line)
#Create /usr/share/portage/config/make.globals
make_globals_path = os.path.join(self.eroot,
GLOBAL_CONFIG_PATH.lstrip(os.sep), "make.globals")
ensure_dirs(os.path.dirname(make_globals_path))
os.symlink(os.path.join(cnf_path, "make.globals"),
make_globals_path)
#Create /usr/share/portage/config/sets/portage.conf
default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets")
try:
os.makedirs(default_sets_conf_dir)
except os.error:
pass
provided_sets_portage_conf = (
os.path.join(cnf_path, "sets", "portage.conf"))
os.symlink(provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf"))
set_config_dir = os.path.join(user_config_dir, "sets")
try:
os.makedirs(set_config_dir)
except os.error:
pass
for sets_file, lines in sets.items():
file_name = os.path.join(set_config_dir, sets_file)
with open(file_name, "w") as f:
for line in lines:
f.write("%s\n" % line)
def _create_world(self, world, world_sets):
#Create /var/lib/portage/world
var_lib_portage = os.path.join(self.eroot, "var", "lib", "portage")
os.makedirs(var_lib_portage)
world_file = os.path.join(var_lib_portage, "world")
world_set_file = os.path.join(var_lib_portage, "world_sets")
with open(world_file, "w") as f:
for atom in world:
f.write("%s\n" % atom)
with open(world_set_file, "w") as f:
for atom in world_sets:
f.write("%s\n" % atom)
def _load_config(self):
create_trees_kwargs = {}
if self.target_root != os.sep:
create_trees_kwargs["target_root"] = self.target_root
env = {
"PORTAGE_REPOSITORIES": "\n".join("[%s]\n%s" % (repo_name, "\n".join("%s = %s" % (k, v) for k, v in repo_config.items())) for repo_name, repo_config in self._repositories.items())
}
trees = portage.create_trees(env=env, eprefix=self.eprefix,
**create_trees_kwargs)
for root, root_trees in trees.items():
settings = root_trees["vartree"].settings
settings._init_dirs()
setconfig = load_default_config(settings, root_trees)
root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
return trees[trees._target_eroot]["vartree"].settings, trees
def run(self, atoms, options={}, action=None):
options = options.copy()
options["--pretend"] = True
if self.debug:
options["--debug"] = True
if action is None:
if options.get("--depclean"):
action = "depclean"
elif options.get("--prune"):
action = "prune"
if "--usepkgonly" in options:
options["--usepkg"] = True
global_noiselimit = portage.util.noiselimit
global_emergelog_disable = _emerge.emergelog._disable
try:
if not self.debug:
portage.util.noiselimit = -2
_emerge.emergelog._disable = True
if action in ("depclean", "prune"):
rval, cleanlist, ordered, req_pkg_count = \
calc_depclean(self.settings, self.trees, None,
options, action, InternalPackageSet(initial_atoms=atoms, allow_wildcard=True), None)
result = ResolverPlaygroundDepcleanResult(
atoms, rval, cleanlist, ordered, req_pkg_count)
else:
params = create_depgraph_params(options, action)
success, depgraph, favorites = backtrack_depgraph(
self.settings, self.trees, options, params, action, atoms, None)
depgraph._show_merge_list()
depgraph.display_problems()
result = ResolverPlaygroundResult(atoms, success, depgraph, favorites)
finally:
portage.util.noiselimit = global_noiselimit
_emerge.emergelog._disable = global_emergelog_disable
return result
def run_TestCase(self, test_case):
if not isinstance(test_case, ResolverPlaygroundTestCase):
raise TypeError("ResolverPlayground needs a ResolverPlaygroundTestCase")
for atoms in test_case.requests:
result = self.run(atoms, test_case.options, test_case.action)
if not test_case.compare_with_result(result):
return
def cleanup(self):
for eroot in self.trees:
portdb = self.trees[eroot]["porttree"].dbapi
portdb.close_caches()
if self.debug:
print("\nEROOT=%s" % self.eroot)
else:
shutil.rmtree(self.eroot)
class ResolverPlaygroundTestCase(object):
def __init__(self, request, **kwargs):
self.all_permutations = kwargs.pop("all_permutations", False)
self.ignore_mergelist_order = kwargs.pop("ignore_mergelist_order", False)
self.ambiguous_merge_order = kwargs.pop("ambiguous_merge_order", False)
self.ambiguous_slot_collision_solutions = kwargs.pop("ambiguous_slot_collision_solutions", False)
self.check_repo_names = kwargs.pop("check_repo_names", False)
self.merge_order_assertions = kwargs.pop("merge_order_assertions", False)
if self.all_permutations:
self.requests = list(permutations(request))
else:
self.requests = [request]
self.options = kwargs.pop("options", {})
self.action = kwargs.pop("action", None)
self.test_success = True
self.fail_msg = None
self._checks = kwargs.copy()
def compare_with_result(self, result):
checks = dict.fromkeys(result.checks)
for key, value in self._checks.items():
if not key in checks:
raise KeyError("Not an available check: '%s'" % key)
checks[key] = value
fail_msgs = []
for key, value in checks.items():
got = getattr(result, key)
expected = value
if key in result.optional_checks and expected is None:
continue
if key == "mergelist":
if not self.check_repo_names:
#Strip repo names if we don't check them
if got:
new_got = []
for cpv in got:
if cpv[:1] == "!":
new_got.append(cpv)
continue
new_got.append(cpv.split(_repo_separator)[0])
got = new_got
if expected:
new_expected = []
for obj in expected:
if isinstance(obj, basestring):
if obj[:1] == "!":
new_expected.append(obj)
continue
new_expected.append(
obj.split(_repo_separator)[0])
continue
new_expected.append(set())
for cpv in obj:
if cpv[:1] != "!":
cpv = cpv.split(_repo_separator)[0]
new_expected[-1].add(cpv)
expected = new_expected
if self.ignore_mergelist_order and got is not None:
got = set(got)
expected = set(expected)
if self.ambiguous_merge_order and got:
expected_stack = list(reversed(expected))
got_stack = list(reversed(got))
new_expected = []
match = True
while got_stack and expected_stack:
got_token = got_stack.pop()
expected_obj = expected_stack.pop()
if isinstance(expected_obj, basestring):
new_expected.append(expected_obj)
if got_token == expected_obj:
continue
# result doesn't match, so stop early
match = False
break
expected_obj = set(expected_obj)
try:
expected_obj.remove(got_token)
except KeyError:
# result doesn't match, so stop early
match = False
break
new_expected.append(got_token)
while got_stack and expected_obj:
got_token = got_stack.pop()
try:
expected_obj.remove(got_token)
except KeyError:
match = False
break
new_expected.append(got_token)
if not match:
# result doesn't match, so stop early
break
if expected_obj:
# result does not match, so stop early
match = False
new_expected.append(tuple(expected_obj))
break
if expected_stack:
# result does not match, add leftovers to new_expected
match = False
expected_stack.reverse()
new_expected.extend(expected_stack)
expected = new_expected
if match and self.merge_order_assertions:
for node1, node2 in self.merge_order_assertions:
if not (got.index(node1) < got.index(node2)):
fail_msgs.append("atoms: (" + \
", ".join(result.atoms) + "), key: " + \
("merge_order_assertions, expected: %s" % \
str((node1, node2))) + \
", got: " + str(got))
elif key == "slot_collision_solutions" and \
self.ambiguous_slot_collision_solutions:
# Tests that use all_permutations can have multiple
# outcomes here.
for x in expected:
if x == got:
expected = x
break
elif key in ("unstable_keywords", "needed_p_mask_changes",
"unsatisfied_deps", "required_use_unsatisfied") and \
expected is not None:
expected = set(expected)
elif key == "forced_rebuilds" and expected is not None:
expected = dict((k, set(v)) for k, v in expected.items())
if got != expected:
fail_msgs.append("atoms: (" + ", ".join(result.atoms) + "), key: " + \
key + ", expected: " + str(expected) + ", got: " + str(got))
if fail_msgs:
self.test_success = False
self.fail_msg = "\n".join(fail_msgs)
return False
return True
class ResolverPlaygroundResult(object):
checks = (
"success", "mergelist", "use_changes", "license_changes",
"unstable_keywords", "slot_collision_solutions",
"circular_dependency_solutions", "needed_p_mask_changes",
"unsatisfied_deps", "forced_rebuilds", "required_use_unsatisfied"
)
optional_checks = (
"forced_rebuilds",
"required_use_unsatisfied",
"unsatisfied_deps"
)
def __init__(self, atoms, success, mydepgraph, favorites):
self.atoms = atoms
self.success = success
self.depgraph = mydepgraph
self.favorites = favorites
self.mergelist = None
self.use_changes = None
self.license_changes = None
self.unstable_keywords = None
self.needed_p_mask_changes = None
self.slot_collision_solutions = None
self.circular_dependency_solutions = None
self.unsatisfied_deps = frozenset()
self.forced_rebuilds = None
self.required_use_unsatisfied = None
if self.depgraph._dynamic_config._serialized_tasks_cache is not None:
self.mergelist = []
host_root = self.depgraph._frozen_config._running_root.root
for x in self.depgraph._dynamic_config._serialized_tasks_cache:
if isinstance(x, Blocker):
self.mergelist.append(x.atom)
else:
repo_str = ""
if x.repo != "test_repo":
repo_str = _repo_separator + x.repo
build_id_str = ""
if (x.type_name == "binary" and
x.cpv.build_id is not None):
build_id_str = "-%s" % x.cpv.build_id
mergelist_str = x.cpv + build_id_str + repo_str
if x.built:
if x.operation == "merge":
desc = x.type_name
else:
desc = x.operation
mergelist_str = "[%s]%s" % (desc, mergelist_str)
if x.root != host_root:
mergelist_str += "{targetroot}"
self.mergelist.append(mergelist_str)
if self.depgraph._dynamic_config._needed_use_config_changes:
self.use_changes = {}
for pkg, needed_use_config_changes in \
self.depgraph._dynamic_config._needed_use_config_changes.items():
new_use, changes = needed_use_config_changes
self.use_changes[pkg.cpv] = changes
if self.depgraph._dynamic_config._needed_unstable_keywords:
self.unstable_keywords = set()
for pkg in self.depgraph._dynamic_config._needed_unstable_keywords:
self.unstable_keywords.add(pkg.cpv)
if self.depgraph._dynamic_config._needed_p_mask_changes:
self.needed_p_mask_changes = set()
for pkg in self.depgraph._dynamic_config._needed_p_mask_changes:
self.needed_p_mask_changes.add(pkg.cpv)
if self.depgraph._dynamic_config._needed_license_changes:
self.license_changes = {}
for pkg, missing_licenses in self.depgraph._dynamic_config._needed_license_changes.items():
self.license_changes[pkg.cpv] = missing_licenses
if self.depgraph._dynamic_config._slot_conflict_handler is not None:
self.slot_collision_solutions = []
handler = self.depgraph._dynamic_config._slot_conflict_handler
for change in handler.changes:
new_change = {}
for pkg in change:
new_change[pkg.cpv] = change[pkg]
self.slot_collision_solutions.append(new_change)
if self.depgraph._dynamic_config._circular_dependency_handler is not None:
handler = self.depgraph._dynamic_config._circular_dependency_handler
sol = handler.solutions
self.circular_dependency_solutions = dict(zip([x.cpv for x in sol.keys()], sol.values()))
if self.depgraph._dynamic_config._unsatisfied_deps_for_display:
self.unsatisfied_deps = set(dep_info[0][1]
for dep_info in self.depgraph._dynamic_config._unsatisfied_deps_for_display)
if self.depgraph._forced_rebuilds:
self.forced_rebuilds = dict(
(child.cpv, set(parent.cpv for parent in parents))
for child_dict in self.depgraph._forced_rebuilds.values()
for child, parents in child_dict.items())
required_use_unsatisfied = []
for pargs, kwargs in \
self.depgraph._dynamic_config._unsatisfied_deps_for_display:
if "show_req_use" in kwargs:
required_use_unsatisfied.append(pargs[1])
if required_use_unsatisfied:
self.required_use_unsatisfied = set(required_use_unsatisfied)
class ResolverPlaygroundDepcleanResult(object):
checks = (
"success", "cleanlist", "ordered", "req_pkg_count",
)
optional_checks = (
"ordered", "req_pkg_count",
)
def __init__(self, atoms, rval, cleanlist, ordered, req_pkg_count):
self.atoms = atoms
self.success = rval == 0
self.cleanlist = cleanlist
self.ordered = ordered
self.req_pkg_count = req_pkg_count
| gpl-2.0 | 1,110,860,997,439,947,800 | 32.090689 | 182 | 0.670906 | false |
krull/docker-zenoss4 | init_fs/usr/local/zenoss/ZenPacks/ZenPacks.zenoss.Microsoft.Windows-2.6.9.egg/ZenPacks/zenoss/Microsoft/Windows/tests/testWinMSSQL.py | 1 | 5139 | ##############################################################################
#
# Copyright (C) Zenoss, Inc. 2014, all rights reserved.
#
# This content is made available according to terms specified in
# License.zenoss under the directory where your Zenoss product is installed.
#
##############################################################################
from ZenPacks.zenoss.Microsoft.Windows.tests.mock import Mock
from ZenPacks.zenoss.Microsoft.Windows.tests.utils import StringAttributeObject
from Products.DataCollector.plugins.DataMaps import ObjectMap
from Products.ZenTestCase.BaseTestCase import BaseTestCase
from ZenPacks.zenoss.Microsoft.Windows.modeler.plugins.zenoss.winrm.WinMSSQL import WinMSSQL
RESULTS = dict(clear='Error parsing zDBInstances',
device=ObjectMap(data=dict(sqlhostname='dbhost0')),
instances=[ObjectMap(data=dict(
id='RTC$instance',
instancename='RTC',
title='RTC'))],
jobs=[ObjectMap(data=dict(
cluster_node_server='//dbhost0\\RTC',
datecreated=' 5/26/2014 00:00:01 PM',
description='No description available.',
enabled='Yes',
id='aaee26a6-7970-4ffb-be57-cd49d0084c2d',
instancename='RTC$instance',
jobid='aaee26a6-7970-4ffb-be57-cd49d0084c2d',
title='syspolicy_purge_history',
username='sa'))],
backups=[ObjectMap(data=dict(
devicetype='Disk',
id='RTC$instancertc_rtc',
instancename='RTC$instance',
physicallocation='c:\\Backup\\rtc.bak',
status='Existing',
title='rtc_rtc'))],
databases=[ObjectMap(data=dict(
cluster_node_server='//dbhost0\\RTC',
collation='Latin1_General_BIN',
createdate='5/26/2014 7:47:57 PM',
defaultfilegroup='PRIMARY',
id='RTC$instance12',
instancename='RTC$instance',
isaccessible='True',
lastbackupdate=None,
lastlogbackupdate=None,
owner='sa',
primaryfilepath='C:\\rtc\\DbPath',
recoverymodel='Simple',
systemobject='False',
title='db0',
version='706'))]
)
class TestProcesses(BaseTestCase):
def setUp(self):
self.plugin = WinMSSQL()
self.device = StringAttributeObject()
def test_process(self):
data = self.plugin.process(self.device, RESULTS, Mock())
self.assertEquals(len(data), 5)
self.assertEquals(data[0].sqlhostname, 'dbhost0')
self.assertEquals(data[1].maps[0].title, 'RTC')
self.assertEquals(data[1].maps[0].instancename, 'RTC')
self.assertEquals(data[2].maps[0].id, 'RTC$instancertc_rtc')
self.assertEquals(data[2].maps[0].devicetype, 'Disk')
self.assertEquals(data[2].maps[0].instancename, 'RTC$instance')
self.assertEquals(data[2].maps[0].physicallocation, 'c:\\Backup\\rtc.bak')
self.assertEquals(data[2].maps[0].status, 'Existing')
self.assertEquals(data[2].maps[0].title, 'rtc_rtc')
self.assertEquals(data[3].maps[0].cluster_node_server, '//dbhost0\\RTC')
self.assertEquals(data[3].maps[0].datecreated, ' 5/26/2014 00:00:01 PM')
self.assertEquals(data[3].maps[0].description, 'No description available.')
self.assertEquals(data[3].maps[0].enabled, 'Yes')
self.assertEquals(data[3].maps[0].id, 'aaee26a6-7970-4ffb-be57-cd49d0084c2d')
self.assertEquals(data[3].maps[0].instancename, 'RTC$instance')
self.assertEquals(data[3].maps[0].jobid, 'aaee26a6-7970-4ffb-be57-cd49d0084c2d')
self.assertEquals(data[3].maps[0].title, 'syspolicy_purge_history')
self.assertEquals(data[3].maps[0].username, 'sa')
self.assertEquals(data[4].maps[0].cluster_node_server, '//dbhost0\\RTC')
self.assertEquals(data[4].maps[0].collation, 'Latin1_General_BIN')
self.assertEquals(data[4].maps[0].createdate, '5/26/2014 7:47:57 PM')
self.assertEquals(data[4].maps[0].defaultfilegroup, 'PRIMARY')
self.assertEquals(data[4].maps[0].id, 'RTC$instance12')
self.assertEquals(data[4].maps[0].instancename, 'RTC$instance')
self.assertEquals(data[4].maps[0].isaccessible, 'True')
self.assertIsNone(data[4].maps[0].lastbackupdate)
self.assertEquals(data[4].maps[0].owner, 'sa')
self.assertEquals(data[4].maps[0].primaryfilepath, 'C:\\rtc\\DbPath')
self.assertEquals(data[4].maps[0].recoverymodel, 'Simple')
self.assertEquals(data[4].maps[0].systemobject, 'False')
self.assertEquals(data[4].maps[0].title, 'db0')
self.assertEquals(data[4].maps[0].version, '706')
| gpl-3.0 | -6,214,625,924,517,370,000 | 47.028037 | 92 | 0.581436 | false |
awamper/draobpilc | draobpilc/processors/merger.py | 1 | 8353 | #!/usr/bin/env python3
# Copyright 2015 Ivan [email protected]
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
from gi.repository import Gtk
from gi.repository import GObject
from draobpilc import common
from draobpilc.processors.processor_textwindow import TextWindow
from draobpilc.widgets.items_processor_base import (
ItemsProcessorBase,
ItemsProcessorPriority
)
COUNTER_LABEL_TPL = (
'<span size="xx-large">%s</span>' % _('Merge <b>%i</b> items.')
)
COMBOBOX_NONE_STRING = 'Draobpilc.Merger.ComboBoxText.Id == None'
class Merger(ItemsProcessorBase):
__gsignals__ = {
'merge': (GObject.SIGNAL_RUN_FIRST, None, (object, bool))
}
def __init__(self):
super().__init__(_('Merge'), ItemsProcessorPriority.HIGHEST)
self._counter_label = Gtk.Label()
self._counter_label.set_markup(COUNTER_LABEL_TPL % 0)
self._counter_label.set_hexpand(True)
self._counter_label.set_vexpand(False)
self._counter_label.set_valign(Gtk.Align.CENTER)
self._counter_label.set_halign(Gtk.Align.CENTER)
self._decorator_label = Gtk.Label()
self._decorator_label.props.margin = ItemsProcessorBase.MARGIN
self._decorator_label.set_label(_('Decorator'))
self._decorator_combo = Gtk.ComboBoxText.new_with_entry()
self._decorator_combo.connect('changed', lambda c: self.update())
self._decorator_combo.props.margin = ItemsProcessorBase.MARGIN
self._separator_label = Gtk.Label()
self._separator_label.props.margin = ItemsProcessorBase.MARGIN
self._separator_label.set_label(_('Separator'))
self._separator_combo = Gtk.ComboBoxText.new_with_entry()
self._separator_combo.connect('changed', lambda c: self.update())
self._separator_combo.props.margin = ItemsProcessorBase.MARGIN
self._text_window = TextWindow()
self._text_window.textview.set_name('MergerTextView')
self._merge_btn = Gtk.Button()
self._merge_btn.set_label(_('Merge'))
self._merge_btn.connect(
'clicked',
lambda b: self.emit('merge', self.items, False)
)
self._merge_del_btn = Gtk.Button()
self._merge_del_btn.set_label(_('Merge & Delete'))
self._merge_del_btn.set_tooltip_text(
_('Merge and delete merged items')
)
self._merge_del_btn.connect(
'clicked',
lambda b: self.emit('merge', self.items, True)
)
self._reverse_order_btn = Gtk.CheckButton(_('Reverse order'))
self._reverse_order_btn.props.margin = ItemsProcessorBase.MARGIN
self._reverse_order_btn.set_active(False)
self._reverse_order_btn.connect('toggled', lambda b: self.update())
buttons_box = Gtk.ButtonBox()
buttons_box.set_layout(Gtk.ButtonBoxStyle.EXPAND)
buttons_box.props.margin = ItemsProcessorBase.MARGIN
buttons_box.add(self._merge_del_btn)
buttons_box.add(self._merge_btn)
self.grid.set_name('MergerBox')
self.grid.attach(self._counter_label, 0, 1, 2, 1)
self.grid.attach(self._decorator_label, 0, 2, 1, 1)
self.grid.attach(self._decorator_combo, 0, 3, 1, 1)
self.grid.attach(self._separator_label, 1, 2, 1, 1)
self.grid.attach(self._separator_combo, 1, 3, 1, 1)
self.grid.attach(self._text_window, 0, 4, 2, 1)
self.grid.attach(self._reverse_order_btn, 0, 5, 2, 1)
self.grid.attach(buttons_box, 0, 6, 2, 1)
common.SETTINGS.connect(
'changed::' + common.MERGE_DEFAULT_DECORATOR,
self._on_settings_changed
)
common.SETTINGS.connect(
'changed::' + common.MERGE_DEFAULT_SEPARATOR,
self._on_settings_changed
)
common.SETTINGS.connect(
'changed::' + common.MERGE_DECORATORS,
lambda s, k: self._update_merge_data()
)
common.SETTINGS.connect(
'changed::' + common.MERGE_SEPARATORS,
lambda s, k: self._update_merge_data()
)
self._update_merge_data()
def _on_settings_changed(self, settings, key):
if key == common.MERGE_DEFAULT_DECORATOR:
combo = self._decorator_combo
else:
combo = self._separator_combo
if not settings[key]:
combo.set_active_id(COMBOBOX_NONE_STRING)
else:
combo.set_active_id(settings[key])
def _update_merge_data(self):
self._decorator_combo.remove_all()
self._separator_combo.remove_all()
decorators = json.loads(common.SETTINGS[common.MERGE_DECORATORS])
decorators.append([_('None'), COMBOBOX_NONE_STRING])
for decorator in decorators:
self._decorator_combo.append(decorator[1], decorator[0])
default_decorator = common.SETTINGS[common.MERGE_DEFAULT_DECORATOR]
if not default_decorator:
self._decorator_combo.set_active_id(COMBOBOX_NONE_STRING)
else:
self._decorator_combo.set_active_id(default_decorator)
separators = json.loads(common.SETTINGS[common.MERGE_SEPARATORS])
separators.append([_('None'), COMBOBOX_NONE_STRING])
for separator in separators:
self._separator_combo.append(separator[1], separator[0])
default_separator = common.SETTINGS[common.MERGE_DEFAULT_SEPARATOR]
if not default_separator:
self._separator_combo.set_active_id(COMBOBOX_NONE_STRING)
else:
self._separator_combo.set_active_id(default_separator)
def _get_merged_text(self):
def get_decorator():
decorator = self._decorator_combo.get_active_id()
if decorator == COMBOBOX_NONE_STRING:
decorator = ''
elif not decorator:
decorator = self._decorator_combo.get_active_text()
try:
decorator = decorator.encode('utf8').decode('unicode-escape')
except UnicodeDecodeError:
pass
return decorator
def get_separator():
separator = self._separator_combo.get_active_id()
if separator == COMBOBOX_NONE_STRING:
separator = ''
elif not separator:
separator = self._separator_combo.get_active_text()
try:
separator = separator.encode('utf8').decode('unicode-escape')
except UnicodeDecodeError:
pass
return separator
result = ''
merge_items = self.items
if self._reverse_order_btn.get_active():
merge_items = list(reversed(merge_items))
for i, item in enumerate(merge_items):
decorator = get_decorator()
separator = get_separator()
result += decorator + item.raw + decorator
if i < len(merge_items) - 1: result += separator
return result
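# A small sketch of the resulting preview (the values are assumptions,
# not defaults from the settings schema): with two selected items whose
# raw texts are 'foo' and 'bar', a decorator of '"' and a separator of
# '\n' produce the merged string
#
#   "foo"
#   "bar"
#
# Escape sequences typed into the combo entries (such as '\n' or '\t')
# are decoded by the unicode-escape step above before being applied.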
def update(self):
self._counter_label.set_markup(
COUNTER_LABEL_TPL % len(self.items)
)
if len(self.items) < 2:
self.buffer.set_text('')
else:
preview = self._get_merged_text()
self.buffer.set_text(preview)
def set_items(self, items):
super().set_items(items)
self.update()
def clear(self):
super().clear()
self._reverse_order_btn.set_active(False)
self._update_merge_data()
self.update()
def can_process(self, items):
if len(items) > 1:
return True
else:
return False
@property
def buffer(self):
return self._text_window.textview.props.buffer
| gpl-3.0 | -8,984,910,460,628,982,000 | 33.804167 | 81 | 0.611517 | false |
8l/beri | cheritest/trunk/tests/cp2/test_cp2_x_clc_align.py | 1 | 2168 | #-
# Copyright (c) 2012 Michael Roe
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
#
# Test that clc raises an exception if the address from which the capability
# is to be loaded is not aligned on a 32-byte boundary.
#
class test_cp2_x_clc_align(BaseBERITestCase):
@attr('capabilities')
def test_cp2_x_clc_align_1(self):
'''Test clc did not load from an unaligned address'''
self.assertRegisterEqual(self.MIPS.a0, 0,
"clc loaded from an unaligned address")
@attr('capabilities')
def test_cp2_x_clc_align_2(self):
'''Test clc raises an exception when the address is unaligned'''
self.assertRegisterEqual(self.MIPS.a2, 1,
"clc did not raise an exception when the address was unaligned")
@attr('capabilities')
def test_cp2_x_clc_align_3(self):
'''Test CP0 cause register was set correctly when address was unaligned'''
self.assertRegisterEqual(self.MIPS.a3, 4*4,
"CP0 status was not set to AdEL when the address was unaligned")
| apache-2.0 | 7,085,144,228,318,819,000 | 39.90566 | 82 | 0.726476 | false |
L0st1nC0d3/TextAdventure | room.py | 1 | 2058 | import json
import sqlite3
from tkinter import *
def get_room(i2d, dbname):
ret = None
con = sqlite3.connect(dbname)
for row in con.execute("select description from rooms where id=?", (i2d,)):
jsontext = row[0]
d = json.loads(jsontext)
d['id'] = i2d
ret = Room(**d)
break
con.close()
return ret
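# A sketch of the data get_room() expects (the table name and JSON keys
# follow the query above and Room's constructor; the sample values are
# assumptions):
#
#   con = sqlite3.connect('game.db')
#   con.execute("insert into rooms (id, description) values (?, ?)",
#               (1, json.dumps({"name": "Hall",
#                               "description": "A dusty hall",
#                               "neighbors": {"n": 2},
#                               "items": ["key"], "npc": [], "npcis": []})))
#   con.commit()
#   room = get_room(1, 'game.db')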
class Room:
def __init__(self, id=0, name='A Room', description='An empty room', neighbors={}, items={}, npc={}, npcis={}):
self.id = id
self.name = name
self.description = description
self.neighbors = neighbors
self.items = items
self.npc = npc
self.npcis = npcis
def _neighbor(self, direction):
if direction in self.neighbors:
return self.neighbors[direction]
else:
return None
def north(self):
return self._neighbor('n')
def south(self):
return self._neighbor('s')
def east(self):
return self._neighbor('e')
def west(self):
return self._neighbor('w')
def up(self):
return self._neighbor('up')
def dw(self):
return self._neighbor('dw')
def show_item(self, character, txar):
txar.insert(END, "\n")
for i in range(len(self.items)):
if (self.items[i] not in character.items) and (self.items[i] not in character.used):
txar.insert(END, "\t* %s" % self.items[i], 'color5')
txar.insert(END, "\n")
def show_keyitems(self, txar):
txar.insert(END, "\n")
for i in range(len(self.npc)):
txar.insert(END, "\t* %s" % self.npc[i], 'color5')
txar.insert(END, "\n")
def show_mechanics(self, txar):
txar.insert(END, "\n")
for i in range(len(self.npcis)):
txar.insert(END, "\t* %s" % self.npcis[i], 'color5')
txar.insert(END, "\n")
def give_item(self, item):
if item in self.items:
return item
else:
return None
| epl-1.0 | -5,393,833,993,856,727,000 | 20.4375 | 115 | 0.535471 | false |
jsfan/requests-oauthlib | requests_oauthlib/oauth2_session.py | 1 | 17215 | from __future__ import unicode_literals
import logging
from oauthlib.common import generate_token, urldecode
from oauthlib.oauth2 import WebApplicationClient, InsecureTransportError
from oauthlib.oauth2 import TokenExpiredError, is_secure_transport
import requests
log = logging.getLogger(__name__)
class TokenUpdated(Warning):
def __init__(self, token):
super(TokenUpdated, self).__init__()
self.token = token
class OAuth2Session(requests.Session):
"""Versatile OAuth 2 extension to :class:`requests.Session`.
Supports any grant type adhering to :class:`oauthlib.oauth2.Client` spec
including the four core OAuth 2 grants.
Can be used to create authorization urls, fetch tokens and access protected
resources using the :class:`requests.Session` interface you are used to.
- :class:`oauthlib.oauth2.WebApplicationClient` (default): Authorization Code Grant
- :class:`oauthlib.oauth2.MobileApplicationClient`: Implicit Grant
- :class:`oauthlib.oauth2.LegacyApplicationClient`: Password Credentials Grant
- :class:`oauthlib.oauth2.BackendApplicationClient`: Client Credentials Grant
Note that the only time you will be using Implicit Grant from python is if
you are driving a user agent able to obtain URL fragments.
"""
def __init__(self, client_id=None, client=None, auto_refresh_url=None,
auto_refresh_kwargs=None, scope=None, redirect_uri=None, token=None,
state=None, token_updater=None, **kwargs):
"""Construct a new OAuth 2 client session.
:param client_id: Client id obtained during registration
:param client: :class:`oauthlib.oauth2.Client` to be used. Default is
WebApplicationClient which is useful for any
hosted application but not mobile or desktop.
:param scope: List of scopes you wish to request access to
:param redirect_uri: Redirect URI you registered as callback
:param token: Token dictionary, must include access_token
and token_type.
:param state: State string used to prevent CSRF. This will be given
when creating the authorization url and must be supplied
when parsing the authorization response.
Can be either a string or a no argument callable.
:auto_refresh_url: Refresh token endpoint URL, must be HTTPS. Supply
this if you wish the client to automatically refresh
your access tokens.
:auto_refresh_kwargs: Extra arguments to pass to the refresh token
endpoint.
:token_updater: Method with one argument, token, to be used to update
your token database on automatic token refresh. If not
set a TokenUpdated warning will be raised when a token
has been refreshed. This warning will carry the token
in its token argument.
:param kwargs: Arguments to pass to the Session constructor.
"""
super(OAuth2Session, self).__init__(**kwargs)
self._client = client or WebApplicationClient(client_id, token=token)
self.token = token or {}
self.scope = scope
self.redirect_uri = redirect_uri
self.state = state or generate_token
self._state = state
self.auto_refresh_url = auto_refresh_url
self.auto_refresh_kwargs = auto_refresh_kwargs or {}
self.token_updater = token_updater
# Allow customizations for non compliant providers through various
# hooks to adjust requests and responses.
self.compliance_hook = {
'access_token_response': set(),
'refresh_token_response': set(),
'protected_request': set(),
}
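# A sketch of the automatic token refresh wiring described above (URLs and
# the persistence helper are placeholders):
#
#   def save_token(token):
#       token_store['oauth'] = token  # assumed application-side storage
#
#   session = OAuth2Session('my_client_id', token=stored_token,
#       auto_refresh_url='https://provider.example/oauth/token',
#       auto_refresh_kwargs={'client_id': 'my_client_id',
#                            'client_secret': 'my_client_secret'},
#       token_updater=save_token)
#   # Expired access tokens are then refreshed during requests and passed
#   # to save_token instead of raising TokenExpiredError.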
def new_state(self):
"""Generates a state string to be used in authorizations."""
try:
self._state = self.state()
log.debug('Generated new state %s.', self._state)
except TypeError:
self._state = self.state
log.debug('Re-using previously supplied state %s.', self._state)
return self._state
@property
def client_id(self):
return getattr(self._client, "client_id", None)
@client_id.setter
def client_id(self, value):
self._client.client_id = value
@client_id.deleter
def client_id(self):
del self._client.client_id
@property
def token(self):
return getattr(self._client, "token", None)
@token.setter
def token(self, value):
self._client.token = value
self._client.populate_token_attributes(value)
@property
def access_token(self):
return getattr(self._client, "access_token", None)
@access_token.setter
def access_token(self, value):
self._client.access_token = value
@access_token.deleter
def access_token(self):
del self._client.access_token
@property
def authorized(self):
"""Boolean that indicates whether this session has an OAuth token
or not. If `self.authorized` is True, you can reasonably expect
OAuth-protected requests to the resource to succeed. If
`self.authorized` is False, you need the user to go through the OAuth
authentication dance before OAuth-protected requests to the resource
will succeed.
"""
return bool(self.access_token)
def authorization_url(self, url, state=None, **kwargs):
"""Form an authorization URL.
:param url: Authorization endpoint url, must be HTTPS.
:param state: An optional state string for CSRF protection. If not
given it will be generated for you.
:param kwargs: Extra parameters to include.
:return: authorization_url, state
"""
state = state or self.new_state()
return self._client.prepare_request_uri(url,
redirect_uri=self.redirect_uri,
scope=self.scope,
state=state,
**kwargs), state
def fetch_token(self, token_url, code=None, authorization_response=None,
body='', auth=None, username=None, password=None, method='POST',
timeout=None, headers=None, verify=True, proxies=None, **kwargs):
"""Generic method for fetching an access token from the token endpoint.
If you are using the MobileApplicationClient you will want to use
token_from_fragment instead of fetch_token.
:param token_url: Token endpoint URL, must use HTTPS.
:param code: Authorization code (used by WebApplicationClients).
:param authorization_response: Authorization response URL, the callback
URL of the request back to you. Used by
WebApplicationClients instead of code.
:param body: Optional application/x-www-form-urlencoded body to add the
include in the token request. Prefer kwargs over body.
:param auth: An auth tuple or method as accepted by requests.
:param username: Username used by LegacyApplicationClients.
:param password: Password used by LegacyApplicationClients.
:param method: The HTTP method used to make the request. Defaults
to POST, but may also be GET. Other methods should
be added as needed.
:param headers: Dict to default request headers with.
:param timeout: Timeout of the request in seconds.
:param verify: Verify SSL certificate.
:param kwargs: Extra parameters to include in the token request.
:return: A token dict
"""
if not is_secure_transport(token_url):
raise InsecureTransportError()
if not code and authorization_response:
self._client.parse_request_uri_response(authorization_response,
state=self._state)
code = self._client.code
elif not code and isinstance(self._client, WebApplicationClient):
code = self._client.code
if not code:
raise ValueError('Please supply either code or '
'authorization_response parameters.')
body = self._client.prepare_request_body(code=code, body=body,
redirect_uri=self.redirect_uri, username=username,
password=password, **kwargs)
client_id = kwargs.get('client_id', '')
if auth is None:
if client_id:
log.debug('Encoding client_id "%s" with client_secret as Basic auth credentials.', client_id)
client_secret = kwargs.get('client_secret', '')
client_secret = client_secret if client_secret is not None else ''
auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
elif username:
if password is None:
raise ValueError('Username was supplied, but not password.')
log.debug('Encoding username, password as Basic auth credentials.')
auth = requests.auth.HTTPBasicAuth(username, password)
headers = headers or {
'Accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
}
self.token = {}
if method.upper() == 'POST':
r = self.post(token_url, data=dict(urldecode(body)),
timeout=timeout, headers=headers, auth=auth,
verify=verify, proxies=proxies)
log.debug('Prepared fetch token request body %s', body)
elif method.upper() == 'GET':
# if method is not 'POST', switch body to querystring and GET
r = self.get(token_url, params=dict(urldecode(body)),
timeout=timeout, headers=headers, auth=auth,
verify=verify, proxies=proxies)
log.debug('Prepared fetch token request querystring %s', body)
else:
raise ValueError('The method kwarg must be POST or GET.')
log.debug('Request to fetch token completed with status %s.',
r.status_code)
log.debug('Request headers were %s', r.request.headers)
log.debug('Request body was %s', r.request.body)
log.debug('Response headers were %s and content %s.',
r.headers, r.text)
log.debug('Invoking %d token response hooks.',
len(self.compliance_hook['access_token_response']))
for hook in self.compliance_hook['access_token_response']:
log.debug('Invoking hook %s.', hook)
r = hook(r)
self._client.parse_request_body_response(r.text, scope=self.scope)
self.token = self._client.token
log.debug('Obtained token %s.', self.token)
return self.token
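    # Illustrative sketch continuing the flow above: after the provider redirects
    # back, the callback URL is exchanged for a token. The endpoint, callback URL
    # and client secret are placeholders.
    #
    #   token = oauth.fetch_token('https://provider.example/oauth/token',
    #                             authorization_response=callback_url,
    #                             client_secret='my-client-secret')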
def token_from_fragment(self, authorization_response):
"""Parse token from the URI fragment, used by MobileApplicationClients.
:param authorization_response: The full URL of the redirect back to you
:return: A token dict
"""
self._client.parse_request_uri_response(authorization_response,
state=self._state)
self.token = self._client.token
return self.token
def refresh_token(self, token_url, refresh_token=None, body='', auth=None,
timeout=None, headers=None, verify=True, proxies=None, **kwargs):
"""Fetch a new access token using a refresh token.
:param token_url: The token endpoint, must be HTTPS.
:param refresh_token: The refresh_token to use.
        :param body: Optional application/x-www-form-urlencoded body to
                     include in the token request. Prefer kwargs over body.
:param auth: An auth tuple or method as accepted by requests.
:param timeout: Timeout of the request in seconds.
:param verify: Verify SSL certificate.
:param kwargs: Extra parameters to include in the token request.
:return: A token dict
"""
if not token_url:
raise ValueError('No token endpoint set for auto_refresh.')
if not is_secure_transport(token_url):
raise InsecureTransportError()
refresh_token = refresh_token or self.token.get('refresh_token')
log.debug('Adding auto refresh key word arguments %s.',
self.auto_refresh_kwargs)
kwargs.update(self.auto_refresh_kwargs)
body = self._client.prepare_refresh_body(body=body,
refresh_token=refresh_token, scope=self.scope, **kwargs)
log.debug('Prepared refresh token request body %s', body)
if headers is None:
headers = {
'Accept': 'application/json',
'Content-Type': (
'application/x-www-form-urlencoded;charset=UTF-8'
),
}
r = self.post(token_url, data=dict(urldecode(body)), auth=auth,
timeout=timeout, headers=headers, verify=verify, withhold_token=True, proxies=proxies)
log.debug('Request to refresh token completed with status %s.',
r.status_code)
log.debug('Response headers were %s and content %s.',
r.headers, r.text)
log.debug('Invoking %d token response hooks.',
len(self.compliance_hook['refresh_token_response']))
for hook in self.compliance_hook['refresh_token_response']:
log.debug('Invoking hook %s.', hook)
r = hook(r)
self.token = self._client.parse_request_body_response(r.text, scope=self.scope)
        if 'refresh_token' not in self.token:
log.debug('No new refresh token given. Re-using old.')
self.token['refresh_token'] = refresh_token
return self.token
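    # Illustrative sketch: a session constructed with auto_refresh_url and a
    # token_updater callback refreshes expired tokens transparently. The endpoint
    # and storage callback are placeholders.
    #
    #   def save_token(token):
    #       token_store['user'] = token
    #
    #   oauth = OAuth2Session('my-client-id', token=saved_token,
    #                         auto_refresh_url='https://provider.example/oauth/token',
    #                         auto_refresh_kwargs={'client_id': 'my-client-id'},
    #                         token_updater=save_token)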
def request(self, method, url, data=None, headers=None, withhold_token=False,
client_id=None, client_secret=None, **kwargs):
"""Intercept all requests and add the OAuth 2 token if present."""
if not is_secure_transport(url):
raise InsecureTransportError()
if self.token and not withhold_token:
log.debug('Invoking %d protected resource request hooks.',
len(self.compliance_hook['protected_request']))
for hook in self.compliance_hook['protected_request']:
log.debug('Invoking hook %s.', hook)
url, headers, data = hook(url, headers, data)
log.debug('Adding token %s to request.', self.token)
try:
url, headers, data = self._client.add_token(url,
http_method=method, body=data, headers=headers)
# Attempt to retrieve and save new access token if expired
except TokenExpiredError:
if self.auto_refresh_url:
log.debug('Auto refresh is set, attempting to refresh at %s.',
self.auto_refresh_url)
# We mustn't pass auth twice.
auth = kwargs.pop('auth', None)
if client_id and client_secret and (auth is None):
log.debug('Encoding client_id "%s" with client_secret as Basic auth credentials.', client_id)
auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
token = self.refresh_token(
self.auto_refresh_url, auth=auth, **kwargs
)
if self.token_updater:
log.debug('Updating token to %s using %s.',
token, self.token_updater)
self.token_updater(token)
url, headers, data = self._client.add_token(url,
http_method=method, body=data, headers=headers)
else:
raise TokenUpdated(token)
else:
raise
log.debug('Requesting url %s using method %s.', url, method)
log.debug('Supplying headers %s and data %s', headers, data)
log.debug('Passing through key word arguments %s.', kwargs)
return super(OAuth2Session, self).request(method, url,
headers=headers, data=data, **kwargs)
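    # Illustrative sketch: with a token in place, protected resources are fetched
    # like any other requests call (URL is a placeholder).
    #
    #   resp = oauth.get('https://api.provider.example/v1/me')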
def register_compliance_hook(self, hook_type, hook):
"""Register a hook for request/response tweaking.
Available hooks are:
access_token_response invoked before token parsing.
refresh_token_response invoked before refresh token parsing.
protected_request invoked before making a request.
If you find a new hook is needed please send a GitHub PR request
or open an issue.
"""
if hook_type not in self.compliance_hook:
            raise ValueError('Hook type %s is not in %s.' %
                             (hook_type, self.compliance_hook))
self.compliance_hook[hook_type].add(hook)
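    # Illustrative sketch: a compliance hook can patch up a non-standard token
    # response before it is parsed. The provider quirk shown here is made up.
    #
    #   def fix_token_response(response):
    #       response._content = response.content.replace(b'"expires"', b'"expires_in"')
    #       return response
    #
    #   oauth.register_compliance_hook('access_token_response', fix_token_response)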
| isc | 8,882,957,335,126,040,000 | 44.784574 | 117 | 0.60668 | false |
Azure/azure-sdk-for-python | sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/crypto/_internal/algorithms/rsa_signing.py | 1 | 2339 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, utils
from ..algorithm import SignatureAlgorithm
from ..transform import SignatureTransform
from ..._enums import SignatureAlgorithm as KeyVaultSignatureAlgorithm
class RsaSignatureTransform(SignatureTransform):
def __init__(self, key, padding_function, hash_algorithm):
super(RsaSignatureTransform, self).__init__()
self._key = key
self._padding_function = padding_function
self._hash_algorithm = hash_algorithm
def sign(self, digest):
return self._key.sign(digest, self._padding_function(digest), utils.Prehashed(self._hash_algorithm))
def verify(self, digest, signature):
self._key.verify(signature, digest, self._padding_function(digest), utils.Prehashed(self._hash_algorithm))
class RsaSsaPkcs1v15(SignatureAlgorithm):
def create_signature_transform(self, key):
return RsaSignatureTransform(key, lambda _: padding.PKCS1v15(), self._default_hash_algorithm)
class RsaSsaPss(SignatureAlgorithm):
def create_signature_transform(self, key):
return RsaSignatureTransform(key, self._get_padding, self._default_hash_algorithm)
def _get_padding(self, digest):
return padding.PSS(mgf=padding.MGF1(self._default_hash_algorithm), salt_length=len(digest))
class Ps256(RsaSsaPss):
_name = KeyVaultSignatureAlgorithm.ps256
_default_hash_algorithm = hashes.SHA256()
class Ps384(RsaSsaPss):
_name = KeyVaultSignatureAlgorithm.ps384
_default_hash_algorithm = hashes.SHA384()
class Ps512(RsaSsaPss):
_name = KeyVaultSignatureAlgorithm.ps512
_default_hash_algorithm = hashes.SHA512()
class Rs256(RsaSsaPkcs1v15):
_name = KeyVaultSignatureAlgorithm.rs256
_default_hash_algorithm = hashes.SHA256()
class Rs384(RsaSsaPkcs1v15):
_name = KeyVaultSignatureAlgorithm.rs384
_default_hash_algorithm = hashes.SHA384()
class Rs512(RsaSsaPkcs1v15):
_name = KeyVaultSignatureAlgorithm.rs512
_default_hash_algorithm = hashes.SHA512()
Ps256.register()
Ps384.register()
Ps512.register()
Rs256.register()
Rs384.register()
Rs512.register()
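# Illustrative sketch (not part of the SDK): signing and verifying a prehashed
# digest locally with the RS256 transform. The key is generated on the fly purely
# for demonstration.
#
#   from cryptography.hazmat.primitives.asymmetric import rsa
#   private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
#   hasher = hashes.Hash(hashes.SHA256())
#   hasher.update(b"payload")
#   digest = hasher.finalize()
#   signer = Rs256().create_signature_transform(private_key)
#   signature = signer.sign(digest)
#   verifier = Rs256().create_signature_transform(private_key.public_key())
#   verifier.verify(digest, signature)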
| mit | 355,841,851,340,012,100 | 30.186667 | 114 | 0.719111 | false |
Esri/ops-server-config | Publish/Portal/UpdatePortalGUIDs.py | 1 | 8962 | #!/usr/bin/env python
#------------------------------------------------------------------------------
# Copyright 2015 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
#Name: UpdatePortalGUIDs.py
#
#Purpose: Performs search and replace on portal items based on the
#          "search" and "replace" keys in the specified JSON file.
#          File must have specified JSON keys:
#          [{"search": "GUID", "replace": "GUID"}]
#
#==============================================================================
import sys
import os
import time
import traceback
import json
from portalpy import Portal
from portalpy import TEXT_BASED_ITEM_TYPES
# Add "Root folder"\SupportFiles to sys path inorder to import
# modules in subfolder
sys.path.append(os.path.join(os.path.dirname(
os.path.dirname(os.path.dirname(sys.argv[0]))), "SupportFiles"))
from Utilities import validate_user_repsonse_yesno
import logging
logging.basicConfig()
search_key = 'search'
replace_key = 'replace'
def format_item_info(item):
itemID = item.get('id')
itemTitle = item.get('title')
itemOwner = item.get('owner')
itemType = item.get('type')
return "Id: {:<34}Owner: {:<25}Type: {:25}Title: {:<40}".format(
itemID, itemOwner, itemType, itemTitle)
def print_args():
""" Print script arguments """
if len(sys.argv) < 5:
print '\n' + os.path.basename(sys.argv[0]) + \
' <PortalURL>' \
' <AdminUser>' \
' <AdminUserPassword>' \
' <IdMappingFile>' \
' {SearchQuery}'
print '\nWhere:'
print '\n\t<PortalURL> (required): URL of Portal ' \
'(i.e. https://fully_qualified_domain_name/arcgis)'
print '\n\t<AdminUser> (required): Primary portal administrator user.'
print '\n\t<AdminUserPassword> (required): Password for AdminUser.'
print '\n\t<IdMappingFile> (required): file containing the item id ' \
'mapping information (i.e. output file from FindOrphanedHostedServices.py script)'
print '\n\t{SearchQuery} (optional): Portal search query.'
return None
else:
# Set variables from parameter values
portal_address = sys.argv[1]
adminuser = sys.argv[2]
password = sys.argv[3]
id_mapping_file_path = None
search_query = None
if len(sys.argv) >= 5:
id_mapping_file_path = sys.argv[4].strip()
if len(sys.argv) >= 6:
search_query = sys.argv[5].strip()
return portal_address, adminuser, password, id_mapping_file_path, search_query
def update_item_properties(portal, item, search, replace):
''' Search/replace values in the item json properties '''
if item is not None:
try:
jsonPropsToUpdate = ['description', 'snippet', 'accessInformation', 'licenseInfo', 'url']
for jsonProp in jsonPropsToUpdate:
is_updated = False
propertyValue = item.get(jsonProp)
if propertyValue:
search_str_list = [search, search.lower(), search.upper()]
for search_str in search_str_list:
if propertyValue.find(search_str) > -1:
propertyValue = propertyValue.replace(search_str, replace)
is_updated = True
if is_updated:
portal.update_item(item['id'], {jsonProp: propertyValue})
except Exception as err:
            print('ERROR: Exception: error occurred while executing update_item_properties for item: "{}"'.format(str(item.get('id'))))
def update_item_data(portal, item, search, replace):
''' Search/replace values in the item data '''
if item is not None:
if item['type'] in TEXT_BASED_ITEM_TYPES:
try:
itemdata = portal.item_data(item['id'])
except Exception as err:
print('ERROR: Exception: update_item_data function could not get item data for item: "{}"'.format(str(item.get('id'))))
itemdata = None
if itemdata:
is_updated = False
search_str_list = [search, search.lower(), search.upper()]
for search_str in search_str_list:
try:
if itemdata.find(search_str) > -1:
itemdata = itemdata.replace(search_str, replace)
is_updated = True
except Exception as err:
print('ERROR: Exception: update_item_data function encountered error during search/replace for item: "{}"'.format(str(item.get('id'))))
if is_updated:
try:
portal.update_item(item['id'], {'text': itemdata})
except Exception as err:
print('ERROR: Exception: update_item_data function encountered error during update of item: "{}"'.format(str(item.get('id'))))
def main():
exit_err_code = 1
starting_cwd = os.getcwd()
# Print/get script arguments
results = print_args()
if not results:
sys.exit(exit_err_code)
portal_address, adminuser, password, id_mapping_file_path, search_query = results
total_success = True
title_break_count = 100
section_break_count = 75
print '=' * title_break_count
print 'Update Portal GUIDs'
print '=' * title_break_count
if not os.path.exists(id_mapping_file_path):
print '\nFile {} does not exist. Exiting.'.format(id_mapping_file_path)
sys.exit(0)
try:
portal = Portal(portal_address, adminuser, password)
print '\n{}'.format('-' * section_break_count)
print '- Searching for portal items...\n'
items_temp = portal.search(q=search_query, sort_field='owner')
items = []
for item in items_temp:
if not item['owner'].startswith('esri_'):
items.append(item)
for item in items:
print format_item_info(item)
print '\nFound {} items.'.format(len(items))
if len(items) > 0:
user_response = raw_input("\nDo you want to continue with the update? Enter 'y' to continue: ")
if validate_user_repsonse_yesno(user_response):
# Open id mapping file
file_dir = os.path.dirname(id_mapping_file_path)
file_name = os.path.basename(id_mapping_file_path)
if len(file_dir) == 0:
file_dir = os.getcwd()
os.chdir(file_dir)
id_mapping = json.load(open(file_name))
print '\n{}'.format('-' * section_break_count)
print '- Updating item and item data...\n'
for item in items:
print format_item_info(item)
for id_map in id_mapping:
search = id_map.get(search_key)
replace = id_map.get(replace_key)
update_item_properties(portal, item, search, replace)
update_item_data(portal, item, search, replace)
except:
total_success = False
# Get the traceback object
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# Concatenate information together concerning the error
# into a message string
pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + \
"\nError Info:\n" + str(sys.exc_info()[1])
# Print Python error messages for use in Python / Python Window
print
print "***** ERROR ENCOUNTERED *****"
print pymsg + "\n"
finally:
os.chdir(starting_cwd)
print '\nDone.'
if total_success:
sys.exit(0)
else:
sys.exit(exit_err_code)
if __name__ == "__main__":
main()
| apache-2.0 | -1,482,835,201,219,073,800 | 36.814346 | 159 | 0.535706 | false |
ooici/coi-services | ion/processes/bootstrap/plugins/bootstrap_ingestion.py | 1 | 1318 | #!/usr/bin/env python
from ion.core.bootstrap_process import BootstrapPlugin
from pyon.public import Container
from interface.objects import IngestionQueue
from interface.services.dm.iingestion_management_service import IngestionManagementServiceProcessClient
class BootstrapIngestion(BootstrapPlugin):
"""
Bootstrap process for ingestion management.
"""
def on_initial_bootstrap(self, process, config, **kwargs):
"""
Defining the ingestion worker process is done in post_process_dispatcher.
Creating transform workers happens here...
"""
ing_ms_client = IngestionManagementServiceProcessClient(process=process)
self.container = Container.instance
exchange_point = config.get_safe('ingestion.exchange_point','science_data')
queues = config.get_safe('ingestion.queues',None)
if queues is None:
queues = [dict(name='science_granule_ingestion', type='SCIDATA')]
for i in xrange(len(queues)):
            item = queues[i]
            # Use .get() so the fallback queue definition above (which has no
            # datastore_name) does not raise a KeyError here.
            queues[i] = IngestionQueue(name=item['name'], type=item['type'], datastore_name=item.get('datastore_name'))
ing_ms_client.create_ingestion_configuration(name='standard ingestion config',
exchange_point_id=exchange_point,
queues=queues)
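# Illustrative sketch of the configuration this bootstrap expects (values are
# hypothetical); each queue entry needs a name, a type and a datastore_name:
#
#   ingestion:
#     exchange_point: science_data
#     queues:
#       - name: science_granule_ingestion
#         type: SCIDATA
#         datastore_name: datasets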
| bsd-2-clause | -4,062,449,384,635,165,700 | 37.764706 | 115 | 0.69044 | false |
rjw57/foldbeam | tests/rendering/test_core.py | 1 | 2549 | import unittest
import cairo
from osgeo.osr import SpatialReference
import numpy as np
from foldbeam.rendering import core
from foldbeam.rendering.renderer import RendererBase, set_geo_transform
from ..utils import surface_hash, output_surface
class TestGeoTransform(unittest.TestCase):
def setUp(self):
# Create a cairo image surface
self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 640, 480)
self.cr = cairo.Context(self.surface)
def test_set_geo_transform(self):
self.assertEqual(self.cr.clip_extents(), (0.0, 0.0, 640.0, 480.0))
set_geo_transform(self.cr, 49, 51, 2, 1, 640, 480)
self.assertEqual(self.cr.clip_extents(), (49.0, 1.0, 51.0, 2.0))
renderer = RendererBase()
renderer.render_callable(self.cr)()
output_surface(self.surface, 'geo_transform')
self.assertEqual(surface_hash(self.surface), 3127943)
class TestCore(unittest.TestCase):
def setUp(self):
# Create a cairo image surface
self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 640, 480)
self.cr = cairo.Context(self.surface)
self.cr.set_source_rgba(0,0,0,0)
self.cr.paint()
set_geo_transform(self.cr, 49, 51, 2, 1, 400, 200)
def test_render(self):
renderer = RendererBase()
renderer.render_callable(self.cr)()
output_surface(self.surface, 'renderer_base')
self.assertEqual(surface_hash(self.surface), 2986593)
class TestBoundary(unittest.TestCase):
def test_bbox(self):
srs = SpatialReference()
srs.ImportFromEPSG(27700) # British national grid
uk_area = core.boundary_from_envelope(core.Envelope(0, 700000, 1300000, 0, srs))
self.assertTrue(uk_area.contains_point(1,1))
self.assertTrue(uk_area.contains_point(690000,1200000))
self.assertTrue(uk_area.contains_point(10000, 10000))
latlng_srs = SpatialReference()
latlng_srs.ImportFromEPSG(4326) # WGS 84 lat/lng
uk_latlng = uk_area.transform_to(latlng_srs, 1000, 1.0)
self.assertTrue(uk_latlng.contains_point(-1.826189, 51.178844)) # Stonehenge
self.assertTrue(uk_latlng.contains_point(-3.07, 58.64)) # John o'Groats
self.assertTrue(uk_latlng.contains_point(-5.716111, 50.068611)) # Land's End
self.assertTrue(uk_latlng.contains_point(-4.333333, 53.283333)) # Anglesey
self.assertTrue(not uk_latlng.contains_point(-8.47, 51.897222)) # Cork
self.assertTrue(not uk_latlng.contains_point(2.3508, 48.8567)) # Paris
| apache-2.0 | -1,319,729,319,389,558,800 | 41.483333 | 88 | 0.674774 | false |
ngr/fe_sm_00 | fe_sm_00/settings.py | 1 | 2959 | """
Django settings for fe_sm_00 project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j#$z)^1b__ju_-3-lwl=77a1l)oo$5@s7c9f5%465r(-buptql'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'fe_sm_00.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fe_sm_00.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'fe_sm_00',
'HOST': 'rds-sm-00.cjlhgo3mq7ui.us-west-2.rds.amazonaws.com',
'USER': 'dj_dbuser',
'PASSWORD': 'P@ssw0rd',
'PORT': '3306',
},
'sqlite': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
| mit | -7,847,905,305,224,313,000 | 25.9 | 71 | 0.669483 | false |
arximboldi/pigeoncide | src/game/pigeon.py | 1 | 13252 | #
# Copyright (C) 2009 Juan Pedro Bolivar Puente, Alberto Villegas Erce
#
# This file is part of Pigeoncide.
#
# Pigeoncide is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Pigeoncide is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from base.signal import weak_slot
from base.log import get_log
from base.util import *
from ent.observer import ObservableSpatialEntity
from ent.panda import ActorEntity, ModelEntity
from ent.physical import (DynamicPhysicalEntity, StandingPhysicalEntity,
OnFloorEntity)
from core.state import StateManager, State
from core.util import *
from core import task
import phys.geom as geom
import phys.mass as mass
from flock import BoidParams, BoidEntity, FlockEntity
from kill import KillableEntity
from crawler import CrawlerEntityDecorator
from weapon import Hittable
from physics import pigeon_category
import random
import weakref
from pandac.PandaModules import Vec3
from functools import partial
_log = get_log (__name__)
class PigeonFood (ModelEntity,
DynamicPhysicalEntity,
ObservableSpatialEntity):
food_model = 'obj/food.egg'
amount = 1.0
food_scale = 0.5
def __init__ (self, *a, **k):
super (PigeonFood, self).__init__ (
model = self.food_model,
geometry = geom.box (2, 2, 0.5),
*a, **k)
self.model.setTexture (loader.loadTexture ('obj/food.png'))
self.model_scale = self.food_scale
# A bit hackish, I guess
self.entities.game.pigeon_food.append (self)
def eat (self, cnt):
self.amount -= cnt
if self.amount <= 0.0:
self.dispose ()
else:
s = self.amount * self.food_scale
self.model_scale = Vec3 (s, s, s)
def dispose (self):
self.entities.game.pigeon_food.remove (self)
super (PigeonFood, self).dispose ()
class Pigeon (BoidEntity,
ActorEntity,
KillableEntity,
StateManager,
OnFloorEntity,
Hittable):
"""
    TODO: Actually a StateManager has too much unnecessary overhead. We
could try to make a lightweight version of it for this kind of
use.
"""
pigeon_model = 'char/pigeon-anims.egg'
pigeon_anims = { 'walk' : 'char/pigeon-walk.egg',
'fly' : 'char/pigeon-fly.egg',
'takeoff' : 'char/pigeon-takeoff.egg',
'land' : 'char/pigeon-land.egg',
'idle' : 'char/pigeon-idle.egg' }
pigeon_sweeping = True
pigeon_min_eat_distance = 100.
pigeon_z_limit = -50.
def __init__ (self,
model = pigeon_model,
anims = pigeon_anims,
boys = [],
*a, **k):
super (Pigeon, self).__init__ (
geometry = geom.capsule (2, 1),
#mass = mass.sphere (1, 2),
model = model,
anims = anims,
category = pigeon_category,
*a, **k)
self.on_is_on_floor_change += self.on_pigeon_is_on_floor
self.on_hit += self.on_pigeon_hit
self.on_death += self.on_pigeon_death
for boy in boys:
boy.on_boy_noise += self.on_boy_noise
boy.on_entity_set_position += self.on_boy_move
self.physical_hpr = Vec3 (90, 0, 0)
self.params = BoidParams ()
self.model_position = Vec3 (0, 0, -2)
self.model_scale = Vec3 (0.08, 0.08, 0.08)
self.model_hpr = Vec3 (180, 0, 0)
self.add_state ('fly', FlyState)
self.add_state ('walk', WalkState)
self.add_state ('follow', FollowState)
self.add_state ('fear', FearState)
self.add_state ('eat', EatState)
self.add_state ('hit', HitState)
self.add_state ('land', LandState)
self.add_state ('return', ReturnState)
self.add_state ('attack', AttackState)
self.model.loop ('fly')
self.curr_animation = 'fly'
self.anim_speed = 50
# Hack: 3D audio seems very slow, so only some pigeons emit
# some kinds of sounds.
if random.uniform (0, 10) < 2.:
self._coo_sounds = map (self.load_sound,
map (lambda x: "snd/pigeon-coo-%i.wav" % x,
range (1, 5)))
self.tasks.add (task.sequence (
task.wait (random.uniform (0, 20)),
task.loop (
task.func_wait (partial (random.uniform, 10, 30)),
task.run (lambda: random.choice (self._coo_sounds).play ()
))))
else:
self._coo_sounds = []
if random.uniform (0, 10) < 2.:
self._fly_sound = self.load_sound ('snd/pigeon-start.wav')
else:
self._fly_sound = None
if random.uniform (0, 10) < 2.:
self._fear_sound = self.load_sound ('snd/pigeon-flap.wav')
else:
self._fear_sound = None
self.start ('land')
def play_sound (self, sound):
#x, y, z = self.position
#u, v, w = self.linear_velocity
#sound.set3dAttributes (x, y, z, u, v, w)
if sound:
sound.play ()
def do_update (self, timer):
"""
Hack to avoid the tunneling effect. We manually sweep the
collision sphere using a cylinder.
"""
super (Pigeon, self).do_update (timer)
vlen = self.linear_velocity.length ()
if self.pigeon_sweeping:
self.geom.setParams (2., vlen * timer.delta)
self.model.setPlayRate (vlen / self.anim_speed, self.curr_animation)
self.check_limits ()
@weak_slot
def on_pigeon_is_on_floor (self, who, val):
if val and self.current and self.current.state_name == 'land':
self.change_state ('walk')
if val and self.curr_animation != 'walk':
self.model.loop ('walk')
self.curr_animation = 'walk'
elif not val and self.curr_animation != 'fly':
self.model.loop ('fly')
self.curr_animation = 'fly'
@weak_slot
def on_pigeon_hit (self, x):
self.enter_state ('hit')
@weak_slot
def on_boy_noise (self, boy, rad):
if distance_sq (boy.position, self.position) < rad ** 2:
if self.depth == 1:
self.enter_state ('fear', boy)
elif self.current and self.current.state_name == 'fear':
self.current.restart ()
@weak_slot
def on_pigeon_death (self):
self.force_finish ()
@weak_slot
def on_boy_move (self, boy, pos):
if distance_sq (pos, self.params.boid_center) > 500. ** 2:
if self.current and self.current.state_name != 'attack':
self.enter_state ('attack', boy)
elif self.current and self.current.state_name == 'attack':
self.current.restart ()
def find_food (self):
food = self.entities.game.pigeon_food
best = None
bestdist = 1000000.
pos = self.position
for f in food:
dist = distance_sq (f.position, pos)
if dist < self.pigeon_min_eat_distance ** 2 and dist < bestdist:
bestdist = dist
best = f
return best
def check_food (self, change = False):
best = self.find_food ()
if best:
self.enter_state ('eat', best)
def check_limits (self):
pos = self.position
if pos.getZ () < self.pigeon_z_limit:
_log.debug ("Pigeon needs repositioning. %s, %s" %
(str (pos), str (self)))
if self.depth == 1:
self.enter_state ('return')
class PigeonState (State, BoidParams):
anim_speed = 50.
def do_setup (self, *a, **k):
self.manager.change_params (self)
self.manager.anim_speed = self.anim_speed
self.do_pigeon_setup (*a, **k)
def do_sink (self):
self.pause ()
def do_unsink (self, *a, **k):
self.manager.change_params (self)
self.manager.anim_speed = self.anim_speed
self.resume ()
self.do_pigeon_unsink (self)
def do_update (self, timer):
super (PigeonState, self).do_update (timer)
def do_release (self):
self.do_pigeon_release ()
do_pigeon_release = nop
do_pigeon_setup = nop
do_pigeon_unsink = nop
class FollowState (PigeonState):
@weak_slot
def on_target_set_position (self, target, pos):
self.boid_target = pos
def do_pigeon_setup (self, target):
target.on_entity_set_position += self.on_target_set_position
self.boid_target = target.position
class FearState (FollowState, task.WaitTask):
anim_speed = 50.
duration = 3.
boid_f_target = 1.
boid_target_inv = True
boid_speed = 150
boid_power = 1000
def do_pigeon_setup (self, *a, **k):
super (FearState, self).do_pigeon_setup (*a, **k)
self.manager.play_sound (self.manager._fear_sound)
def do_pigeon_release (self):
super (FearState, self).do_pigeon_release ()
self.manager.change_state ('fly')
class AttackState (FollowState, task.WaitTask):
anim_speed = 50.
duration = 5.
boid_f_target = 1.
boid_speed = 200
boid_power = 100
def do_pigeon_setup (self, *a, **k):
super (AttackState, self).do_pigeon_setup (*a, **k)
self.manager.play_sound (self.manager._fear_sound)
def do_pigeon_release (self):
super (AttackState, self).do_pigeon_release ()
self.manager.change_state ('fly')
class EatState (FollowState):
boid_flying = False
boid_speed = 20
boid_f_target = 0.1
boid_f_randomness = 0.
boid_power = 100.
glutony = 0.3
eat_distance = 7
anim_speed = 10.
def do_pigeon_setup (self, target):
super (EatState, self).do_pigeon_setup (target)
self.happy_meal = target
def do_update (self, timer):
        super (EatState, self).do_update (timer)
best = self.manager.find_food ()
if best != self.happy_meal and best:
self.manager.change_state ('eat', best)
elif self.happy_meal:
if distance_sq (self.happy_meal.position, self.manager.position) < \
self.eat_distance ** 2:
self.boid_speed = 0.001
self.boid_power = 0.001
self.happy_meal.eat (timer.delta * self.glutony)
else:
self.manager.leave_state ()
class PatrolState (PigeonState):
def do_pigeon_setup (self):
super (PatrolState, self).do_pigeon_setup ()
self.tasks.add (task.sequence (
task.wait (random.uniform (15, 30)),
task.run (self.next_state)))
def do_update (self, timer):
super (PatrolState, self).do_update (timer)
self.manager.check_food ()
def next_state (self):
self.manager.change_state (
'land' if self.state_name == 'fly' else 'fly')
class WalkState (PatrolState):
anim_speed = 7.
boid_flying = False
boid_speed = 10
boid_max_far = 500
boid_f_bounds = 0.001
boid_power = 100.
boid_f_randomness = 0.
class ReturnState (PigeonState):
anim_speed = 50
boid_max_far = 100
boid_f_bounds = 0.1
boid_center = Vec3 (0, 0, 200)
def do_update (self, timer):
super (ReturnState, self).do_update (timer)
if distance_sq (self.manager.position, self.boid_center) < \
self.boid_max_far ** 2:
self.manager.leave_state ()
class LandState (PigeonState):
anim_speed = 50
boid_speed = 60
boid_max_far = 500
boid_f_bounds = 0.001
boid_flying = False
class FlyState (PatrolState):
anim_speed = 50.
boid_speed = 80.
    boid_f_flight = 0.01
def do_pigeon_setup (self, *a, **k):
super (FlyState, self).do_pigeon_setup (*a, **k)
self.manager.play_sound (self.manager._fly_sound)
class HitState (PigeonState):
boid_flocking = False
boid_flying = False
boid_speed = 1000
def do_pigeon_setup (self):
self.tasks.add (task.sequence (task.wait (2.), task.run (self.kill)))
self.manager.pigeon_sweeping = False
def do_pigeon_release (self):
self.manager.pigeon_sweeping = True
| gpl-3.0 | -5,173,898,428,870,489,000 | 29.394495 | 80 | 0.561425 | false |
briancurtin/python-openstacksdk | openstack/tests/unit/network/v2/test_availability_zone.py | 1 | 1629 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from openstack.network.v2 import availability_zone
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
'id': IDENTIFIER,
'name': '1',
'resource': '2',
'state': '3',
}
class TestAvailabilityZone(testtools.TestCase):
def test_basic(self):
sot = availability_zone.AvailabilityZone()
self.assertEqual('availability_zone', sot.resource_key)
self.assertEqual('availability_zones', sot.resources_key)
self.assertEqual('/availability_zones', sot.base_path)
self.assertEqual('network', sot.service.service_type)
self.assertFalse(sot.allow_create)
self.assertFalse(sot.allow_get)
self.assertFalse(sot.allow_update)
self.assertFalse(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_make_it(self):
sot = availability_zone.AvailabilityZone(**EXAMPLE)
self.assertEqual(EXAMPLE['id'], sot.id)
self.assertEqual(EXAMPLE['name'], sot.name)
self.assertEqual(EXAMPLE['resource'], sot.resource)
self.assertEqual(EXAMPLE['state'], sot.state)
| apache-2.0 | 6,330,750,237,324,627,000 | 35.2 | 75 | 0.70043 | false |
turon/openthread | tools/harness-automation/cases/router_8_2_5.py | 1 | 1878 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Router_8_2_5(HarnessCase):
role = HarnessCase.ROLE_ROUTER
case = '8 2 5'
golden_devices_required = 2
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -986,593,235,084,160,300 | 39.826087 | 77 | 0.761448 | false |
DataDog/integrations-extras | aws_pricing/datadog_checks/aws_pricing/aws_pricing.py | 1 | 4071 | import json
from collections import defaultdict
import boto3
from botocore.exceptions import ClientError
from six import iteritems
from six.moves import filter, map
from datadog_checks.base import AgentCheck
from datadog_checks.base.errors import CheckException
class AwsPricingCheck(AgentCheck):
def check(self, instance):
try:
region_name = instance.get('region_name')
if not region_name:
region_name = 'us-east-1'
pricing_client = boto3.client('pricing', region_name=region_name)
service_codes = get_aws_service_codes(pricing_client)
rate_codes_dict = get_rate_codes_dict_from_instance(service_codes, instance)
# Python dictionaries evaluate to false when empty
if not rate_codes_dict:
message = 'No rate codes for existing AWS services were defined, please fix conf.yaml'
self.service_check('aws_pricing.status', self.CRITICAL, message=message)
raise CheckException(message)
missing_rate_codes = defaultdict(list)
for service_code, rate_codes in iteritems(rate_codes_dict):
for rate_code in rate_codes:
price_dimensions = get_aws_prices(pricing_client, service_code, rate_code)
if price_dimensions is None:
missing_rate_codes[service_code].append(rate_code)
continue
name = 'aws.pricing.{}'.format(service_code.lower())
price = get_price_from_price_dimensions(price_dimensions)
tags = get_tags_from_price_dimensions(price_dimensions)
self.gauge(name, price, tags)
# Python dictionaries evaluate to true when not empty
if not missing_rate_codes:
self.service_check('aws_pricing.status', self.OK)
else:
message = 'Pricing data not found for these service rate codes: {}'.format(dict(missing_rate_codes))
self.service_check('aws_pricing.status', self.WARNING, message=message)
except ClientError as client_error:
self.service_check('aws_pricing.status', self.CRITICAL, message=str(client_error))
raise CheckException('Pricing Service client error: {}'.format(str(client_error)))
def get_rate_codes_dict_from_instance(service_codes, instance):
rate_codes_dict = {}
for service_code in service_codes:
instance_rate_codes = instance.get(service_code)
if instance_rate_codes is not None:
rate_codes_dict[service_code] = instance_rate_codes
return rate_codes_dict
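# Illustrative instance configuration sketch (rate codes are placeholders): each
# instance maps AWS service codes to the rate codes whose prices should be
# reported as gauges.
#
#   instances:
#     - region_name: us-east-1
#       AmazonEC2:
#         - ABC123DEF456.JRTCKXETXF.6YS6EN2CT7
#       AmazonS3:
#         - XYZ789UVW012.JRTCKXETXF.6YS6EN2CT7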
def get_aws_service_codes(pricing_client):
response = pricing_client.describe_services(FormatVersion='aws_v1')
service_codes = map(lambda service: service['ServiceCode'], response['Services'])
return service_codes
def get_aws_prices(pricing_client, service_code, rate_code):
response = pricing_client.get_products(
FormatVersion='aws_v1',
ServiceCode=service_code,
Filters=[{'Type': 'TERM_MATCH', 'Field': 'RateCode', 'Value': rate_code}],
MaxResults=1,
)
price_dimensions = None
if len(response['PriceList']) > 0:
response_obj = json.loads(response['PriceList'][0])
terms = response_obj['terms'].values()
price_dimensions = find_price_dimensions_by_rate_code(rate_code, terms)
return price_dimensions
def find_price_dimensions_by_rate_code(rate_code, terms):
rate_code_parts = rate_code.split('.')
term_code = '.'.join(rate_code_parts[:2])
term = next(filter(lambda term: term_code in term, terms))
price_dimensions = term[term_code]['priceDimensions'][rate_code]
return price_dimensions
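# Illustrative note: a rate code has the form '<SKU>.<offer-term-code>.<rate-id>',
# e.g. 'ABC123DEF456.JRTCKXETXF.6YS6EN2CT7' (made-up values). The first two
# segments form the term code used to locate the term above, and the full code
# indexes its priceDimensions entry.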
def get_tags_from_price_dimensions(price_dimensions):
return {'rate_code': price_dimensions['rateCode'], 'unit': price_dimensions['unit']}
def get_price_from_price_dimensions(price_dimensions):
return float(price_dimensions['pricePerUnit']['USD'])
| bsd-3-clause | 5,405,679,731,763,715,000 | 36.009091 | 116 | 0.647998 | false |
ICT4H/dcs-mangrove | mangrove/transport/services/test_mediaSubmissionService.py | 2 | 3800 | from unittest import TestCase
from mock import Mock, patch, PropertyMock
from mangrove.datastore.database import DatabaseManager
from mangrove.form_model.field import PhotoField, TextField, FieldSet
from mangrove.form_model.form_model import FormModel
from mangrove.transport.services.MediaSubmissionService import MediaSubmissionService
class TestMediaSubmissionService(TestCase):
def setUp(self):
dbm = Mock(spec=DatabaseManager)
self.image = Mock()
self.image.size = 1000000
media = {"image.png": self.image}
form_code = "form_code"
self.form_model = Mock(spec=FormModel)
self.form_model.form_code = PropertyMock(return_value=form_code)
self.form_model.is_media_type_fields_present = PropertyMock(return_value=True)
with patch("mangrove.transport.services.MediaSubmissionService.get_form_model_by_code") as get_form_model:
get_form_model.return_value = self.form_model
self.media_submission_service = MediaSubmissionService(dbm, media, form_code)
def test_get_media_fields_and_update_values(self):
values = [{u'image': u'image.png'}]
counter = count_generator()
photo_field = PhotoField('image', 'image', 'image')
with patch(
"mangrove.transport.services.MediaSubmissionService.MediaSubmissionService.create_media_details_document") as document_created:
media_files = self.media_submission_service._get_media_fields_and_update_values([photo_field], values,
counter)
expected_files = {"1-image.png": self.image}
self.assertDictEqual(expected_files, media_files)
def test_get_media_fields_in_a_group_and_update_values(self):
values = [{"group": [{"image": "image.png", "name": "something"}]}]
counter = count_generator()
field1 = PhotoField('image', 'image', 'image')
field2 = TextField(name='name', code='name', label='wat is ur name')
field_set = FieldSet('group', 'group', 'group', field_set=[field1, field2])
with patch(
"mangrove.transport.services.MediaSubmissionService.MediaSubmissionService.create_media_details_document") as document_created:
media_files = self.media_submission_service._get_media_fields_and_update_values([field_set], values,
counter)
expected_files = {"1-image.png": self.image}
self.assertDictEqual(expected_files, media_files)
def test_get_media_fields_in_a_repeat_and_update_values(self):
values = [{"group": [{"image": "image.png", "name": "something"}, {"image": "image.png", "name": "something2"},
{"image": "image.png", "name": "something3"}]}]
counter = count_generator()
field1 = PhotoField('image', 'image', 'image')
field2 = TextField(name='name', code='name', label='wat is ur name')
field_set = FieldSet('group', 'group', 'group', field_set=[field1, field2])
with patch(
"mangrove.transport.services.MediaSubmissionService.MediaSubmissionService.create_media_details_document") as document_created:
media_files = self.media_submission_service._get_media_fields_and_update_values([field_set], values,
counter)
expected_files = {"1-image.png": self.image, "2-image.png": self.image, "3-image.png": self.image}
self.assertDictEqual(expected_files, media_files)
def count_generator():
count = 0
while True:
count += 1
yield count | bsd-3-clause | 1,023,166,376,036,075,600 | 56.590909 | 143 | 0.616842 | false |
nicfit/vexmpp | vexmpp/protocols/disco.py | 1 | 4899 | # -*- coding: utf-8 -*-
from .. import stream
from ..jid import Jid
from ..stanzas import Iq, ElementWrapper
from ..errors import XmppError
from ..utils import xpathFilter
"""XEP 30"""
from .. import getLogger
log = getLogger(__name__)
NS_URI_BASE = "http://jabber.org/protocol/disco"
NS_URI_INFO = "{}#info".format(NS_URI_BASE)
NS_URI_ITEMS = "{}#items".format(NS_URI_BASE)
async def getInfo(stream, to, node=None, timeout=None):
iq = Iq(to=to, request=("query", NS_URI_INFO), attrs={"node": node},
id_prefix="disco#info_get")
return (await stream.sendAndWait(iq, raise_on_error=True, timeout=timeout))
async def getItems(stream, to, node=None, timeout=None):
iq = Iq(to=to, request=("query", NS_URI_ITEMS), id_prefix="disco#items_get")
if node:
iq.set("node", node)
iq = await stream.sendAndWait(iq, raise_on_error=True, timeout=timeout)
return iq
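# Illustrative usage sketch from inside a coroutine (the JID is a placeholder):
#
#   info = await getInfo(stream, to=Jid("conference.example.org"))
#   for child in info.query:
#       ...  # inspect <identity> and <feature> children
#   items = await getItems(stream, to=Jid("conference.example.org"))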
class Identity:
def __init__(self, category, type, name=None, lang=None):
self.category = category
self.type = type
self.name = name
self.lang = lang
def __str__(self):
return ("Identity [category=%s type=%s name=%s lang=%s]"
% (self.category, self.type, self.name, self.lang))
def __hash__(self):
return self.__str__().__hash__()
def __eq__(self, o):
return (type(o) == type(self) and
o.category == self.category and
o.type == self.type and
o.name == self.name and
o.lang == self.lang)
class Info:
def __init__(self):
self.disco_jid = None
self.identities = set()
self.features = set()
self.items = set()
self.node = None
class DiscoCache:
def __init__(self):
self.cache = {}
def clear(self):
self.cache.clear()
def getJidsForFeature(self, feature):
jids = []
for disco_jid, disco_info in self.cache.items():
if feature in disco_info.features:
jids.append(disco_jid)
return jids
class DiscoCacheMixin(stream.Mixin):
def __init__(self):
self._cache = DiscoCache()
super().__init__([('disco_cache', self._cache)])
async def postSession(self, stream):
await self.update(stream, stream.jid.host)
async def update(self, stream, disco_jid):
self._cache.clear()
# Fetch all disco info for the server
disco_info = await self._disco(stream, disco_jid, True)
# Fetch details about all the server's items (but not info about each
# item)
if disco_info and disco_info.items is not None:
for jid in disco_info.items:
try:
await self._disco(stream, jid, False)
except XmppError as ex:
log.warn("Stanza error while disco'ing item '{}': {}"
.format(jid.full, ex))
async def _disco(self, stream, jid, fetch_items):
if not isinstance(jid, Jid):
jid = Jid(jid)
disco_info = Info()
disco_info.disco_jid = jid
info = await getInfo(stream, to=jid, timeout=stream.default_timeout)
for child in info.query:
if child.tag == "{%s}identity" % NS_URI_INFO:
ident = Identity(category=child.attrib['category'],
type=child.attrib['type'],
name=child.attrib.get('name', None))
disco_info.identities.add(ident)
elif child.tag == "{%s}feature" % NS_URI_INFO:
disco_info.features.add(child.attrib['var'])
if fetch_items:
items = await getItems(stream, jid,
timeout=stream.default_timeout)
for child in items.query:
if child.tag == "{%s}item" % NS_URI_ITEMS:
disco_info.items.add(Jid(child.attrib['jid']))
self._cache.cache[jid] = disco_info
return disco_info
class DiscoInfoMixin(stream.Mixin):
def __init__(self):
self._features = []
super().__init__([('disco_info_features', self._features)])
@xpathFilter([("/iq[@type='get']/ns:query", {"ns": NS_URI_INFO}),
("/iq[@type='get']/ns:query", {"ns": NS_URI_ITEMS})])
async def onStanza(self, stream, stanza):
log.debug("disco#info request")
if stanza.query.tag.startswith("{%s}" % NS_URI_INFO):
# disco#info
query = ElementWrapper(stanza.query)
ident = query.appendChild("identity")
ident.set("category", "client")
ident.set("name", "Botch")
ident.set("type", "bot")
query.appendChild("feature").set("var", NS_URI_INFO)
else:
# disco#items
pass
stream.send(stanza.resultResponse())
| mit | 4,763,337,293,667,032,000 | 30.606452 | 80 | 0.551949 | false |
wavefrontHQ/python-client | wavefront_api_client/models/response_container_monitored_service_dto.py | 1 | 4497 | # coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ResponseContainerMonitoredServiceDTO(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'response': 'MonitoredServiceDTO',
'status': 'ResponseStatus'
}
attribute_map = {
'response': 'response',
'status': 'status'
}
def __init__(self, response=None, status=None): # noqa: E501
"""ResponseContainerMonitoredServiceDTO - a model defined in Swagger""" # noqa: E501
self._response = None
self._status = None
self.discriminator = None
if response is not None:
self.response = response
self.status = status
@property
def response(self):
"""Gets the response of this ResponseContainerMonitoredServiceDTO. # noqa: E501
:return: The response of this ResponseContainerMonitoredServiceDTO. # noqa: E501
:rtype: MonitoredServiceDTO
"""
return self._response
@response.setter
def response(self, response):
"""Sets the response of this ResponseContainerMonitoredServiceDTO.
:param response: The response of this ResponseContainerMonitoredServiceDTO. # noqa: E501
:type: MonitoredServiceDTO
"""
self._response = response
@property
def status(self):
"""Gets the status of this ResponseContainerMonitoredServiceDTO. # noqa: E501
:return: The status of this ResponseContainerMonitoredServiceDTO. # noqa: E501
:rtype: ResponseStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ResponseContainerMonitoredServiceDTO.
:param status: The status of this ResponseContainerMonitoredServiceDTO. # noqa: E501
:type: ResponseStatus
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResponseContainerMonitoredServiceDTO, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResponseContainerMonitoredServiceDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| apache-2.0 | 1,715,293,696,044,203,000 | 30.669014 | 409 | 0.599956 | false |
timdiels/0install | zeroinstall/injector/fetch.py | 1 | 23894 | """
Downloads feeds, keys, packages and icons.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _, NeedDownload, logger
import os, sys
from zeroinstall import support
from zeroinstall.support import tasks, basedir, portable_rename
from zeroinstall.injector.namespaces import XMLNS_IFACE, config_site
from zeroinstall.injector import model
from zeroinstall.injector.model import DownloadSource, Recipe, SafeException, escape, DistributionSource
from zeroinstall.injector.iface_cache import PendingFeed, ReplayAttack
from zeroinstall.injector.handler import NoTrustedKeys
from zeroinstall.injector import download
def _escape_slashes(path):
return path.replace('/', '%23')
def _get_feed_dir(feed):
"""The algorithm from 0mirror."""
if '#' in feed:
raise SafeException(_("Invalid URL '%s'") % feed)
scheme, rest = feed.split('://', 1)
assert '/' in rest, "Missing / in %s" % feed
domain, rest = rest.split('/', 1)
for x in [scheme, domain, rest]:
if not x or x.startswith('.'):
raise SafeException(_("Invalid URL '%s'") % feed)
return '/'.join(['feeds', scheme, domain, _escape_slashes(rest)])
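# Illustrative example of the mapping (URL is hypothetical):
#   'http://example.com/feeds/prog.xml' -> 'feeds/http/example.com/feeds%23prog.xml'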
class KeyInfoFetcher:
"""Fetches information about a GPG key from a key-info server.
See L{Fetcher.fetch_key_info} for details.
@since: 0.42
Example:
>>> kf = KeyInfoFetcher(fetcher, 'https://server', fingerprint)
>>> while True:
print kf.info
if kf.blocker is None: break
print kf.status
yield kf.blocker
"""
def __init__(self, fetcher, server, fingerprint):
self.fingerprint = fingerprint
self.info = []
self.blocker = None
if server is None: return
self.status = _('Fetching key information from %s...') % server
dl = fetcher.download_url(server + '/key/' + fingerprint)
from xml.dom import minidom
@tasks.async
def fetch_key_info():
tempfile = dl.tempfile
try:
yield dl.downloaded
self.blocker = None
tasks.check(dl.downloaded)
tempfile.seek(0)
doc = minidom.parse(tempfile)
if doc.documentElement.localName != 'key-lookup':
raise SafeException(_('Expected <key-lookup>, not <%s>') % doc.documentElement.localName)
self.info += doc.documentElement.childNodes
except Exception as ex:
doc = minidom.parseString('<item vote="bad"/>')
root = doc.documentElement
root.appendChild(doc.createTextNode(_('Error getting key information: %s') % ex))
self.info.append(root)
finally:
tempfile.close()
self.blocker = fetch_key_info()
class Fetcher(object):
"""Downloads and stores various things.
@ivar config: used to get handler, iface_cache and stores
@type config: L{config.Config}
@ivar key_info: caches information about GPG keys
@type key_info: {str: L{KeyInfoFetcher}}
"""
__slots__ = ['config', 'key_info', '_scheduler', 'external_store']
def __init__(self, config):
assert config.handler, "API change!"
self.config = config
self.key_info = {}
self._scheduler = None
self.external_store = os.environ.get('ZEROINSTALL_EXTERNAL_STORE')
@property
def handler(self):
return self.config.handler
@property
def scheduler(self):
if self._scheduler is None:
from . import scheduler
self._scheduler = scheduler.DownloadScheduler()
return self._scheduler
# (force is deprecated and ignored)
@tasks.async
def cook(self, required_digest, recipe, stores, force = False, impl_hint = None):
"""Follow a Recipe.
@param impl_hint: the Implementation this is for (if any) as a hint for the GUI
@see: L{download_impl} uses this method when appropriate"""
# Maybe we're taking this metaphor too far?
# Start a download for each ingredient
blockers = []
steps = []
try:
for stepdata in recipe.steps:
cls = StepRunner.class_for(stepdata)
step = cls(stepdata, impl_hint=impl_hint)
step.prepare(self, blockers)
steps.append(step)
while blockers:
yield blockers
tasks.check(blockers)
blockers = [b for b in blockers if not b.happened]
if self.external_store:
# Note: external_store will not yet work with non-<archive> steps.
streams = [step.stream for step in steps]
self._add_to_external_store(required_digest, recipe.steps, streams)
else:
# Create an empty directory for the new implementation
store = stores.stores[0]
tmpdir = store.get_tmp_dir_for(required_digest)
try:
# Unpack each of the downloaded archives into it in turn
for step in steps:
step.apply(tmpdir)
# Check that the result is correct and store it in the cache
store.check_manifest_and_rename(required_digest, tmpdir)
tmpdir = None
finally:
# If unpacking fails, remove the temporary directory
if tmpdir is not None:
support.ro_rmtree(tmpdir)
finally:
for step in steps:
step.close()
def _get_mirror_url(self, feed_url, resource):
"""Return the URL of a mirror for this feed."""
if self.config.mirror is None:
return None
if support.urlparse(feed_url).hostname == 'localhost':
return None
return '%s/%s/%s' % (self.config.mirror, _get_feed_dir(feed_url), resource)
def get_feed_mirror(self, url):
"""Return the URL of a mirror for this feed."""
return self._get_mirror_url(url, 'latest.xml')
def _get_archive_mirror(self, source):
if self.config.mirror is None:
return None
if support.urlparse(source.url).hostname == 'localhost':
return None
if sys.version_info[0] > 2:
from urllib.parse import quote
else:
from urllib import quote
return '{mirror}/archive/{archive}'.format(
mirror = self.config.mirror,
archive = quote(source.url.replace('/', '#'), safe = ''))
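	# Illustrative example: for a source URL 'http://example.com/foo.tgz' the result is
	# '{mirror}/archive/http%3A%23%23example.com%23foo.tgz' ('/' is first replaced by
	# '#', then the whole string is percent-encoded).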
def _get_impl_mirror(self, impl):
return self._get_mirror_url(impl.feed.url, 'impl/' + _escape_slashes(impl.id))
@tasks.async
def get_packagekit_feed(self, feed_url):
"""Send a query to PackageKit (if available) for information about this package.
On success, the result is added to iface_cache.
"""
assert feed_url.startswith('distribution:'), feed_url
master_feed = self.config.iface_cache.get_feed(feed_url.split(':', 1)[1])
if master_feed:
fetch = self.config.iface_cache.distro.fetch_candidates(master_feed)
if fetch:
yield fetch
tasks.check(fetch)
# Force feed to be regenerated with the new information
self.config.iface_cache.get_feed(feed_url, force = True)
def download_and_import_feed(self, feed_url, iface_cache = None):
"""Download the feed, download any required keys, confirm trust if needed and import.
@param feed_url: the feed to be downloaded
@type feed_url: str
@param iface_cache: (deprecated)"""
from .download import DownloadAborted
assert iface_cache is None or iface_cache is self.config.iface_cache
self.config.iface_cache.mark_as_checking(feed_url)
logger.debug(_("download_and_import_feed %(url)s"), {'url': feed_url})
assert not os.path.isabs(feed_url)
if feed_url.startswith('distribution:'):
return self.get_packagekit_feed(feed_url)
primary = self._download_and_import_feed(feed_url, use_mirror = False)
@tasks.named_async("monitor feed downloads for " + feed_url)
def wait_for_downloads(primary):
# Download just the upstream feed, unless it takes too long...
timeout = tasks.TimeoutBlocker(5, 'Mirror timeout') # 5 seconds
yield primary, timeout
tasks.check(timeout)
try:
tasks.check(primary)
if primary.happened:
return # OK, primary succeeded!
# OK, maybe it's just being slow...
logger.info("Feed download from %s is taking a long time.", feed_url)
primary_ex = None
except NoTrustedKeys as ex:
raise # Don't bother trying the mirror if we have a trust problem
except ReplayAttack as ex:
raise # Don't bother trying the mirror if we have a replay attack
except DownloadAborted as ex:
raise # Don't bother trying the mirror if the user cancelled
except SafeException as ex:
# Primary failed
primary = None
primary_ex = ex
logger.warn(_("Feed download from %(url)s failed: %(exception)s"), {'url': feed_url, 'exception': ex})
# Start downloading from mirror...
mirror = self._download_and_import_feed(feed_url, use_mirror = True)
# Wait until both mirror and primary tasks are complete...
while True:
blockers = list(filter(None, [primary, mirror]))
if not blockers:
break
yield blockers
if primary:
try:
tasks.check(primary)
if primary.happened:
primary = None
# No point carrying on with the mirror once the primary has succeeded
if mirror:
logger.info(_("Primary feed download succeeded; aborting mirror download for %s") % feed_url)
mirror.dl.abort()
except SafeException as ex:
primary = None
primary_ex = ex
logger.info(_("Feed download from %(url)s failed; still trying mirror: %(exception)s"), {'url': feed_url, 'exception': ex})
if mirror:
try:
tasks.check(mirror)
if mirror.happened:
mirror = None
if primary_ex:
# We already warned; no need to raise an exception too,
# as the mirror download succeeded.
primary_ex = None
except ReplayAttack as ex:
logger.info(_("Version from mirror is older than cached version; ignoring it: %s"), ex)
mirror = None
primary_ex = None
except SafeException as ex:
logger.info(_("Mirror download failed: %s"), ex)
mirror = None
if primary_ex:
raise primary_ex
return wait_for_downloads(primary)
def _download_and_import_feed(self, feed_url, use_mirror):
"""Download and import a feed.
@param use_mirror: False to use primary location; True to use mirror."""
if use_mirror:
url = self.get_feed_mirror(feed_url)
if url is None: return None
logger.info(_("Trying mirror server for feed %s") % feed_url)
else:
url = feed_url
dl = self.download_url(url, hint = feed_url)
stream = dl.tempfile
@tasks.named_async("fetch_feed " + url)
def fetch_feed():
try:
yield dl.downloaded
tasks.check(dl.downloaded)
pending = PendingFeed(feed_url, stream)
if use_mirror:
# If we got the feed from a mirror, get the key from there too
key_mirror = self.config.mirror + '/keys/'
else:
key_mirror = None
keys_downloaded = tasks.Task(pending.download_keys(self, feed_hint = feed_url, key_mirror = key_mirror), _("download keys for %s") % feed_url)
yield keys_downloaded.finished
tasks.check(keys_downloaded.finished)
if not self.config.iface_cache.update_feed_if_trusted(pending.url, pending.sigs, pending.new_xml):
blocker = self.config.trust_mgr.confirm_keys(pending)
if blocker:
yield blocker
tasks.check(blocker)
if not self.config.iface_cache.update_feed_if_trusted(pending.url, pending.sigs, pending.new_xml):
raise NoTrustedKeys(_("No signing keys trusted; not importing"))
finally:
stream.close()
task = fetch_feed()
task.dl = dl
return task
def fetch_key_info(self, fingerprint):
try:
return self.key_info[fingerprint]
except KeyError:
self.key_info[fingerprint] = key_info = KeyInfoFetcher(self,
self.config.key_info_server, fingerprint)
return key_info
# (force is deprecated and ignored)
def download_impl(self, impl, retrieval_method, stores, force = False):
"""Download an implementation.
@param impl: the selected implementation
@type impl: L{model.ZeroInstallImplementation}
@param retrieval_method: a way of getting the implementation (e.g. an Archive or a Recipe)
@type retrieval_method: L{model.RetrievalMethod}
@param stores: where to store the downloaded implementation
@type stores: L{zerostore.Stores}
@rtype: L{tasks.Blocker}"""
assert impl
assert retrieval_method
if isinstance(retrieval_method, DistributionSource):
return retrieval_method.install(self.handler)
from zeroinstall.zerostore import manifest, parse_algorithm_digest_pair
best = None
for digest in impl.digests:
alg_name, digest_value = parse_algorithm_digest_pair(digest)
alg = manifest.algorithms.get(alg_name, None)
if alg and (best is None or best.rating < alg.rating):
best = alg
required_digest = digest
if best is None:
if not impl.digests:
raise SafeException(_("No <manifest-digest> given for '%(implementation)s' version %(version)s") %
{'implementation': impl.feed.get_name(), 'version': impl.get_version()})
raise SafeException(_("Unknown digest algorithms '%(algorithms)s' for '%(implementation)s' version %(version)s") %
{'algorithms': impl.digests, 'implementation': impl.feed.get_name(), 'version': impl.get_version()})
@tasks.async
def download_impl(method):
original_exception = None
while True:
try:
if isinstance(method, DownloadSource):
blocker, stream = self.download_archive(method, impl_hint = impl,
may_use_mirror = original_exception is None)
try:
yield blocker
tasks.check(blocker)
stream.seek(0)
if self.external_store:
self._add_to_external_store(required_digest, [method], [stream])
else:
self._add_to_cache(required_digest, stores, method, stream)
finally:
stream.close()
elif isinstance(method, Recipe):
blocker = self.cook(required_digest, method, stores, impl_hint = impl)
yield blocker
tasks.check(blocker)
else:
raise Exception(_("Unknown download type for '%s'") % method)
except download.DownloadError as ex:
if original_exception:
logger.info("Error from mirror: %s", ex)
raise original_exception
else:
original_exception = ex
mirror_url = self._get_impl_mirror(impl)
if mirror_url is not None:
logger.info("%s: trying implementation mirror at %s", ex, mirror_url)
method = model.DownloadSource(impl, mirror_url,
None, None, type = 'application/x-bzip-compressed-tar')
continue # Retry
raise
break
self.handler.impl_added_to_store(impl)
return download_impl(retrieval_method)
def _add_to_cache(self, required_digest, stores, retrieval_method, stream):
assert isinstance(retrieval_method, DownloadSource)
stores.add_archive_to_cache(required_digest, stream, retrieval_method.url, retrieval_method.extract,
type = retrieval_method.type, start_offset = retrieval_method.start_offset or 0)
def _add_to_external_store(self, required_digest, steps, streams):
from zeroinstall.zerostore.unpack import type_from_url
# combine archive path, extract directory and MIME type arguments in an alternating fashion
		paths = [stream.name for stream in streams]
		extracts = [step.extract or "" for step in steps]
		types = [step.type or type_from_url(step.url) for step in steps]
args = [None]*(len(paths)+len(extracts)+len(types))
args[::3] = paths
args[1::3] = extracts
args[2::3] = types
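		# Illustrative layout: paths [p1, p2], extracts [e1, e2] and types [t1, t2]
		# produce args == [p1, e1, t1, p2, e2, t2].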
# close file handles to allow external processes access
for stream in streams:
stream.close()
# delegate extracting archives to external tool
import subprocess
subprocess.call([self.external_store, "add", required_digest] + args)
# delete temp files
for path in paths:
os.remove(path)
# (force is deprecated and ignored)
def download_archive(self, download_source, force = False, impl_hint = None, may_use_mirror = False):
"""Fetch an archive. You should normally call L{download_impl}
instead, since it handles other kinds of retrieval method too.
It is the caller's responsibility to ensure that the returned stream is closed.
"""
from zeroinstall.zerostore import unpack
url = download_source.url
if not (url.startswith('http:') or url.startswith('https:') or url.startswith('ftp:')):
raise SafeException(_("Unknown scheme in download URL '%s'") % url)
mime_type = download_source.type
if not mime_type:
mime_type = unpack.type_from_url(download_source.url)
if not mime_type:
raise SafeException(_("No 'type' attribute on archive, and I can't guess from the name (%s)") % download_source.url)
if not self.external_store:
unpack.check_type_ok(mime_type)
if may_use_mirror:
mirror = self._get_archive_mirror(download_source)
else:
mirror = None
dl = self.download_url(download_source.url, hint = impl_hint, mirror_url = mirror)
if download_source.size is not None:
dl.expected_size = download_source.size + (download_source.start_offset or 0)
# (else don't know sizes for mirrored archives)
return (dl.downloaded, dl.tempfile)
# (force is deprecated and ignored)
def download_icon(self, interface, force = False):
"""Download an icon for this interface and add it to the
icon cache. If the interface has no icon do nothing.
@return: the task doing the import, or None
@rtype: L{tasks.Task}"""
logger.debug("download_icon %(interface)s", {'interface': interface})
modification_time = None
existing_icon = self.config.iface_cache.get_icon_path(interface)
if existing_icon:
file_mtime = os.stat(existing_icon).st_mtime
from email.utils import formatdate
modification_time = formatdate(timeval = file_mtime, localtime = False, usegmt = True)
feed = self.config.iface_cache.get_feed(interface.uri)
if feed is None:
return None
# Find a suitable icon to download
for icon in feed.get_metadata(XMLNS_IFACE, 'icon'):
type = icon.getAttribute('type')
if type != 'image/png':
logger.debug(_('Skipping non-PNG icon'))
continue
source = icon.getAttribute('href')
if source:
break
logger.warn(_('Missing "href" attribute on <icon> in %s'), interface)
else:
logger.info(_('No PNG icons found in %s'), interface)
return
dl = self.download_url(source, hint = interface, modification_time = modification_time)
@tasks.async
def download_and_add_icon():
stream = dl.tempfile
try:
yield dl.downloaded
tasks.check(dl.downloaded)
if dl.unmodified: return
stream.seek(0)
import shutil, tempfile
icons_cache = basedir.save_cache_path(config_site, 'interface_icons')
tmp_file = tempfile.NamedTemporaryFile(dir = icons_cache, delete = False)
shutil.copyfileobj(stream, tmp_file)
tmp_file.close()
icon_file = os.path.join(icons_cache, escape(interface.uri))
portable_rename(tmp_file.name, icon_file)
finally:
stream.close()
return download_and_add_icon()
def download_impls(self, implementations, stores):
"""Download the given implementations, choosing a suitable retrieval method for each.
If any of the retrieval methods are DistributionSources and
need confirmation, handler.confirm is called to check that the
installation should proceed.
"""
unsafe_impls = []
to_download = []
for impl in implementations:
logger.debug(_("start_downloading_impls: for %(feed)s get %(implementation)s"), {'feed': impl.feed, 'implementation': impl})
source = self.get_best_source(impl)
if not source:
raise SafeException(_("Implementation %(implementation_id)s of interface %(interface)s"
" cannot be downloaded (no download locations given in "
"interface!)") % {'implementation_id': impl.id, 'interface': impl.feed.get_name()})
to_download.append((impl, source))
if isinstance(source, DistributionSource) and source.needs_confirmation:
unsafe_impls.append(source.package_id)
@tasks.async
def download_impls():
if unsafe_impls:
confirm = self.handler.confirm_install(_('The following components need to be installed using native packages. '
'These come from your distribution, and should therefore be trustworthy, but they also '
'run with extra privileges. In particular, installing them may run extra services on your '
'computer or affect other users. You may be asked to enter a password to confirm. The '
'packages are:\n\n') + ('\n'.join('- ' + x for x in unsafe_impls)))
yield confirm
tasks.check(confirm)
blockers = []
for impl, source in to_download:
blockers.append(self.download_impl(impl, source, stores))
			# Record the first error; log the rest
error = []
def dl_error(ex, tb = None):
if error:
self.handler.report_error(ex)
else:
error.append((ex, tb))
while blockers:
yield blockers
tasks.check(blockers, dl_error)
blockers = [b for b in blockers if not b.happened]
if error:
from zeroinstall import support
support.raise_with_traceback(*error[0])
if not to_download:
return None
return download_impls()
def get_best_source(self, impl):
"""Return the best download source for this implementation.
@rtype: L{model.RetrievalMethod}"""
if impl.download_sources:
return impl.download_sources[0]
return None
def download_url(self, url, hint = None, modification_time = None, expected_size = None, mirror_url = None):
"""The most low-level method here; just download a raw URL.
It is the caller's responsibility to ensure that dl.stream is closed.
@param url: the location to download from
@param hint: user-defined data to store on the Download (e.g. used by the GUI)
@param modification_time: don't download unless newer than this
		@param mirror_url: an alternative URL to try if this one fails
@type mirror_url: str
@rtype: L{download.Download}
@since: 1.5
"""
if self.handler.dry_run:
raise NeedDownload(url)
dl = download.Download(url, hint = hint, modification_time = modification_time, expected_size = expected_size, auto_delete = not self.external_store)
dl.mirror = mirror_url
self.handler.monitor_download(dl)
dl.downloaded = self.scheduler.download(dl)
return dl
class StepRunner(object):
"""The base class of all step runners.
@since: 1.10"""
def __init__(self, stepdata, impl_hint):
self.stepdata = stepdata
self.impl_hint = impl_hint
def prepare(self, fetcher, blockers):
pass
@classmethod
def class_for(cls, model):
for subcls in cls.__subclasses__():
if subcls.model_type == type(model):
return subcls
assert False, "Couldn't find step runner for %s" % (type(model),)
def close(self):
"""Release any resources (called on success or failure)."""
pass
class RenameStepRunner(StepRunner):
"""A step runner for the <rename> step.
@since: 1.10"""
model_type = model.RenameStep
def apply(self, basedir):
source = native_path_within_base(basedir, self.stepdata.source)
dest = native_path_within_base(basedir, self.stepdata.dest)
os.rename(source, dest)
class DownloadStepRunner(StepRunner):
"""A step runner for the <archive> step.
@since: 1.10"""
model_type = model.DownloadSource
def prepare(self, fetcher, blockers):
self.blocker, self.stream = fetcher.download_archive(self.stepdata, impl_hint = self.impl_hint, may_use_mirror = True)
assert self.stream
blockers.append(self.blocker)
def apply(self, basedir):
from zeroinstall.zerostore import unpack
assert self.blocker.happened
unpack.unpack_archive_over(self.stepdata.url, self.stream, basedir,
extract = self.stepdata.extract,
type=self.stepdata.type,
start_offset = self.stepdata.start_offset or 0)
def close(self):
self.stream.close()
def native_path_within_base(base, crossplatform_path):
"""Takes a cross-platform relative path (i.e using forward slashes, even on windows)
and returns the absolute, platform-native version of the path.
If the path does not resolve to a location within `base`, a SafeError is raised.
@since: 1.10
"""
assert os.path.isabs(base)
if crossplatform_path.startswith("/"):
raise SafeException("path %r is not within the base directory" % (crossplatform_path,))
native_path = os.path.join(*crossplatform_path.split("/"))
fullpath = os.path.realpath(os.path.join(base, native_path))
base = os.path.realpath(base)
if not fullpath.startswith(base + os.path.sep):
raise SafeException("path %r is not within the base directory" % (crossplatform_path,))
return fullpath
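# Illustrative examples: native_path_within_base('/tmp/impl', 'a/b.txt') returns
# '/tmp/impl/a/b.txt', while a relative path that resolves outside the base, such
# as '../../etc/passwd', raises SafeException.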
| lgpl-2.1 | -6,300,733,466,448,456,000 | 33.429395 | 151 | 0.697916 | false |
hiryou/MLPractice | neural_net/by_numpy.py | 1 | 3555 | from builtins import classmethod
import numpy as np
from datetime import datetime as dt
"""
Inspired by https://repl.it/repls/OrganicVainDoom#main.py
"""
class NeuralNet(object):
train_cnt = 0
epoch = 0
eta = 0.5
# TODO make constructor-only param
h_layers = [3]
X = None
Y = None
X_size = 0 # neural count
Y_size = 0 # neural count
# hidden layers & last output layers
W = list()
H = list()
def __init__(self, X, Y, epoch):
self.X, self.Y = self.__scaled(X, Y)
self.train_cnt = len(self.X)
self.X_size = len(self.X[0])
self.Y_size = len(self.Y[0])
self.epoch = epoch
self.h_layers.append(self.Y_size)
left_neuron_cnt = self.X_size
for neuron_cnt in self.h_layers:
ww = np.random.randn(left_neuron_cnt, neuron_cnt)
hh = np.full((self.train_cnt, neuron_cnt), -0.0001)
self.W.append(ww)
self.H.append(hh)
left_neuron_cnt = neuron_cnt
pass
@staticmethod
def sigmoid(s):
return 1 / (1 + np.exp(-s))
@staticmethod
def sigmoid_prime(sig):
return sig * (1 - sig)
def get_train_loss(self):
Y = self.__scaled_back(self.Y)
H_last = self.__scaled_back(self.H[-1])
return np.mean(
np.square(Y - H_last)
)
pass
def do_train(self):
for i in range(self.epoch):
self.__forward(self.X)
self.__backward()
#print("epoch = {}: loss = {}".format( i, str(self.get_train_loss()) ))
def __scaled(self, X, Y):
# normalize
# max 24h a day
# max score = 100
return X/24, Y/100
def __scaled_back(self, Y):
# max score = 100
return Y*100
def __forward(self, X):
left_mt = X
for idx in range(len(self.h_layers)):
net_H_idx = np.dot(left_mt, self.W[idx])
self.H[idx] = self.sigmoid(net_H_idx)
left_mt = self.H[idx]
return self.H[-1]
def __backward(self):
# delta: start initially from layer H2 (output)
delta_H = [None for idx in range(len(self.h_layers))]
delta_H[-1] = (self.Y - self.H[-1]) * self.sigmoid_prime(self.H[-1])
# then delta: reversed loop from semi-last element -> beginning
for idx in range(len(self.h_layers)-2, -1, -1):
delta_H[idx] = delta_H[idx+1].dot(self.W[idx+1].T) * self.sigmoid_prime(self.H[idx])
pass
# update weights: start from right most layer
for idx in range(len(self.h_layers) - 1, 0, -1):
self.W[idx] += (1 / self.train_cnt) * self.eta * self.H[idx-1].T.dot(delta_H[idx])
pass
# update weights: at layer W0 back to input
self.W[0] += (1 / self.train_cnt) * self.eta * self.X.T.dot(delta_H[0])
f = open('study-sleep-grade.txt')
lines = f.readlines()
f.close()
# print(lines)
x_all = []
y_all = []
for line in lines:
p = line.strip().split(", ")
y = p[0].strip().split(' ')
x = p[1].strip().split(' ')
x_all.append(x)
y_all.append(y)
INP = np.array((x_all[:-1]), dtype=float)
Y = np.array((y_all[:-1]), dtype=float)
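# Shape sketch (illustrative, assuming each line of study-sleep-grade.txt holds
# one grade followed by two inputs, as the parsing above suggests): INP is (n, 2)
# and Y is (n, 1); with h_layers ending up as [3, 1], W[0] is (2, 3) and W[1] is
# (3, 1), so H[0] is (n, 3), H[1] is (n, 1), and the deltas in __backward line up
# with the corresponding H matrices.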
nn = NeuralNet(INP, Y, epoch=1000)
print("-------------------------")
print("training ...")
tic = dt.now()
nn.do_train()
toc = dt.now()
print("-------------------------")
print("train loss = {}".format( str(nn.get_train_loss()) ))
# report the elapsed time from a full timedelta so it does not wrap around at one second
print("Train taken {} micro-secs".format('{:,}'.format(int((toc - tic).total_seconds() * 1000000))))
| mit | 7,135,865,273,220,866,000 | 26.55814 | 96 | 0.536428 | false |
mohzah/rotopia | listener.py | 1 | 8277 |
from driver import Driver
# from robot.output import LOGGER
import logging, time, sys
from ConfigParser import SafeConfigParser
# log_path = '/home/m/repo/db/created_suites.log'
# logging.basicConfig(filename=log_path, format='%(message)s\n', level=logging.DEBUG)
def get_plan_name(path):
'''Returns directory name of the suite.
    Based on the path of the test suite, returns the top-level directory name to be used as the plan name.
Args:
path (string): absolute path of the suite
'''
idx = path.find(HIERARCHY_PREFIX) # HIERARCHY_PREFIX is read from config file
path = path[idx+len(HIERARCHY_PREFIX):]
plan_name = path[:path.find('/')]
return plan_name
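# Illustrative example (hypothetical values): with HIERARCHY_PREFIX = 'suites/',
# get_plan_name('/home/user/suites/SmokeTests/login.robot') returns 'SmokeTests',
# i.e. the first directory component after the prefix.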
def read_config(server_name, https=False):
'''
    Different server names can be used to name different sections in the config file; hence,
    the config file can contain different settings for different servers.
Args:
server_name (str): specifies which server url and credentials to be used
https (bool): if true, uses url started with "https" in the config file, named "urls"
'''
parser = SafeConfigParser()
parser.read('config.ini')
url = 'url'
if https:
url = 'urls'
settings = {}
settings['username'] = parser.get(server_name, 'username')
settings['password'] = parser.get(server_name, 'password')
settings['url'] = parser.get(server_name, url)
settings['database'] = parser.get('database', 'dbpath')
settings['prefix'] = parser.get(server_name, 'HIERARCHY_PREFIX')
global HIERARCHY_PREFIX
HIERARCHY_PREFIX = settings['prefix']
return settings
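# Illustrative config.ini sketch (section and option names are taken from the
# reads above; the values are made up):
#   [localbugz]
#   username = tester
#   password = secret
#   url = http://testopia.example.com/xmlrpc.cgi
#   urls = https://testopia.example.com/xmlrpc.cgi
#   HIERARCHY_PREFIX = suites/
#   [database]
#   dbpath = /path/to/exported_cases.db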
class Listener:
'''
http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#using-listener-interface
'''
ROBOT_LISTENER_API_VERSION = 2 # Do Not Change This
def __init__(self, build, environment):
'''
        Parameters come from the invoking command, i.e. variables separated by ":" in the pybot command
Args:
build (str): build name
environment (str): environment name
'''
settings = read_config('localbugz')
username = settings['username']
# login of the manager
# todo: omit it, make driver api more consistent
self.MANAGER = username
testopia_url = settings['url']
self.driver = Driver(testopia_url, username, settings['password'])
self.build = build
self.environment = environment
self.driver.set_build_and_environment(build, environment)
self.no_run_id = False
self.conn = Connector(settings['database'])
def start_suite(self, name, attrs):
'''
http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#listener-interface-method-signatures
'''
doc = attrs['doc'] # doc is <type 'unicode'>
self.absolute_path = attrs['source']
        tests = attrs['tests'] # empty suites return an empty list
        if tests: # empty list -> False; any non-empty list -> True
if not self.conn.is_exported_suite(self.absolute_path):
                # This is the first time this suite is executed, so a Run (and possibly a Plan) needs to be created
plan_name = get_plan_name(self.absolute_path)
plan_id = None
if self.conn.is_exported_plan(plan_name):
# a plan is already created for the suites in this directory
plan_id = self.conn.get_PlanID(plan_name)
else:
                    # no plan has been created earlier that this suite belongs to;
                    # plans are not created for every new suite, only for new suites
                    # under a different top-level directory
plan_id = self.driver.create_plan(plan_name)['plan_id']
self.conn.insert_plan_as_exported(plan_name, plan_id)
# For every new suite a Run will be created:
run = self.driver.create_run(plan_id, str(self.build), self.MANAGER, summary=str(doc))
self.run_id = run['run_id']
self.conn.insert_as_exported(self.absolute_path, self.run_id)
else:
            # This is not a new suite and a Run already exists for it
self.run_id = self.conn.get_RunID(self.absolute_path)
def start_test(self, name, attrs):#todo: update doc string
'''case [Documentation] must start with "case_id;;"
'''
self.newCase = False
caselongname = attrs['longname'] #todo: should change to 'id', should work in new version of robot
if not self.conn.is_exported_case(caselongname, self.absolute_path):
            # This case is newly added to the test suite or is being executed for the first time
self.newCase = True
self.actions = []
self.results = []
try:
summary = name + ' - ' + attrs['doc']
plan = self.driver.get_test_plan(self.run_id)
case = self.driver.create_case(priority='Normal', summary=summary, plans=plan,
tester=self.MANAGER)
self.case_id = case['case_id'] #todo: should case_id be class variable or local is ok?
self.driver.add_to_run(self.case_id, self.run_id)
self.conn.insert_case_as_exported(caselongname, self.absolute_path, self.case_id)
except:
print "Unexpected error in new TestCase processing:", sys.exc_info()[0]
raise
else:
self.case_id = self.conn.get_CaseID(caselongname, self.absolute_path)
self.driver.caserun_running(self.run_id, self.case_id)
def end_test(self, name, attrs):
status = attrs['status']
if status == 'PASS':
self.driver.caserun_passed(self.run_id, self.case_id)
elif status == 'FAIL':
self.driver.caserun_failed(self.run_id, self.case_id)
if self.newCase:
            # Steps and results for a new Case are collected in lists by the start_keyword method;
            # each list is converted to an HTML-list string and inserted into Testopia
def make_html_list(elements):
'''
converts a list of "<li>...</li>" strings (i.e. collected list) to
a <ol><li>...</li>...</ol> string
'''
elements = ''.join(elements)
return '<ol>%s</ol>' % elements
action = make_html_list(self.actions)
result = make_html_list(self.results)
self.driver.update_case_action_result(self.case_id, action, result)
# clean up
self.case_id = None
def end_suite(self, name, attrs):
self.no_run_id = False
status = attrs['status']
message = attrs['message']
self.run_id = None
def start_keyword(self, name, attrs):
if self.newCase:
self.actions.append('<li>%s</li>' % name)
self.results.append('<li>%s</li>' % (attrs['doc'] or "No doc for keyword"))
def close(self):
self.conn.close()
import sqlite3 as lite
class Connector():
def __init__(self, dbAddress):
self.con = lite.connect(dbAddress)
self.cur = self.con.cursor()
def commit(self):
try:
self.con.commit()
except lite.Error, e:
if self.con:
self.con.rollback()
raise e
def close(self):
self.con.close()
def has_result(self):
row = self.cur.fetchone()
if row:
return True
return False
def is_exported_suite(self, suite_path):
self.cur.execute("SELECT * FROM ExportedCases WHERE SuitePath=?", (suite_path,))
return self.has_result()
def get_RunID(self, suite_path):
self.cur.execute("SELECT RunID FROM ExportedCases WHERE SuitePath=?", (suite_path,))
row = self.cur.fetchone()
return row[0]
def get_CaseID(self, stringID, suite_path):
self.cur.execute("SELECT CaseID FROM ExportedCases WHERE \
SuitePath=? AND CaseStringID=?", (suite_path, stringID))
row = self.cur.fetchone()
return row[0]
def insert_as_exported(self, suite_path, run_id):
self.cur.execute("INSERT INTO ExportedCases (SuitePath, RunID, Timestamp) \
VALUES (?, ?, ?)", (suite_path, run_id, time.ctime()))
self.commit()
def is_exported_case(self, caseID, suite_path):
self.cur.execute("SELECT * FROM ExportedCases WHERE \
SuitePath=? AND CaseStringID=?", (suite_path, caseID))
return self.has_result()
def insert_case_as_exported(self, caseLongName, suite_path, case_id):
'''
'''
self.cur.execute("INSERT INTO ExportedCases (SuitePath, CaseStringID, CaseID, Timestamp) \
VALUES (?, ?, ?, ?)", (suite_path, caseLongName, case_id, time.ctime()))
self.commit()
def is_exported_plan(self, plan_name):
self.cur.execute("SELECT * FROM ExportedCases WHERE \
PlanName=?", (plan_name,))
return self.has_result()
def insert_plan_as_exported(self, plan_name, plan_id):
self.cur.execute("INSERT INTO ExportedCases (PlanName, PlanID, Timestamp) \
VALUES (?, ?, ?)", (plan_name, plan_id, time.ctime()))
self.commit()
def get_PlanID(self, plan_name):
self.cur.execute("SELECT PlanID FROM ExportedCases WHERE \
PlanName=?", (plan_name,))
row = self.cur.fetchone()
return row[0] | gpl-3.0 | -1,560,225,264,130,673,000 | 32.92623 | 115 | 0.691797 | false |
lgarren/spack | var/spack/repos/builtin/packages/r-simpleaffy/package.py | 1 | 2156 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RSimpleaffy(RPackage):
"""Provides high level functions for reading Affy .CEL files,
phenotypic data, and then computing simple things with it, such as
t-tests, fold changes and the like. Makes heavy use of the affy
library. Also has some basic scatter plot functions and mechanisms
for generating high resolution journal figures..."""
homepage = "http://bioconductor.org/packages/simpleaffy/"
url = "https://bioconductor.org/packages/3.5/bioc/src/contrib/simpleaffy_2.52.0.tar.gz"
version('2.52.0', 'aa305099a57b3d868be53dc8c539b74e')
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-affy', type=('build', 'run'))
depends_on('r-genefilter', type=('build', 'run'))
depends_on('r-gcrma', type=('build', 'run'))
depends_on('[email protected]:3.4.9', when='@2.52.0')
| lgpl-2.1 | 4,242,531,252,514,886,000 | 46.911111 | 96 | 0.668831 | false |
Lartza/lagbot | lagbot.py | 1 | 6589 | #!/usr/bin/env python3
# coding=utf-8
# lagirc, simple Python irc library
# Copyright (C) 2015 Lari Tikkanen
#
# Released under the GPLv3
# See LICENSE for details.
from configobj import ConfigObj
import logging
import re
import lagirc
import asyncio
from yapsy.PluginManager import PluginManager
from plugins.commandplugin import CommandPlugin
from plugins.handlerplugin import HandlerPlugin
from plugins.triggerplugin import TriggerPlugin
config = ConfigObj('config.cfg')
logging.basicConfig(level=config['global']['loglevel'])
if logging.getLogger().isEnabledFor(logging.DEBUG):
import warnings
warnings.resetwarnings()
class LagBot(lagirc.IRCClient):
def __init__(self):
super().__init__()
# Set in load_config
self.logger = None
self.nickname = None
self.username = None
self.realname = None
# Set in load_plugins
self.manager = None
self.commands = {}
self.handlers = []
self.triggers = {}
# Calls to init methods
self.load_config()
self.load_plugins()
def load_config(self, reload=False):
if reload:
config.reload()
else:
self.logger = logging.getLogger('LagBot')
self.nickname = config['global']['nickname']
self.username = config['global']['username']
self.realname = config['global']['realname']
self.logger.setLevel(config['global']['loglevel'])
self.logger.info('Config loaded')
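    # Illustrative config.cfg sketch (keys taken from the reads in this file; all
    # values are made up):
    #   [global]
    #   host = irc.example.net
    #   port = 6667
    #   nickname = lagbot
    #   username = lagbot
    #   realname = Lag Bot
    #   owner = owner!user@example.host
    #   channels = #chan1, #chan2
    #   loglevel = INFO
    #   [#chan1]
    #   ops = friend!user@example.host,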
def load_plugins(self, reload=False):
"""Loads all plugins"""
self.logger.info('Start initializing plugins')
self.logger.debug('Reloading plugins? {}'.format(reload))
if reload:
self.commands = {}
self.handlers = []
self.triggers = {}
for plugin in self.manager.getAllPlugins():
self.manager.deactivatePluginByName(plugin.name)
self.manager = PluginManager(
categories_filter={
'Command': CommandPlugin,
'Handler': HandlerPlugin,
'Trigger': TriggerPlugin,
},
directories_list=['plugins'], )
self.manager.collectPlugins()
for plugin in self.manager.getPluginsOfCategory('Command'):
self.manager.activatePluginByName(plugin.name, 'Command')
try:
for command in plugin.plugin_object.commands:
self.commands[command] = plugin.plugin_object
except AttributeError:
                self.logger.warn('Plugin {} does not define any commands! Disabling'.format(plugin.name))
self.manager.deactivatePluginByName(plugin.name)
self.logger.debug('Loaded plugin {}'.format(plugin.name))
for plugin in self.manager.getPluginsOfCategory('Handler'):
self.manager.activatePluginByName(plugin.name, 'Handler')
self.handlers.append(plugin.plugin_object)
self.logger.debug('Loaded plugin {}'.format(plugin.name))
for plugin in self.manager.getPluginsOfCategory('Trigger'):
self.manager.activatePluginByName(plugin.name, 'Trigger')
try:
for trigger in plugin.plugin_object.triggers:
self.triggers[re.compile(trigger)] = plugin.plugin_object
except AttributeError:
                self.logger.warn('Plugin {} does not define any triggers! Disabling'.format(plugin.name))
self.manager.deactivatePluginByName(plugin.name)
self.logger.debug('Loaded plugin {}'.format(plugin.name))
self.logger.info('Finish plugin initialization')
self.logger.debug('Commands: {}'.format(self.commands))
self.logger.debug('Handlers: {}'.format(self.handlers))
self.logger.debug('Triggers: {}'.format(self.triggers))
async def connected(self):
self.logger.info('Connected')
# Join all the channels defined in config
for channel in config['global'].as_list('channels'):
self.join(channel)
self.logger.info('Joined {0}'.format(channel))
def get_nick(self, user):
"""Return the nick from an irc user nick!user@host"""
return user.split('!', 1)[0]
def is_op(self, user, channel):
"""Checks if the user is set to have op permissions to the bot on a channel"""
try:
self.logger.debug(config[channel].as_list('ops'))
if user in config[channel].as_list('ops'):
self.logger.debug('{} matches {} ops'.format(user, channel))
return True
except KeyError:
self.logger.debug('No ops for channel {}'.format(channel))
return False
self.logger.debug("{} doesn't match ops for {}".format(user, channel))
return False
def is_owner(self, user):
"""Return whether user matches owner in config"""
if user == config['global']['owner']:
return True
return False
async def privmsg_received(self, user, channel, message):
self.logger.info('{} <{}> {}'.format(channel, self.get_nick(user), message))
if message.startswith('!'):
cmd = message.split(' ', 1)[0].lstrip('!')
try:
plugin = self.commands[cmd]
except KeyError:
self.logger.debug('No plugin found for command {}'.format(cmd))
plugin = None
if self.is_owner(user):
if cmd == 'reload_plugins':
self.load_plugins(reload=True)
if cmd == 'reload_config':
self.load_config(reload=True)
if plugin:
self.logger.debug('Excecuting plugin for command {}'.format(cmd))
plugin.execute(self, user, channel, message)
else:
for trigger, plugin in self.triggers.items():
if re.search(trigger, message) is not None:
plugin.execute(self, user, channel, message)
break
for handler in self.handlers:
self.logger.debug('Excecuting handlers')
handler.execute(self, user, channel, message)
def connection_lost(self, exc):
loop.stop()
loop = asyncio.get_event_loop()
if logging.getLogger().isEnabledFor(logging.DEBUG):
loop.set_debug(True)
coro = loop.create_connection(lambda: LagBot(), config['global']['host'],
int(config['global']['port']))
loop.run_until_complete(coro)
loop.run_forever()
loop.close()
| gpl-3.0 | 3,076,230,145,216,212,500 | 38.45509 | 86 | 0.595842 | false |
fedebell/Laboratorio3 | relazione6/calcoli.py | 1 | 1559 | import uncertainties
from uncertainties import ufloat, unumpy
import math
import numpy
import pylab
from scipy.optimize import curve_fit
import scipy.stats
Vmeno = ufloat(-15.00, 15.00*0.005)
Vpiu = ufloat(14.99, 14.99*0.005)
R1 = ufloat(2.18, 2.18*0.008)*1000
R2 = ufloat(21.5, 21.5*0.008)*1000
print("Vmeno = ", Vmeno)
print("Vpiu = ", Vpiu)
print("R1 = ", R1)
print("R2 = ", R2)
A = -R2/R1
print("A_atteso = ", A)
# Input resistance measurement
V1 = ufloat(9.68, 0.08)
V2 = ufloat(4.88, 0.04)
RS = ufloat(2.19, ((2.19*0.008)**2+0.01**2)**0.5)
R_in_att = RS*1/(V1/V2 -1)
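# The line above estimates the input resistance from a voltage-divider argument:
# with V2 = V1 * R_in / (R_in + RS), solving for R_in gives R_in = RS / (V1/V2 - 1).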
print("V1 = ", V1)
print("V2 = ", V2)
print("RS = ", RS)
print("R_in_attesa = ", R_in_att)
deltaV = ufloat(1.00, 0.04)
deltat = ufloat(90, 1)*10**(-3)
s = deltaV/deltat
print("s = ", s)
# Inverting amplifier
R1 = ufloat(218, ((0.008*218)**2+1)**0.5)
P1 = ufloat(97.8, ((0.008*97.8)**2+0.1**2)**0.5)*1000
VIN = ufloat(0.340, 0.002)
print("R1 = ", R1)
print("P1 = ", P1)
print("VIN = ", VIN)
# Integrator
R1 = ufloat(984, 984*0.008)
R2 = ufloat(11.77, 11.77*0.008)*1000
C = ufloat(45.2, 45.2*0.04)
print("R1 = ", R1)
print("R2 = ", R2)
print("C = ", C)
# Differentiator
deltaV = ufloat(1.00, 0.03)
deltat = ufloat(0.088, 0.004)
slew = deltaV/deltat
print("slew rate", slew)
V_I = ufloat(4.68, 0.02)
V_OUT = ufloat(1.02, 0.02)
A = V_OUT/V_I
print("Amplificazione derivatore a 100\,Hz:", A)
f = 0.100
f_t = 3.42515
Amax = 11.597690
A = Amax/(1+(f_t/f)**2)**0.5
print("Amplificazione derivatore a 100\,Hz attesa:", A)
| gpl-3.0 | 1,469,787,664,021,489,200 | 20.356164 | 55 | 0.630532 | false |
emc-openstack/storops | storops_test/vnx/resource/test_mirror_view.py | 1 | 23552 | # coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from unittest import TestCase
from hamcrest import assert_that, instance_of, raises, none, not_none
from hamcrest import equal_to, has_length
from storops.exception import VNXMirrorLunNotAvailableError, \
VNXMirrorNameInUseError, VNXMirrorAlreadyMirroredError, \
VNXMirrorImageNotFoundError, VNXMirrorFractureImageError, \
VNXMirrorSyncImageError, VNXMirrorPromoteNonLocalImageError, \
VNXMirrorPromotePrimaryError, VNXMirrorFeatureNotAvailableError, \
VNXMirrorNotFoundError, VNXDeleteMirrorWithSecondaryError, \
VNXMirrorRemoveSynchronizingError, VNXMirrorGroupAlreadyMemberError, \
VNXMirrorGroupMirrorNotMemberError, VNXMirrorGroupAlreadyPromotedError, \
VNXMirrorGroupNameInUseError, VNXMirrorException
from storops_test.vnx.cli_mock import patch_cli
from storops_test.vnx.cli_mock import t_cli
from storops.vnx.enums import VNXMirrorViewRecoveryPolicy, \
VNXMirrorViewSyncRate, VNXSPEnum, VNXMirrorImageState, \
VNXMirrorGroupRecoveryPolicy
from storops.vnx.resource.mirror_view import VNXMirrorView, \
VNXMirrorViewImage, VNXMirrorGroup, VNXMirrorGroupList, \
VNXMirrorViewAsync, VNXMirrorGroupAsync, VNXMirrorGroupAsyncList
__author__ = 'Cedric Zhuang'
class VNXMirrorViewTest(TestCase):
@patch_cli
def test_get_all(self):
mv_list = VNXMirrorView.get(t_cli())
assert_that(len(mv_list), equal_to(4))
@patch_cli(output='mirror_not_installed.txt')
def test_mirror_view_not_installed(self):
mv_list = VNXMirrorView.get(t_cli())
assert_that(len(mv_list), equal_to(0))
mv = VNXMirrorView.get(t_cli(), 'mv_sync_2')
assert_that(mv.existed, equal_to(False))
@patch_cli
def test_get(self):
mv = VNXMirrorView.get(t_cli(), 'mv_sync_2')
assert_that(mv.uid, equal_to(
'50:06:01:60:88:60:05:FE:04:00:00:00:00:00:00:00'))
assert_that(mv.name, equal_to('mv_sync_2'))
assert_that(mv.description, equal_to(''))
assert_that(mv.logical_unit_numbers, 30)
assert_that(mv.quiesce_threshold, equal_to(60))
assert_that(mv.recovery_policy,
equal_to(VNXMirrorViewRecoveryPolicy.MANUAL))
assert_that(len(mv.images), equal_to(2))
assert_that(mv.images[0], instance_of(VNXMirrorViewImage))
assert_that(mv.synchronization_rate,
equal_to(VNXMirrorViewSyncRate.MEDIUM))
assert_that(mv.existed, equal_to(True))
assert_that(mv.state, equal_to('Active'))
assert_that(mv.image_transitioning, equal_to(False))
assert_that(mv.image_size, equal_to(2097152))
assert_that(mv.image_count, equal_to(2))
assert_that(mv.image_faulted, equal_to(False))
assert_that(mv.minimum_number_of_images_required, equal_to(0))
assert_that(mv.write_intent_log_used, equal_to(True))
assert_that(mv.synchronizing_progress, equal_to(100))
assert_that(mv.remote_mirror_status, equal_to('Secondary Copy'))
assert_that(mv.faulted, equal_to(False))
assert_that(mv.transitioning, equal_to(False))
assert_that(mv.is_primary, equal_to(False))
@patch_cli
def test_image_properties(self):
mv = VNXMirrorView.get(t_cli(), 'mv0')
assert_that(mv.is_primary, equal_to(True))
assert_that(mv.primary_image.is_primary, equal_to(True))
assert_that(mv.secondary_image.is_primary, equal_to(False))
@patch_cli
def test_create_success(self):
mv = VNXMirrorView.create(t_cli(), 'mv0', 245)
assert_that(mv.name, equal_to('mv0'))
@patch_cli
def test_create_lun_not_available_for_mirror(self):
def f():
VNXMirrorView.create(t_cli(), 'mv0', 244)
assert_that(f, raises(VNXMirrorLunNotAvailableError, 'not available'))
@patch_cli
def test_create_name_in_use(self):
def f():
VNXMirrorView.create(t_cli(), 'mv0', 246)
assert_that(f, raises(VNXMirrorNameInUseError, 'in use'))
@patch_cli
def test_add_image_success(self):
mv = VNXMirrorView.get(t_cli(), 'mv0')
mv.add_image('192.168.1.94', 71)
assert_that(len(mv.images), equal_to(2))
@patch_cli
def test_add_image_already_mirrored(self):
def f():
mv = VNXMirrorView.get(t_cli(), 'mv0')
mv.add_image('192.168.1.94', 72)
assert_that(f, raises(VNXMirrorAlreadyMirroredError, 'exists'))
@patch_cli
def test_get_image_found(self):
mv = VNXMirrorView.get(t_cli(), 'mv0')
image = mv.get_image('50:06:01:60:88:60:05:FE')
assert_that(image.state, equal_to(VNXMirrorImageState.SYNCHRONIZED))
@patch_cli
def test_get_image_not_found(self):
def f():
mv = VNXMirrorView.get(t_cli(), 'mv0')
mv.get_image('50:06:01:60:88:60:05:FF')
assert_that(f, raises(VNXMirrorImageNotFoundError, 'not found'))
@patch_cli
def test_remove_image_not_found(self):
def f():
mv = VNXMirrorView.get(t_cli(), 'mv0')
mv.remove_image('50:06:01:60:88:60:05:FF')
assert_that(f, raises(VNXMirrorImageNotFoundError, 'not found'))
@patch_cli
def test_remove_image_success(self):
mv = VNXMirrorView.get(t_cli(), 'mv0')
# no error raised
mv.remove_image()
@patch_cli
def test_remove_image_no_secondary_image(self):
def f():
mv = VNXMirrorView.get(t_cli(), 'mv1')
mv.remove_image()
assert_that(f,
raises(VNXMirrorImageNotFoundError, 'no secondary'))
@patch_cli
def test_fracture_primary_image(self):
def f():
mv = VNXMirrorView.get(t_cli(), 'mv0')
mv.fracture_image('50:06:01:60:B6:E0:1C:F4')
assert_that(f, raises(VNXMirrorFractureImageError, 'Cannot'))
@patch_cli
def test_fracture_image_success(self):
mv = VNXMirrorView.get(t_cli(), 'mv0')
# no error raised
mv.fracture_image()
@patch_cli
def test_fracture_image_not_found(self):
def f():
mv = VNXMirrorView.get(t_cli(), 'mv0')
mv.fracture_image('50:06:01:60:88:60:05:FF')
assert_that(f, raises(VNXMirrorImageNotFoundError))
@patch_cli
def test_sync_image_not_found(self):
def f():
mv = VNXMirrorView.get(t_cli(), 'mv0')
mv.sync_image('50:06:01:60:88:60:05:FF')
assert_that(f, raises(VNXMirrorImageNotFoundError))
@patch_cli
def test_sync_image_failed(self):
def f():
mv = VNXMirrorView.get(t_cli(), 'mv0')
mv.sync_image()
assert_that(f, raises(VNXMirrorSyncImageError, 'failed'))
@patch_cli
def test_promote_image_not_found(self):
def f():
mv = VNXMirrorView.get(t_cli(), 'mv0')
mv.promote_image('50:06:01:60:88:60:05:FF')
assert_that(f, raises(VNXMirrorImageNotFoundError))
@patch_cli
def test_promote_non_local_image(self):
def f():
mv = VNXMirrorView.get(t_cli(), 'mv0')
mv.promote_image()
assert_that(f, raises(VNXMirrorPromoteNonLocalImageError,
'not local'))
@patch_cli
def test_promote_already_promoted(self):
def f():
mv = VNXMirrorView.get(t_cli(), 'mv0')
mv.promote_image('50:06:01:60:88:60:05:F0')
assert_that(f, raises(VNXMirrorPromotePrimaryError, 'primary image'))
@patch_cli
def test_mirror_view_feature_not_installed(self):
def f():
mv = VNXMirrorView.get(t_cli(), 'mv9')
mv.delete()
assert_that(f, raises(VNXMirrorFeatureNotAvailableError,
'not installed'))
@patch_cli
def test_delete_mirror_not_found_error(self):
def f():
mv = VNXMirrorView.get(t_cli(), 'mv8')
mv.delete()
assert_that(f, raises(VNXMirrorNotFoundError, 'not found'))
@patch_cli
def test_delete_mirror_has_secondary(self):
def f():
mv = VNXMirrorView.get(t_cli(), 'mv7')
mv.delete()
assert_that(f, raises(VNXDeleteMirrorWithSecondaryError,
'at least one secondary'))
@patch_cli
def test_remove_mirror_image_is_synchronizing(self):
def f():
mv = VNXMirrorView.get(t_cli(), 'mv2')
mv.remove_image()
assert_that(f, raises(VNXMirrorRemoveSynchronizingError,
'is being synchronized'))
@patch_cli
def test_force_delete_mirror_has_secondary(self):
mv = VNXMirrorView.get(t_cli(), 'mv0')
# no error raised
mv.delete(force=True)
class VNXMirrorViewImageTest(TestCase):
@patch_cli
def test_properties(self):
mv = VNXMirrorView.get(t_cli(), 'mv_sync_2')
image = mv.get_image('50:06:01:60:88:60:05:FE')
assert_that(image.uid, equal_to('50:06:01:60:88:60:05:FE'))
assert_that(image.existed, equal_to(True))
assert_that(image.is_primary, equal_to(True))
assert_that(image.logical_unit_uid, equal_to(
'60:06:01:60:41:C4:3D:00:6E:1C:50:9D:05:95:E5:11'))
assert_that(image.condition, equal_to('Primary Image'))
assert_that(image.state, none())
assert_that(image.preferred_sp, equal_to(VNXSPEnum.SP_A))
class VNXMirrorGroupTest(TestCase):
@patch_cli
def test_create(self):
mg = VNXMirrorGroup.create(t_cli(), name='test_group')
assert_that(mg, instance_of(VNXMirrorGroup))
@patch_cli
def test_create_name_in_use(self):
def _inner():
VNXMirrorGroup.create(t_cli(), name='test_group_in_use')
assert_that(_inner, raises(VNXMirrorGroupNameInUseError))
@patch_cli
def test_create_and_add(self):
mirror = VNXMirrorView.get(t_cli(), name='mv_sync_2')
mg = VNXMirrorGroup.create(t_cli(), name='petermg1', mirror=mirror)
assert_that(mg, instance_of(VNXMirrorGroup))
@patch_cli
def test_get_single(self):
mg = VNXMirrorGroup.get(t_cli(), name='petermg')
assert_that(mg, instance_of(VNXMirrorGroup))
assert_that(mg.name, equal_to('petermg'))
assert_that(mg.gid, equal_to('50:06:01:60:B6:60:25:22:00:00:00:00'))
assert_that(mg.description, equal_to(''))
assert_that(mg.state, equal_to('Synchronized'))
assert_that(mg.role, equal_to('Primary'))
assert_that(mg.condition, equal_to('Active'))
assert_that(mg.policy, equal_to(VNXMirrorGroupRecoveryPolicy.MANUAL))
assert_that(mg.mirrors, has_length(2))
assert_that(mg.group_mirrors, has_length(2))
for m in mg.mirrors:
assert_that(m, instance_of(VNXMirrorView))
        for gm in mg.group_mirrors:
            assert_that(
                gm.mirror_name,
                not_none())
            assert_that(gm.src_lun_id, instance_of(int))
@patch_cli
def test_get_all(self):
mg_list = VNXMirrorGroup.get(t_cli())
assert_that(len(mg_list), equal_to(2))
assert_that(mg_list, instance_of(VNXMirrorGroupList))
@patch_cli
def test_promote_group(self):
mg1 = VNXMirrorGroup.get(t_cli(), name='petermg1')
mg1.promote_group()
@patch_cli
def test_fracture_group(self):
mg1 = VNXMirrorGroup.get(t_cli(), name='petermg1')
mg1.fracture_group()
@patch_cli
def test_add_to_group(self):
mirror = VNXMirrorView.get(t_cli(), name='mv_sync_2')
mg1 = VNXMirrorGroup.get(t_cli(), name='petermg1')
mg1.add_mirror(mirror)
@patch_cli
def test_add_to_group_existed(self):
mirror = VNXMirrorView.get(t_cli(), name='mv0')
mg1 = VNXMirrorGroup.get(t_cli(), name='petermg')
def _inner():
mg1.add_mirror(mirror)
assert_that(_inner, raises(VNXMirrorGroupAlreadyMemberError))
@patch_cli
def test_remove_from_group(self):
mirror = VNXMirrorGroup.get(t_cli(), name='mv_sync_2')
mg1 = VNXMirrorGroup.get(t_cli(), name='petermg')
mg1.remove_mirror(mirror)
@patch_cli
def test_remove_from_group_already_removed(self):
mirror = VNXMirrorGroup.get(t_cli(), name='not_in_group')
mg1 = VNXMirrorGroup.get(t_cli(), name='petermg')
def _inner():
mg1.remove_mirror(mirror)
assert_that(_inner, raises(VNXMirrorGroupMirrorNotMemberError))
@patch_cli
def test_sync_group(self):
mg1 = VNXMirrorGroup.get(t_cli(), name='petermg')
mg1.sync_group()
@patch_cli
def test_sync_group_already_promoted(self):
mg1 = VNXMirrorGroup.get(t_cli(), name='mg_promote_on_primary')
assert_that(mg1.sync_group, raises(VNXMirrorGroupAlreadyPromotedError))
@patch_cli
def test_delete_group(self):
mg1 = VNXMirrorGroup.get(t_cli(), name='petermg')
mg1.delete()
class VNXMirrorViewAsyncTest(TestCase):
@patch_cli
def test_get_all(self):
mv_list = VNXMirrorViewAsync.get(t_cli())
assert_that(len(mv_list), equal_to(2))
@patch_cli
def test_get(self):
mv = VNXMirrorViewAsync.get(t_cli(), 'testdr_001')
assert_that(mv.uid, equal_to(
'8F:23:60:B6:60:01:06:50:08:00:00:00:00:00:00:00'))
assert_that(mv.name, equal_to('testdr_001'))
assert_that(mv.description, equal_to(''))
assert_that(mv.logical_unit_numbers, 55)
assert_that(mv.recovery_policy,
equal_to(VNXMirrorViewRecoveryPolicy.AUTO))
assert_that(len(mv.images), equal_to(2))
assert_that(mv.images[0], instance_of(VNXMirrorViewImage))
assert_that(mv.synchronization_rate,
equal_to(VNXMirrorViewSyncRate.MEDIUM))
assert_that(mv.existed, equal_to(True))
assert_that(mv.state, equal_to('Active'))
assert_that(mv.image_transitioning, equal_to(False))
assert_that(mv.image_size, equal_to(104857600))
assert_that(mv.image_count, equal_to(2))
assert_that(mv.image_faulted, equal_to(False))
assert_that(mv.minimum_number_of_images_required, equal_to(0))
assert_that(mv.synchronizing_progress, equal_to(100))
assert_that(mv.remote_mirror_status, equal_to('Mirrored'))
assert_that(mv.faulted, equal_to(False))
assert_that(mv.transitioning, equal_to(False))
assert_that(mv.is_primary, equal_to(True))
@patch_cli
def test_image_properties(self):
mv = VNXMirrorViewAsync.get(t_cli(), 'testdr_001')
assert_that(mv.is_primary, equal_to(True))
assert_that(mv.primary_image.is_primary, equal_to(True))
assert_that(mv.secondary_image.is_primary, equal_to(False))
@patch_cli
def test_create_success(self):
mv = VNXMirrorViewAsync.create(t_cli(), 'testdr_003', 71)
assert_that(mv.name, equal_to('testdr_003'))
@patch_cli
def test_create_lun_not_available_for_mirror(self):
def f():
VNXMirrorViewAsync.create(t_cli(), 'mv0', 244)
assert_that(f, raises(VNXMirrorException, 'LUN does not exist'))
@patch_cli
def test_create_name_in_use(self):
def f():
VNXMirrorViewAsync.create(t_cli(), 'testdr_003', 72)
assert_that(f, raises(VNXMirrorNameInUseError, 'in use'))
@patch_cli
def test_add_image_success(self):
mv = VNXMirrorViewAsync.get(t_cli(), 'testdr_005')
mv.add_image('192.168.1.94', 71)
assert_that(len(mv.images), equal_to(2))
@patch_cli
def test_add_image_already_mirrored(self):
def f():
mv = VNXMirrorViewAsync.get(t_cli(), 'testdr_005')
mv.add_image('192.168.1.94', 72)
assert_that(f, raises(VNXMirrorAlreadyMirroredError, 'exists'))
@patch_cli
def test_get_image_found(self):
mv = VNXMirrorViewAsync.get(t_cli(), 'testdr_004')
image = mv.get_image('50:06:01:60:B6:60:23:7E')
assert_that(image.state, equal_to(VNXMirrorImageState.SYNCHRONIZED))
@patch_cli
def test_get_image_not_found(self):
def f():
mv = VNXMirrorViewAsync.get(t_cli(), 'testdr_004')
mv.get_image('50:06:01:60:88:60:05:FF')
assert_that(f, raises(VNXMirrorImageNotFoundError, 'not found'))
@patch_cli
def test_remove_image_not_found(self):
def f():
mv = VNXMirrorViewAsync.get(t_cli(), 'testdr_004')
mv.remove_image('50:06:01:60:88:60:05:FF')
assert_that(f, raises(VNXMirrorException, 'image does not exist'))
@patch_cli
def test_remove_image_success(self):
mv = VNXMirrorViewAsync.get(t_cli(), 'testdr_004')
# no error raised
mv.remove_image()
@patch_cli
def test_remove_image_no_secondary_image(self):
def f():
mv = VNXMirrorViewAsync.get(t_cli(), 'testdr_003')
mv.remove_image()
assert_that(f,
raises(VNXMirrorImageNotFoundError, 'no secondary'))
@patch_cli
def test_fracture_primary_image(self):
def f():
mv = VNXMirrorViewAsync.get(t_cli(), 'testdr_005')
mv.fracture_image('50:06:01:60:B6:60:23:8F')
assert_that(f, raises(VNXMirrorException, 'does not exist'))
@patch_cli
def test_fracture_image_success(self):
mv = VNXMirrorViewAsync.get(t_cli(), 'testdr_005')
# no error raised
mv.fracture_image()
@patch_cli
def test_sync_image_not_found(self):
def f():
mv = VNXMirrorViewAsync.get(t_cli(), 'testdr_005')
mv.sync_image('50:06:01:60:88:60:05:FF')
assert_that(f, raises(VNXMirrorException, 'does not exist'))
@patch_cli
def test_sync_image_failed(self):
def f():
mv = VNXMirrorViewAsync.get(t_cli(), 'testdr_005')
mv.sync_image()
assert_that(f, raises(VNXMirrorException, 'already synchronized'))
@patch_cli
def test_promote_image_not_found(self):
def f():
mv = VNXMirrorViewAsync.get(t_cli(), 'testdr_005')
mv.promote_image('50:06:01:60:88:60:05:FF')
assert_that(f, raises(VNXMirrorException, 'does not exist'))
@patch_cli
def test_promote_non_local_image(self):
def f():
mv = VNXMirrorViewAsync.get(t_cli(), 'testdr_005')
mv.promote_image()
assert_that(f, raises(VNXMirrorException,
'promotion wasn\'t local'))
@patch_cli
def test_delete_mirror_not_found_error(self):
def f():
mv = VNXMirrorViewAsync.get(t_cli(), 'mv8')
mv.delete()
assert_that(f, raises(VNXMirrorException, 'mirror does not exist'))
@patch_cli
def test_delete_mirror_has_secondary(self):
def f():
mv = VNXMirrorViewAsync.get(t_cli(), 'testdr_005')
mv.delete()
assert_that(f, raises(VNXMirrorException,
'mirror with secondary images'))
@patch_cli
def test_force_delete_mirror_has_secondary(self):
mv = VNXMirrorViewAsync.get(t_cli(), 'testdr_006')
# no error raised
mv.delete(force=True)
class VNXMirrorGroupAsyncTest(TestCase):
@patch_cli
def test_create(self):
mg = VNXMirrorGroupAsync.create(t_cli(), name='test_group')
assert_that(mg, instance_of(VNXMirrorGroupAsync))
@patch_cli
def test_create_name_in_use(self):
def _inner():
VNXMirrorGroupAsync.create(t_cli(), name='test_group_in_use')
assert_that(_inner, raises(VNXMirrorException, 'same name as'))
@patch_cli
def test_create_and_add(self):
mirror = VNXMirrorViewAsync.get(t_cli(), name='testdr_004')
mg = VNXMirrorGroupAsync.create(t_cli(), name='petermg1',
mirror=mirror)
assert_that(mg, instance_of(VNXMirrorGroupAsync))
@patch_cli
def test_get_single(self):
mg = VNXMirrorGroupAsync.get(t_cli(), name='petermg')
assert_that(mg, instance_of(VNXMirrorGroupAsync))
assert_that(mg.name, equal_to('petermg'))
assert_that(mg.gid, equal_to('50:06:01:60:B6:60:23:8F:03:00:00:00'))
assert_that(mg.description, equal_to(''))
assert_that(mg.state, equal_to('Synchronized'))
assert_that(mg.role, equal_to('Primary'))
assert_that(mg.condition, equal_to('Normal'))
assert_that(mg.policy, equal_to(VNXMirrorGroupRecoveryPolicy.AUTO))
assert_that(len(mg.group_mirrors), equal_to(1))
@patch_cli
def test_get_all(self):
mg_list = VNXMirrorGroupAsync.get(t_cli())
assert_that(len(mg_list), equal_to(2))
assert_that(mg_list, instance_of(VNXMirrorGroupAsyncList))
@patch_cli
def test_promote_group(self):
mg1 = VNXMirrorGroupAsync.get(t_cli(), name='petermg2')
mg1.promote_group()
@patch_cli
def test_fracture_group(self):
mg1 = VNXMirrorGroupAsync.get(t_cli(), name='petermg1')
mg1.fracture_group()
@patch_cli
def test_add_to_group(self):
mirror = VNXMirrorViewAsync.get(t_cli(), name='testdr_004')
mg1 = VNXMirrorGroupAsync.get(t_cli(), name='petermg1')
mg1.add_mirror(mirror)
@patch_cli(output='mirror_-async_-addtogroup_-name_petermg1_'
'-mirrorname_testdr_004_ALREADYMEMBER.txt')
def test_add_to_group_existed(self):
mirror = VNXMirrorViewAsync.get(t_cli(), name='testdr_004')
mg1 = VNXMirrorGroupAsync.get(t_cli(), name='petermg1')
def _inner():
mg1.add_mirror(mirror)
assert_that(_inner, raises(VNXMirrorGroupAlreadyMemberError))
@patch_cli
def test_remove_from_group(self):
mirror = VNXMirrorGroupAsync.get(t_cli(), name='testdr_004')
mg1 = VNXMirrorGroupAsync.get(t_cli(), name='petermg1')
mg1.remove_mirror(mirror)
@patch_cli
def test_remove_from_group_already_removed(self):
mirror = VNXMirrorGroupAsync.get(t_cli(), name='testdr_003')
mg1 = VNXMirrorGroupAsync.get(t_cli(), name='petermg1')
def _inner():
mg1.remove_mirror(mirror)
assert_that(_inner, raises(VNXMirrorGroupMirrorNotMemberError))
@patch_cli
def test_sync_group(self):
mg1 = VNXMirrorGroupAsync.get(t_cli(), name='petermg')
mg1.sync_group()
@patch_cli
def test_delete_non_empty_group(self):
mg1 = VNXMirrorGroupAsync.get(t_cli(), name='petermg')
def _inner():
mg1.delete()
assert_that(_inner, raises(VNXMirrorException, 'still has members'))
@patch_cli
def test_delete_group(self):
mg1 = VNXMirrorGroupAsync.get(t_cli(), name='test_group')
mg1.delete()
| apache-2.0 | 2,035,904,516,088,921,300 | 34.047619 | 79 | 0.619268 | false |
crawfordsm/zSALT | zsalt/extract.py | 1 | 5452 | #!/usr/bin/env python
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. See LICENSE file for more detail. #
"""
SPECEXTRACT extracts a 1-D spectrum from a 2-D data file.
Author Version Date
-----------------------------------------------
S. M. Crawford (SAAO) 1.0 15 Nov 2010
TODO
----
1. The task still needs to be written
LIMITATIONS
-----------
"""
# Ensure python 2.5 compatibility
from __future__ import with_statement
import os
import sys
import time
import numpy as np
from astropy.io import fits as pyfits
from PySpectrograph.Spectra import Spectrum
from PySpectrograph.Spectra import apext
from PySpectrograph.Spectra import findobj
debug = True
def extract(hdu, ext=1, method='normal', section=[],
minsize=3.0, thresh=3.0, convert=True):
"""For a given image, extract a 1D spectra from the image
and write the spectra to the output file
"""
ap_list = []
i = ext
if hdu[i].name == 'SCI':
# set up the data, variance, and bad pixel frames
# first step is to find the region to extract
data_arr = hdu[i].data
try:
var_arr = hdu[hdu[i].header['VAREXT']].data
except:
var_arr = None
try:
bpm_arr = hdu[hdu[i].header['BPMEXT']].data
except:
bpm_arr = None
var_arr = None
bpm_arr = None
xarr = np.arange(len(data_arr[0]))
# convert using the WCS information
try:
w0 = hdu[i].header['CRVAL1']
dw = hdu[i].header['CD1_1']
except Exception as e:
msg = 'Error on Ext %i: %s' % (i, e)
raise Exception(msg)
warr = w0 + dw * xarr
# convert from air to vacuum
if convert:
warr = Spectrum.air2vac(warr)
# set up the sections in case of findobj
if section is None:
section = findobj.findObjects(
data_arr,
method='median',
specaxis=1,
minsize=minsize,
thresh=thresh,
niter=5)
# extract all of the regions
for sec in section:
ap = apext.apext(warr, data_arr, ivar=var_arr)
y1, y2 = sec
ap.flatten(y1, y2)
ap_list.append(ap)
return ap_list
def write_extract(ofile, ap_list, outformat='ascii', fvar=None, clobber=False):
"""Write out to either a txt file or fits file depending on the extension
of ofile
"""
if outformat == 'FITS':
write_extract_fits(ofile, ap_list, clobber)
elif outformat == 'ascii':
write_extract_text(ofile, ap_list, clobber)
else:
msg = '%s is not a supported output format' % outformat
raise Exception(msg)
return
def write_extract_text(ofile, ap_list, clobber=False):
"""Write out the extracted spectrum to a text file. If the file already
    exists, this will not overwrite it.
    For each spectrum in ap_list, columns are appended to the output file so
    that the first column is always wavelength, followed by a flux column and
    a sigma column for each extracted spectrum.
ofile: Output file to write
ap_list: List of extracted spectrum
clobber: delete ofile if it already exists
"""
if os.path.isfile(ofile) and not clobber:
return
# open the file
    dout = open(ofile, 'w')  # use the builtin; saltio is not imported here
# first extract warr, assume it is the same for all frames
warr = ap_list[0].wave
# write out the spectrum
for i in range(len(warr)):
outstr = '%7.3f ' % warr[i]
for ap in ap_list:
flux = ap.ldata[i]
try:
fvar = abs(ap.lvar[i]) ** 0.5
except:
fvar = 1
outstr += "%7.3f %7.3f " % (flux, fvar)
outstr += '\n'
dout.write(outstr)
dout.close()
return
def write_extract_fits(ofile, ap_list, clobber=False):
"""Write out the extracted spectrum to a FITS table. If the file already
exists, this will not overwrite it.
For each spectrum in ap_list, it will add another extension to the
fits file. Each extension will have the first column as wavelength,
the second column as counts, and the third column as sigma on the
counts.
ofile: Output file to write
ap_list: List of extracted spectrum
clobber: delete ofile if it already exists
"""
# delete the file
if os.path.isfile(ofile) and clobber:
        os.remove(ofile)  # use os directly; saltio is not imported here
# create the primary array
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
# create the columns and the
for ap in ap_list:
fvar = abs(ap.lvar) ** 0.5
# create the columns
col1 = pyfits.Column(
name='wavelength',
format='D',
unit='Angstroms',
array=ap.wave)
col2 = pyfits.Column(
name='counts',
format='D',
unit='Counts',
array=ap.ldata)
col3 = pyfits.Column(name='counts_err', format='D', array=fvar)
# add to the table
        tbhdu = pyfits.BinTableHDU.from_columns([col1, col2, col3])
hdulist.append(tbhdu)
# write it out
hdulist.writeto(ofile)
return
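# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original zSALT module). The file
# names below are hypothetical; it assumes extension 1 of the input is a 'SCI'
# HDU carrying the CRVAL1/CD1_1 wavelength keywords that extract() expects.
def _example_run(infile='obj_2d.fits', outfile='obj_1d.txt'):
    hdu = pyfits.open(infile)
    # find objects automatically (section=None) and extract each aperture
    ap_list = extract(hdu, ext=1, section=None, minsize=3.0, thresh=3.0)
    write_extract(outfile, ap_list, outformat='ascii', clobber=True)
    hdu.close()
    return ap_list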
| bsd-3-clause | 6,011,496,125,247,817 | 26.26 | 79 | 0.570983 | false |
atkvo/masters-bot | src/autobot/src/pidControl.py | 1 | 1392 | #!/usr/bin/env python
import rospy
from autobot.msg import drive_param
from autobot.msg import pid_input
from std_msgs.msg import String
import math
kp = 14.0 * 3
kd = 0.09 * 10  # derivative gain: damps how quickly the steering angle changes
servo_offset = 18.5
prev_error = 0.0
vel_input = 0.0
mode = 'wall'
motorPub = rospy.Publisher('drive_parameters', drive_param, queue_size=10)
def control(data):
global kp
global kd
global servo_offset
global prev_error
global vel_input
global mode
driveParam = drive_param()
driveParam.velocity = data.pid_vel
if mode == 'wall':
pid_error = data.pid_error
error = pid_error * kp
errordot = kd * (pid_error - prev_error)
angle = error + errordot
if angle > 100:
angle = 100
elif angle < -100:
angle = -100
prev_error = pid_error
print 'pid_error {}\nangle {}'.format(pid_error, angle)
driveParam.angle = angle
elif mode == 'corner':
print 'corner mode, angle 100'
driveParam.angle = 100
motorPub.publish(driveParam)
def update_mode(_mode):
global mode
mode = _mode.data
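# ---------------------------------------------------------------------------
# Illustrative PD sketch (not used by the ROS node above): how the steering
# angle follows from a single error sample. The example numbers are made up.
def _pd_angle_example(pid_error, prev_error):
    p_term = kp * pid_error                   # proportional response
    d_term = kd * (pid_error - prev_error)    # derivative damping
    angle = p_term + d_term
    return max(-100, min(100, angle))         # same clamp as control()
# e.g. _pd_angle_example(0.5, 0.3) ~= 42*0.5 + 0.9*0.2 = 21.18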
if __name__ == '__main__':
print("Listening to error for PID")
rospy.init_node('pid_controller', anonymous=True)
rospy.Subscriber("error", pid_input, control)
rospy.Subscriber("mode", String, update_mode)
rospy.spin()
| mit | -3,342,215,794,730,230,300 | 19.776119 | 74 | 0.617816 | false |
Telestream/telestream-cloud-python-sdk | telestream_cloud_qc_sdk/telestream_cloud_qc/models/hdr_standard_type.py | 1 | 2817 | # coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class HdrStandardType(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
GENERICHDR = "GenericHdr"
HDR10 = "Hdr10"
ARIBB67 = "AribB67"
allowable_values = [GENERICHDR, HDR10, ARIBB67] # noqa: E501
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
}
attribute_map = {
}
def __init__(self, local_vars_configuration=None): # noqa: E501
"""HdrStandardType - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HdrStandardType):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, HdrStandardType):
return True
return self.to_dict() != other.to_dict()
| mit | 5,876,936,412,095,734,000 | 26.617647 | 74 | 0.556621 | false |
ppyordanov/HCI_4_Future_Cities | Server/src/virtualenv/Lib/site-packages/setuptools/command/install_egg_info.py | 1 | 3830 | from setuptools import Command
from setuptools.archive_util import unpack_archive
from distutils import log, dir_util
import os, pkg_resources
class install_egg_info(Command):
"""Install an .egg-info directory for the package"""
description = "Install an .egg-info directory for the package"
user_options = [
('install-dir=', 'd', "directory to install to"),
]
def initialize_options(self):
self.install_dir = None
def finalize_options(self):
self.set_undefined_options('install_lib', ('install_dir', 'install_dir'))
ei_cmd = self.get_finalized_command("egg_info")
basename = pkg_resources.Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version
).egg_name() + '.egg-info'
self.source = ei_cmd.egg_info
self.target = os.path.join(self.install_dir, basename)
self.outputs = [self.target]
def run(self):
self.run_command('egg_info')
target = self.target
if os.path.isdir(self.target) and not os.path.islink(self.target):
dir_util.remove_tree(self.target, dry_run=self.dry_run)
elif os.path.exists(self.target):
self.execute(os.unlink, (self.target,), "Removing " + self.target)
if not self.dry_run:
pkg_resources.ensure_directory(self.target)
self.execute(self.copytree, (),
"Copying %s to %s" % (self.source, self.target)
)
self.install_namespaces()
def get_outputs(self):
return self.outputs
def copytree(self):
# Copy the .egg-info tree to site-packages
def skimmer(src, dst):
# filter out source-control directories; note that 'src' is always
# a '/'-separated path, regardless of platform. 'dst' is a
# platform-specific path.
for skip in '.svn/', 'CVS/':
if src.startswith(skip) or '/' + skip in src:
return None
self.outputs.append(dst)
log.debug("Copying %s to %s", src, dst)
return dst
unpack_archive(self.source, self.target, skimmer)
def install_namespaces(self):
nsp = self._get_all_ns_packages()
if not nsp: return
filename, ext = os.path.splitext(self.target)
        filename += '-nspkg.pth'
self.outputs.append(filename)
log.info("Installing %s", filename)
if not self.dry_run:
f = open(filename, 'wt')
for pkg in nsp:
# ensure pkg is not a unicode string under Python 2.7
pkg = str(pkg)
pth = tuple(pkg.split('.'))
trailer = '\n'
if '.' in pkg:
trailer = (
"; m and setattr(sys.modules[%r], %r, m)\n"
% ('.'.join(pth[:-1]), pth[-1])
)
f.write(
"import sys,types,os; "
"p = os.path.join(sys._getframe(1).f_locals['sitedir'], "
"*%(pth)r); "
"ie = os.path.exists(os.path.join(p,'__init__.py')); "
"m = not ie and "
"sys.modules.setdefault(%(pkg)r,types.ModuleType(%(pkg)r)); "
"mp = (m or []) and m.__dict__.setdefault('__path__',[]); "
"(p not in mp) and mp.append(p)%(trailer)s"
% locals()
)
f.close()
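    # The .pth lines emitted by install_namespaces() above start with "import",
    # so Python's site module executes them at interpreter startup. Roughly:
    # for a namespace package such as "foo.bar" (an illustrative name), each
    # line registers an empty package module when no regular __init__.py is
    # present and appends this install directory to its __path__, so that
    # separately installed distributions can share the namespace.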
def _get_all_ns_packages(self):
nsp = {}
for pkg in self.distribution.namespace_packages or []:
pkg = pkg.split('.')
while pkg:
nsp['.'.join(pkg)] = 1
pkg.pop()
nsp = list(nsp)
nsp.sort() # set up shorter names first
return nsp
| mit | 105,027,369,872,871,100 | 35.47619 | 81 | 0.518799 | false |
wysiwyng/sr13 | configReader.py | 1 | 4083 | # Copyright (C) 2105 wysiwyng
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os, ConfigParser
class ConfigReader(object):
def __init__(self, path, debug = None):
self.debug = debug
self.debugMsg("opening config file and creating configParser")
self.parser = ConfigParser.ConfigParser()
cFile = open(os.path.join(path, "mai-bot.cfg"))
self.debugMsg("config file open, checking config file")
self.parser.readfp(cFile)
if not self.parser.has_section("mai-bot-cfg"):
raise ValueError("invalid config file")
self.debugMsg("config file is valid, ready to read values")
cFile.close()
def getKey(self, key):
return self.parser.get("mai-bot-cfg", key)
def getMaxSpeed(self):
if self.parser.has_option("mai-bot-cfg", "max-speed"):
return self.parser.getint("mai-bot-cfg", "max-speed")
else:
return 255
def getDistModifier(self):
if self.parser.has_option("mai-bot-cfg", "dist-modifier"):
return self.parser.getint("mai-bot-cfg", "dist-modifier")
else:
return 10
def getDistModifierBegin(self):
if self.parser.has_option("mai-bot-cfg", "dist-mod-begin"):
return self.parser.getint("mai-bot-cfg", "dist-mod-begin")
else:
return 80
def getCamResX(self):
if self.parser.has_option("mai-bot-cfg", "cam-res-x"):
return self.parser.getint("mai-bot-cfg", "cam-res-x")
else:
return 800
def getCamResY(self):
if self.parser.has_option("mai-bot-cfg", "cam-res-y"):
return self.parser.getint("mai-bot-cfg", "cam-res-y")
else:
return 600
def getMaxTries(self):
if self.parser.has_option("mai-bot-cfg", "max-tries"):
return self.parser.getint("mai-bot-cfg", "max-tries")
else:
return 2
def getDirection(self):
if self.parser.has_option("mai-bot-cfg", "direction"):
return self.parser.get("mai-bot-cfg", "direction")
else:
return "left"
def getCommands(self):
if self.parser.has_option("mai-bot-cfg", "commands"):
return str(self.parser.get("mai-bot-cfg", "commands")).split(",")
else:
return ["nearest", "middle-left", "far-left"]
def getTokenOrder(self):
if self.parser.has_option("mai-bot-cfg", "token-order"):
return str(self.parser.get("mai-bot-cfg", "token-order")).split(",")
else:
return ["0","1","2","3","4","5"]
def getDebug(self):
if self.parser.has_option("mai-bot-cfg", "debug"):
return self.parser.getboolean("mai-bot-cfg", "debug")
else:
return False
#def getStart(self):
# if self.parser.has_option("mai-bot-cfg", "start"):
# return self.parser.get("mai-bot-cfg", "start")
# else:
# return "nearest"
#def getMaxHeight(self):
# if self.parser.has_option("mai-bot-cfg", "max-height"):
# return self.parser.getint("mai-bot-cfg", "max-height")
# else:
# return 4
def debugMsg(self, message):
if self.debug != None:
self.debug.printMsg(message, self)
def __str__(self):
return "ConfigReader"
| gpl-3.0 | -65,932,045,673,835,944 | 35.783784 | 80 | 0.582905 | false |
nefarioustim/parker | test/test_crawlpage.py | 1 | 2207 | # -*- coding: utf-8 -*-
"""Test the CrawlPage object."""
import pytest
from parker import parser, crawlpage, parsedpage
from test_client import client_fixture_crawl, client_fixture
from test_page import page_fixture_crawl, page_fixture
import utils
TEST_URI = "http://www.staples.co.uk/"
TEST_CONSUME_SELECTOR = "#PageInner .skuPage"
EXPECTED_URI_COUNT = 300
EXPECTED_URIS = set(utils.load_stub_as_json('expecteduris.json'))
@pytest.fixture(scope="function")
def crawlpage_fixture(page_fixture_crawl):
"""Test fixture to ensure correct mocking for crawlpage."""
return crawlpage.get_instance(
page_fixture_crawl
)
def test_get_instance_creates_crawlpage_object(page_fixture_crawl):
"""Test crawlpage.get_instance creates a CrawlPage object."""
test_crawlpage = crawlpage.get_instance(
page_fixture_crawl
)
expected_repr = "<class 'parker.crawlpage.CrawlPage'>(%s)" % (
TEST_URI
)
assert isinstance(test_crawlpage, crawlpage.CrawlPage)
assert isinstance(test_crawlpage.parsedpage, parsedpage.ParsedPage)
assert test_crawlpage.__repr__() == expected_repr
def test_get_instance_raises_typeerror_unexpected_parameter_type():
"""Test crawlpage.get_instance throws TypeError on unexpected param."""
with pytest.raises(TypeError):
test_crawlpage = crawlpage.get_instance(None)
def test_get_uris_returns_list_of_internal_uris(crawlpage_fixture):
"""Test crawlpage.get_uris returns a set of internal URIs."""
test_crawlpage = crawlpage_fixture
uris = test_crawlpage.get_uris(TEST_URI)
assert isinstance(uris, set)
assert len(uris) == EXPECTED_URI_COUNT
assert uris == EXPECTED_URIS
def test_has_selector_returns_false_if_not(crawlpage_fixture):
"""Test crawlpage.has_selector returns false if selector not in page."""
test_crawlpage = crawlpage_fixture
assert not test_crawlpage.has_selector(TEST_CONSUME_SELECTOR)
def test_has_selector_returns_true_if_has(page_fixture):
"""Test crawlpage.has_selector returns true if selector in page."""
test_crawlpage = crawlpage.get_instance(
page_fixture
)
assert test_crawlpage.has_selector(TEST_CONSUME_SELECTOR)
| gpl-3.0 | -7,499,028,635,234,379,000 | 31.940299 | 76 | 0.7227 | false |
panjia1983/channel_backward | exp/test.py | 1 | 2770 | import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--interactive", action="store_true")
args = parser.parse_args()
import openravepy
import trajoptpy
import json
env = openravepy.Environment()
env.StopSimulation()
env.Load('test.xml')
trajoptpy.SetInteractive(args.interactive) # pause every iteration, until you press 'p'. Press escape to disable further plotting
robot = env.GetRobots()[0]
robot.SetDOFValues([0, 0.7, 0, 0, 0, 0, 0], robot.GetManipulator('leftarm').GetArmIndices())
robot.SetDOFValues([0.5], [22])
robot.SetDOFValues([-1, 1.18, -0.44, 0, 0, 0, 0], robot.GetManipulator('rightarm').GetArmIndices())
joint_target = [-0.21, -0.075, 0, 0, 0, 0, 0]
dynamic_objects = ["mug-shelf", "mug-shelf1", "mug-shelf2", "PR2"]
static_objects = ["bookshelf"]
cost_params = []
for name in dynamic_objects:
cost_params.append({
"name" : name,
"coeffs" : [20],
"dist_pen" : [0.025],
})
for name in static_objects:
cost_params.append({
"name" : name,
"coeffs" : [20],
"dist_pen" : [0.025],
})
request = {
"basic_info": {
"n_steps": 20,
"manip": "leftarm",
"start_fixed": True,
},
"costs": [
{
"type" : "joint_vel",
"params": {"coeffs" : [1]},
},
{
"type": "continuous_collision",
"name": "cont_collision",
"params": {
"object_costs": cost_params,
}
}
],
"constraints": [
{
"type": "joint",
"params": {"vals": joint_target},
},
],
"init_info": {
"type": "straight_line",
"endpoint": joint_target,
}
}
#robot.SetDOFValues(
# [
# 0, 0, 0,
# 0, 0, 0,
# 0, 0, 0,
# 0, 0, 0,
# 0, 0, 0,
# 0, 0.7, 0,
# 0, 0, 0,
# 0, 0.5, 0,
# 0, 0, 0,
# -1, 1.18, -0.44,
# 0, 0, 0,
# 0, 0, 0,
# 0, 0, 0
# ]
#)
#robot.SetDOFValues(
# [0, 0, 0,
# 0, 0, 0,
# 0, 0, 0,
# 0, 0, 0,
# 0, 0, 0,
# -0.21, -0.075, 0,
# 0, 0.0, 0,
# 0, 0.5, 0,
# 0, 0, 0,
# -1, 1.18, -0.44,
# 0, 0, 0,
# 0, 0, 0,
# 0, 0, 0]
#)
s = json.dumps(request) # convert dictionary into json-formatted string
prob = trajoptpy.ConstructProblem(s, env) # create object that stores optimization problem
result = trajoptpy.OptimizeProblem(prob) # do optimization
print result
from trajoptpy.check_traj import traj_is_safe
prob.SetRobotActiveDOFs() # set robot DOFs to DOFs in optimization problem
assert traj_is_safe(result.GetTraj(), robot) # Check that trajectory is collision free
| bsd-2-clause | 5,083,429,379,926,947,000 | 22.87931 | 129 | 0.513718 | false |
cdeil/naima | examples/CrabNebula_proton.py | 1 | 2205 | #!/usr/bin/env python
import numpy as np
import naima
from astropy import units as u
from astropy.io import ascii
## Read data
data=ascii.read('CrabNebula_HESS_2006.dat')
## Set initial parameters
p0=np.array((474,2.34,np.log10(80.),))
labels=['norm','index','log10(cutoff)']
## Model definition
ph_energy = u.Quantity(data['energy'])
# peak gamma ph_energy production is ~0.1*Ep, so enemid corresponds to Ep=10*enemid
# If a cutoff is present, this should be reduced to reduce parameter correlation
e_0 = 5.*np.sqrt(ph_energy[0]*ph_energy[-1])
from naima.models import PionDecay, ExponentialCutoffPowerLaw
ECPL = ExponentialCutoffPowerLaw(1 / u.TeV, e_0, 2, 60. * u.TeV)
PP = PionDecay(ECPL)
distance = 2.0 * u.kpc
Epmin = ph_energy[0]*1e-2
Epmax = ph_energy[-1]*1e3
proton_energy = np.logspace(np.log10(Epmin.value),
np.log10(Epmax.value),50)*ph_energy.unit
def ppgamma(pars,data):
PP.particle_distribution.amplitude = pars[0] / u.TeV
PP.particle_distribution.alpha = pars[1]
PP.particle_distribution.e_cutoff = (10**pars[2])*u.TeV
# convert to same units as observed differential spectrum
model = PP.flux(data,distance).to('1/(s cm2 TeV)')
# Save a realization of the particle distribution to the metadata blob
proton_dist= PP.particle_distribution(proton_energy)
return model, model, (proton_energy, proton_dist)
## Prior definition
def lnprior(pars):
"""
Return probability of parameter values according to prior knowledge.
Parameter limits should be done here through uniform prior ditributions
"""
logprob = naima.uniform_prior(pars[0],0.,np.inf) \
+ naima.uniform_prior(pars[1],-1,5)
return logprob
if __name__=='__main__':
## Run sampler
sampler,pos = naima.run_sampler(data_table=data, p0=p0, labels=labels,
model=ppgamma, prior=lnprior, nwalkers=16, nburn=50, nrun=10,
threads=4)
## Save sampler
from astropy.extern import six
from six.moves import cPickle
sampler.pool=None
cPickle.dump(sampler,open('CrabNebula_proton_sampler.pickle','wb'))
## Diagnostic plots
naima.generate_diagnostic_plots('CrabNebula_proton',sampler,sed=True)
| bsd-3-clause | -4,077,216,500,975,527,400 | 26.222222 | 83 | 0.696599 | false |
wiredfool/fmod | fmod/controllers/moderate.py | 1 | 2153 | import logging
from pylons import request, response, session, url, tmpl_context as c
from pylons.controllers.util import abort, redirect
from fmod.lib.base import BaseController, render
from fmod import model
log = logging.getLogger(__name__)
class ModerateController(BaseController):
requires_auth=True
def __before__(self):
BaseController.__before__(self)
# logged in...
c.username = session['user']
if not session['mod']:
redirect(url('/ping/index'))
#if not request.method=='GET': #UNDONE POST
# throw("Error - must GET")
def _get_decision(self, id, flag):
if id == None:
raise "Error - Need an image id"
d = model.Decision()
setattr(d, flag, True)
d.image = id
d.username = c.username
d.save()
d.update_pings()
d.commit()
return d
def _remove(self, d, tag, rule=None):
img = d.getImage()
if img.in_pool():
tags = [tag, 'removed-from-strobist-pool']
if rule:
tags.append('see-rule-%s'%rule)
img.tag(tags, session['token'])
ret = img.remove_from_group(session['token'])
if ret:
return "Success"
else:
return "Could not remove from pool"
else:
return "Not in pool"
def defer(self, id=None):
#this is a noop.
return "Success"
def ok(self, id=None):
try:
self._get_decision(id, 'fl_ok')
return "Success"
except Exception, msg:
return msg
def ns(self, id=None):
try:
d = self._get_decision(id, 'fl_ns')
return self._remove(d, 'no-off-camera-flash',1)
except Exception, msg:
return msg
def nsi(self, id=None):
try:
d = self._get_decision(id, 'fl_nsi')
return self._remove(d, 'no-strobist-info',2)
except Exception, msg:
return msg
def isi(self, id=None):
try:
d = self._get_decision(id, 'fl_isi')
return self._remove(d, 'incomplete-strobist-info',2)
except Exception, msg:
return msg
def nsfw(self, id=None):
try:
d = self._get_decision(id, 'fl_nsfw')
return self._remove(d, 'NSFW',3)
except Exception, msg:
return msg
def bump(self, id=None):
try:
d = self._get_decision(id, 'fl_bump')
return self._remove(d, 'no-bumping')
except Exception, msg:
return msg
| gpl-2.0 | -7,267,325,370,894,829,000 | 20.747475 | 69 | 0.645611 | false |
schleichdi2/OpenNfr_E2_Gui-6.0 | lib/python/Screens/InputBox.py | 1 | 5801 | from enigma import eRCInput, getPrevAsciiCode
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.VirtualKeyBoard import VirtualKeyBoard
from Components.ActionMap import NumberActionMap
from Components.Label import Label
from Components.Input import Input
from Components.Pixmap import Pixmap
from Tools.BoundFunction import boundFunction
from Tools.Notifications import AddPopup
from time import time
from Components.config import config
class InputBox(Screen):
def __init__(self, session, title = "", windowTitle = _("Input"), useableChars = None, **kwargs):
Screen.__init__(self, session)
self["text"] = Label(title)
self["input"] = Input(**kwargs)
self["VKeyIcon"] = Pixmap()
self["help_text"] = Label(_("use virtual keyboard for text input"))
self.onShown.append(boundFunction(self.setTitle, windowTitle))
if useableChars is not None:
self["input"].setUseableChars(useableChars)
self["actions"] = NumberActionMap(["WizardActions", "InputBoxActions", "InputAsciiActions", "KeyboardInputActions"],
{
"gotAsciiCode": self.gotAsciiCode,
"ok": self.go,
"back": self.cancel,
"left": self.keyLeft,
"right": self.keyRight,
"home": self.keyHome,
"end": self.keyEnd,
"deleteForward": self.keyDelete,
"deleteBackward": self.keyBackspace,
"tab": self.keyTab,
"toggleOverwrite": self.keyInsert,
"showVirtualKeyboard": self.virtualKeyBoard,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1)
if self["input"].type == Input.TEXT:
if config.misc.remotecontrol_text_support.value:
self.onExecBegin.append(self.setKeyboardModeNone)
else:
self.onExecBegin.append(self.setKeyboardModeAscii)
else:
self.onExecBegin.append(self.setKeyboardModeNone)
def virtualKeyBoard(self):
self.input_text = self["input"].getText()
input_title = self["text"].getText()
self.session.openWithCallback(self.virtualKeyBoardCB, VirtualKeyBoard, title = input_title, text = self.input_text)
def virtualKeyBoardCB(self, res):
if res:
self.input_text = res
self["input"].setText(self.input_text)
self["input"].end()
def gotAsciiCode(self):
self["input"].handleAscii(getPrevAsciiCode())
def keyLeft(self):
self["input"].left()
def keyRight(self):
self["input"].right()
def keyNumberGlobal(self, number):
self["input"].number(number)
def keyDelete(self):
self["input"].delete()
def go(self):
self.close(self["input"].getText())
def cancel(self):
self.close(None)
def keyHome(self):
self["input"].home()
def keyEnd(self):
self["input"].end()
def keyBackspace(self):
self["input"].deleteBackward()
def keyTab(self):
self["input"].tab()
def keyInsert(self):
self["input"].toggleOverwrite()
class PinInput(InputBox):
def __init__(self, session, service = "", triesEntry = None, pinList = [], popup = False, simple=True, *args, **kwargs):
InputBox.__init__(self, session = session, text = " ", maxSize = True, type = Input.PIN, *args, **kwargs)
self.waitTime = 15
self.triesEntry = triesEntry
self.pinList = pinList
self["service"] = Label(service)
if service and simple:
self.skinName = "PinInputPopup"
if self.getTries() == 0:
if (self.triesEntry.time.value + (self.waitTime * 60)) > time():
remaining = (self.triesEntry.time.value + (self.waitTime * 60)) - time()
remainingMinutes = int(remaining / 60)
remainingSeconds = int(remaining % 60)
messageText = _("You have to wait %s!") % (str(remainingMinutes) + " " + _("minutes") + ", " + str(remainingSeconds) + " " + _("seconds"))
if service and simple:
AddPopup(messageText, type = MessageBox.TYPE_ERROR, timeout = 3)
self.closePinCancel()
else:
self.onFirstExecBegin.append(boundFunction(self.session.openWithCallback, self.closePinCancel, MessageBox, messageText, MessageBox.TYPE_ERROR, timeout = 3))
else:
self.setTries(3)
self["tries"] = Label("")
self.onShown.append(self.showTries)
def gotAsciiCode(self):
if self["input"].currPos == len(self["input"]) - 1:
InputBox.gotAsciiCode(self)
self.go()
else:
InputBox.gotAsciiCode(self)
def keyNumberGlobal(self, number):
if self["input"].currPos == len(self["input"]) - 1:
InputBox.keyNumberGlobal(self, number)
self.go()
else:
InputBox.keyNumberGlobal(self, number)
def checkPin(self, pin):
if pin is not None and " " not in pin and int(pin) in self.pinList:
return True
return False
def go(self):
if self.pinList:
self.triesEntry.time.value = int(time())
self.triesEntry.time.save()
if self.checkPin(self["input"].getText()):
self.setTries(3)
self.closePinCorrect()
else:
self.keyHome()
self.decTries()
if self.getTries() == 0:
self.closePinWrong()
else:
pin = self["input"].getText()
if pin and pin.isdigit():
self.close(int(pin))
else:
self.close(None)
def closePinWrong(self, *args):
print "args:", args
self.close(False)
def closePinCorrect(self, *args):
self.setTries(3)
self.close(True)
def closePinCancel(self, *args):
self.close(None)
def cancel(self):
self.closePinCancel()
def getTries(self):
return self.triesEntry and self.triesEntry.tries.value
def decTries(self):
self.setTries(self.triesEntry.tries.value - 1)
self.showTries()
def setTries(self, tries):
self.triesEntry.tries.value = tries
self.triesEntry.tries.save()
def showTries(self):
self["tries"].setText(self.triesEntry and _("Tries left:") + " " + str(self.getTries() or ""))
def keyRight(self):
pass
| gpl-2.0 | 7,238,666,065,602,169,000 | 27.576355 | 161 | 0.693156 | false |
jessepinnell/xrsrv | python/xrsrv/routine_generators/debug.py | 1 | 1552 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright (c) 2017 Jesse Pinnell
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" debugging generator """
def generate_plan(routine_environment, exercise_data, **kwargs):
""" dumps output helpful for debugging and returns all routines
"""
output = print if "output" not in kwargs else kwargs["output"].write
output(routine_environment)
for exercise_name, exercise in exercise_data.items():
output("{0}:\n {1}".format(exercise_name, exercise))
return [[]]
| mit | -889,194,884,570,660,900 | 46.030303 | 80 | 0.744845 | false |
praekelt/molo | molo/core/backends.py | 1 | 2724 | from django.contrib.auth.models import Group
from django_cas_ng.backends import CASBackend
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from django.core.exceptions import PermissionDenied
from molo.profiles.models import UserProfile
UserModel = get_user_model()
class MoloModelBackend(ModelBackend):
def authenticate(
self, request, username=None, password=None, *args, **kwargs):
if username is None:
username = kwargs.get(UserModel.USERNAME_FIELD)
if request is not None:
try:
user = UserModel._default_manager.get_by_natural_key(username)
if user.is_superuser:
UserProfile.objects.get(user=user)
else:
UserProfile.objects.get(user=user, site=request.site)
except UserProfile.DoesNotExist:
raise PermissionDenied
except UserModel.DoesNotExist:
UserModel().set_password(password)
return super(MoloModelBackend, self).authenticate(
request=request, username=username, password=password, **kwargs)
class MoloCASBackend(CASBackend):
def authenticate(self, request, ticket, service):
user = super(
MoloCASBackend, self).authenticate(request, ticket, service)
if user is None:
return None
if 'attributes' in request.session \
and 'has_perm' in request.session['attributes']\
and request.session['attributes']['has_perm'] == 'True':
if request.session['attributes']['is_admin'] == 'True':
user.email = request.session['attributes']['email']
user.is_staff = True
user.is_superuser = True
user.save()
else:
wagtail_login_only_group = Group.objects.filter(
name='Wagtail Login Only').first()
if wagtail_login_only_group and not user.groups.exists():
user.groups.add(wagtail_login_only_group)
elif not user.profile.admin_sites.filter(
pk=request.site.pk).exists():
return None
"""
TODO: Handle case where Moderator group does not exist.
We need to log this or find ways of notifying users that
the moderator group was removed or renamed.
There isn't much we can do about this case though.
"""
else:
user.is_staff = False
user.is_superuser = False
user.save()
return None
return user
| bsd-2-clause | -3,061,280,361,301,114,400 | 36.315068 | 78 | 0.585536 | false |
lem9/weblate | weblate/gitexport/views.py | 1 | 5514 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2017 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from base64 import b64decode
from email import message_from_string
import os.path
import subprocess
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.http.response import HttpResponseServerError, HttpResponse
from django.shortcuts import redirect
from django.utils.encoding import force_text
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.cache import never_cache
from weblate.trans.views.helper import get_subproject
from weblate.permissions.helpers import can_access_vcs
GIT_PATHS = [
'/usr/lib/git',
'/usr/lib/git-core',
]
def find_git_http_backend():
"""Find git http backend"""
if hasattr(find_git_http_backend, 'result'):
return find_git_http_backend.result
try:
        path = subprocess.check_output(['git', '--exec-path']).decode('utf-8').strip()
if path:
GIT_PATHS.insert(0, path)
except OSError:
pass
for path in GIT_PATHS:
name = os.path.join(path, 'git-http-backend')
if os.path.exists(name):
find_git_http_backend.result = name
return name
def response_authenticate():
"""Return 401 response with authenticate header."""
response = HttpResponse(status=401)
response['WWW-Authenticate'] = 'Basic realm="Weblate Git access"'
return response
def authenticate(request, auth):
"""Perform authentication with HTTP Basic auth"""
auth = force_text(auth, encoding='iso-8859-1')
try:
method, data = auth.split(None, 1)
if method.lower() == 'basic':
username, code = b64decode(data).decode('iso-8859-1').split(':', 1)
try:
user = User.objects.get(
username=username,
auth_token__key=code
)
except User.DoesNotExist:
return False
if not user.is_active:
return False
request.user = user
return True
else:
return False
except (ValueError, TypeError):
return False
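def _example_basic_auth_header(username, token):
    """Illustrative helper, not used by Weblate itself: build the
    Authorization header value that authenticate() above accepts, i.e.
    HTTP Basic auth carrying the user's API token as the password."""
    from base64 import b64encode
    raw = '{0}:{1}'.format(username, token).encode('iso-8859-1')
    return 'Basic ' + b64encode(raw).decode('iso-8859-1')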
@never_cache
@csrf_exempt
def git_export(request, project, subproject, path):
"""Git HTTP server view.
Wrapper around git-http-backend to provide Git repositories export over
HTTP. Performs permission checks and hands over execution to the wrapper.
"""
# Probably browser access
if path == '':
return redirect(
'subproject',
project=project,
subproject=subproject,
permanent=False
)
# HTTP authentication
auth = request.META.get('HTTP_AUTHORIZATION', b'')
if auth and not authenticate(request, auth):
return response_authenticate()
# Permissions
try:
obj = get_subproject(request, project, subproject)
except Http404:
if not request.user.is_authenticated():
return response_authenticate()
raise
if not can_access_vcs(request.user, obj.project):
raise PermissionDenied('No VCS permissions')
return run_git_http(request, obj, path)
def run_git_http(request, obj, path):
"""Git HTTP backend execution wrapper."""
# Find Git HTTP backend
git_http_backend = find_git_http_backend()
if git_http_backend is None:
return HttpResponseServerError('git-http-backend not found')
# Invoke Git HTTP backend
process = subprocess.Popen(
[git_http_backend],
env={
'REQUEST_METHOD': request.method,
'PATH_TRANSLATED': os.path.join(obj.get_path(), path),
'GIT_HTTP_EXPORT_ALL': '1',
'CONTENT_TYPE': request.META.get('CONTENT_TYPE', ''),
'QUERY_STRING': request.META.get('QUERY_STRING', ''),
'HTTP_CONTENT_ENCODING': request.META.get(
'HTTP_CONTENT_ENCODING', ''
),
},
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
output, output_err = process.communicate(request.body)
retcode = process.poll()
# Log error
if output_err:
obj.log_error('git: {0}'.format(force_text(output_err)))
# Handle failure
if retcode:
return HttpResponseServerError(output_err)
headers, content = output.split(b'\r\n\r\n', 1)
message = message_from_string(headers.decode('utf-8'))
# Handle status in response
if 'status' in message:
return HttpResponse(
status=int(message['status'].split()[0])
)
# Send content
response = HttpResponse(
content_type=message['content-type']
)
response.write(content)
return response
| gpl-3.0 | -5,599,721,127,871,912,000 | 29.114754 | 79 | 0.640537 | false |
subodhchhabra/glances | glances/compat.py | 1 | 3221 | # -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <[email protected]>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# flake8: noqa
# pylint: skip-file
"""Python 2/3 compatibility shims."""
import operator
import sys
PY3 = sys.version_info[0] == 3
if PY3:
import queue
from configparser import ConfigParser, NoOptionError, NoSectionError
from xmlrpc.client import Fault, ProtocolError, ServerProxy, Transport
from xmlrpc.server import SimpleXMLRPCRequestHandler, SimpleXMLRPCServer
input = input
range = range
map = map
text_type = str
binary_type = bytes
viewkeys = operator.methodcaller('keys')
viewvalues = operator.methodcaller('values')
viewitems = operator.methodcaller('items')
def listitems(d):
return list(d.items())
def listkeys(d):
return list(d.keys())
def listvalues(d):
return list(d.values())
def iteritems(d):
return iter(d.items())
def iterkeys(d):
return iter(d.keys())
def itervalues(d):
return iter(d.values())
def u(s):
return s
def b(s):
if isinstance(s, binary_type):
return s
return s.encode('latin-1')
def nativestr(s):
if isinstance(s, text_type):
return s
return s.decode('utf-8', 'replace')
else:
import Queue as queue
from itertools import imap as map
from ConfigParser import SafeConfigParser as ConfigParser, NoOptionError, NoSectionError
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler, SimpleXMLRPCServer
from xmlrpclib import Fault, ProtocolError, ServerProxy, Transport
input = raw_input
range = xrange
ConfigParser.read_file = ConfigParser.readfp
text_type = unicode
binary_type = str
viewkeys = operator.methodcaller('viewkeys')
viewvalues = operator.methodcaller('viewvalues')
viewitems = operator.methodcaller('viewitems')
def listitems(d):
return d.items()
def listkeys(d):
return d.keys()
def listvalues(d):
return d.values()
def iteritems(d):
return d.iteritems()
def iterkeys(d):
return d.iterkeys()
def itervalues(d):
return d.itervalues()
def u(s):
return s.decode('utf-8')
def b(s):
return s
def nativestr(s):
if isinstance(s, binary_type):
return s
return s.encode('utf-8', 'replace')
try:
# Python 2.6
from logutils.dictconfig import dictConfig
except ImportError:
from logging.config import dictConfig
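# Small illustrative example (not used by Glances) of the shims defined above:
# the same dict iteration and text handling works on both Python 2 and 3.
def _compat_demo():
    stats = {'cpu': 12.5, 'mem': 42.0}
    parts = ['{0}={1}'.format(nativestr(k), v) for k, v in iteritems(stats)]
    return ', '.join(sorted(parts))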
| lgpl-3.0 | -2,278,421,444,800,674,800 | 24.362205 | 92 | 0.667184 | false |
mahandra/recipes_video_conv | local_scripts/ipaddreslogfilenginx.py | 1 | 2172 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
__author__ = 'mah'
__email__ = '[email protected]'
import os
import logging.handlers
import re
log = logging.getLogger('Logging for check_sound')
log.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler = logging.handlers.RotatingFileHandler(os.path.splitext(os.path.basename(__file__))[0] + '.log',
maxBytes=(1048576 * 5),
backupCount=5)
handler.setFormatter(formatter)
consolehandler = logging.StreamHandler() # for stdout
consolehandler.setFormatter(formatter)
log.addHandler(consolehandler)
log.addHandler(handler)
espsip = [
'75.118.34.94',
'172.12.84.222',
'64.53.234.20',
'73.16.137.91',
'104.51.192.248',
'68.83.215.73',
'73.178.242.57',
'71.202.71.229',
'65.128.78.35',
'73.44.14.111',
'24.10.65.66',
'100.14.48.172',
'73.183.241.96',
'69.115.90.83',
'117.197.129.168',
'47.20.70.84',
'208.59.187.242',
'70.176.209.44',
'76.16.113.22',
'50.47.132.68',
'98.223.121.76',
'35.128.24.125',
'75.118.149.204',
'67.170.175.139',
'162.213.78.32',
'73.27.55.238',
'67.4.213.95',
'108.16.239.210',
'73.110.27.155',
'71.228.23.63',
'47.34.210.9',
'73.211.202.139',
'47.187.106.177',
'50.167.154.182',
'107.3.129.14',
'12.185.249.139',
'24.187.19.54',
'67.184.85.60',
'173.22.125.78',
'63.225.196.19',
'68.82.249.67',
'104.186.108.65',
'98.176.171.206',
'47.198.141.184',
'100.14.88.98',
'108.223.7.64',
'68.173.247.131',
'208.104.48.61',
'131.103.138.15',
'180.188.233.82',
'174.113.130.205',
'76.187.199.85',
]
def main():
with open('nginx_access_pf.log', 'r') as fd:
c = 1
exist =[]
for i in fd:
# print ':', i
for ip in espsip:
                m = re.search(re.escape(ip), i)  # escape dots so '.' matches literally
if m:
print i
exist.append(ip)
c += 1
# if c == 10:
# break
print 'exist:', exist
with open('existip', 'w') as f:
for i in exist:
f.write(i + '\n')
if __name__ == '__main__':
log.info('Start main')
main()
| gpl-2.0 | 5,657,711,219,305,236,000 | 19.884615 | 104 | 0.546961 | false |
ulikoehler/ODBPy | ODBPy/DrillTools.py | 1 | 2203 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Read the structured text ODB++ drill tools file
"""
import gzip
from collections import namedtuple, defaultdict
import os.path
from enum import Enum
from .Utils import readFileLines
from .StructuredTextParser import read_structured_text
from .Structures import HolePlating
__all__ = ["DrillToolSet", "DrillTool", "DrillToolType", "parse_drill_tools", "read_drill_tools"]
DrillToolSet = namedtuple("DrillToolSet", ["metadata", "tools"])
DrillTool = namedtuple("DrillTool", ["num", "type", "tooltype", "size", "info"]) # size in mil
_drill_plating_map = {
"VIA": HolePlating.Via,
"NON_PLATED": HolePlating.NonPlated,
"PLATED": HolePlating.Plated
}
class DrillToolType(Enum):
"""Drill tool type, i.e the TYPE2 field of the tools file"""
Standard = 1
Photo = 2
Laser = 3
PressFit = 4
_drill_tool_type_map = {
"STANDARD": DrillToolType.Standard,
"PHOTO": DrillToolType.Photo,
"LASER": DrillToolType.Laser,
"PRESS_FIT": DrillToolType.PressFit
}
def structured_array_to_drill_tool(array):
if array.name not in ["TOOL", "TOOLS"]:
raise ValueError("Array {} does not have TOOLS name but {}".format(
array, array.name))
info = {
k: v for k, v in array.attributes.items()
# Remove keys which are used in the tool directly
if k not in ["NUM", "TYPE", "DRILL_SIZE", "TYPE2"]
}
return DrillTool(array.attributes["NUM"],
_drill_plating_map[array.attributes["TYPE"]],
_drill_tool_type_map[array.attributes["TYPE2"]],
array.attributes["DRILL_SIZE"], info)
def parse_drill_tools(structured_text):
"""Parse a DrillToolSet from a StructuredText set"""
metadata, arrays = structured_text
tools = (structured_array_to_drill_tool(array) for array in arrays)
toolmap = {
tool.num: tool for tool in tools
}
return DrillToolSet(metadata, toolmap)
def read_drill_tools(odbpath):
"Read the drill tools from a given ODB++ directory"
stext = read_structured_text(os.path.join(odbpath, "steps/pcb/layers/through_drill/tools"))
return parse_drill_tools(stext)
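# ---------------------------------------------------------------------------
# Illustrative usage sketch; the ODB++ job path below is hypothetical.
if __name__ == "__main__":
    toolset = read_drill_tools("/path/to/odb_job")
    print("Tools file metadata:", toolset.metadata)
    for num, tool in sorted(toolset.tools.items()):
        print(num, tool.type, tool.tooltype, tool.size, tool.info)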
| apache-2.0 | -3,222,016,735,727,269,400 | 32.892308 | 97 | 0.660463 | false |
bdcht/crysp | crysp/mode.py | 1 | 7354 | # -*- coding: utf-8 -*-
# This code is part of crysp
# Copyright (C) 2013 Axel Tillequin ([email protected])
# published under GPLv2 license
from crysp.padding import nopadding,pkcs7
from io import BytesIO
from crysp.bits import pack, unpack, Bits
# -----------------------------------------------------------------------------
# Mode of Operation Core class, default padding is nopadding.
class Mode(object):
def __init__(self,cipher,pad=nopadding):
self._cipher = cipher
self.pad = pad(l=cipher.blocksize)
@property
def len(self):
return self._cipher.blocksize//8
def iterblocks(self,M,**kargs):
for B in self.pad.iterblocks(M,**kargs):
yield B
# mandatory API:
def enc(self,M):
raise NotImplementedError
def dec(self,C):
raise NotImplementedError
# xor input byte strings (over min length):
def xorstr(self,a,b):
a = bytes(a)
b = bytes(b)
return bytes([x^y for (x,y) in zip(a,b)])
# -----------------------------------------------------------------------------
# Electronic Code Book, default padding is pkcs7
class ECB(Mode):
def __init__(self,cipher,pad=pkcs7):
super().__init__(cipher,pad)
# encryption mode
def enc(self,M):
C = []
for b in self.iterblocks(M):
C.append(self._cipher.enc(b))
return b''.join(C)
# decryption mode
def dec(self,C):
n,p = divmod(len(C),self.len)
assert p==0
P = BytesIO(C)
M = []
for b in range(n):
M.append(self._cipher.dec(P.read(self.len)))
return self.pad.remove(b''.join(M))
# -----------------------------------------------------------------------------
# Electronic Code Book with Cypher Text Stealing (nopadding)
class CTS_ECB(Mode):
def __init__(self,cipher,pad=nopadding):
super().__init__(cipher,pad)
# encryption mode
def enc(self,M):
n,p = divmod(len(M),self.len)
C = []
for b in self.iterblocks(M[:n*self.len]):
C.append(self._cipher.enc(b))
if p>0:
clast = C.pop()
            b = next(self.iterblocks(M[n*self.len:]))  # iterblocks is a generator
C.append(self._cipher.enc(b+clast[p:]))
C.append(clast[0:p])
return b''.join(C)
# decryption mode
def dec(self,C):
n,p = divmod(len(C),self.len)
P = BytesIO(C)
M = []
for b in range(n):
M.append(self._cipher.dec(P.read(self.len)))
if p>0:
mlast = M.pop()
            M.append(self._cipher.dec(P.read(p)+mlast[p:]))
M.append(mlast[:p])
return b''.join(M)
# -----------------------------------------------------------------------------
# Cipher Block Chaining, default padding is pkcs7
class CBC(Mode):
def __init__(self,cipher,IV,pad=pkcs7):
super().__init__(cipher,pad)
assert len(IV)==self.len
self.IV = IV
# encryption mode
def enc(self,M):
C = [self.IV]
for b in self.iterblocks(M):
x = self.xorstr(b,C[-1])
C.append(self._cipher.enc(x))
return b''.join(C)
# decryption mode
def dec(self,C):
l = self.len
n,p = divmod(len(C),l)
assert p==0
M = []
while len(C)>l:
c = C[-l:]
C = C[:-l]
M.insert(0,self.xorstr(C[-l:],self._cipher.dec(c)))
return self.pad.remove(b''.join(M))
# -----------------------------------------------------------------------------
# Cipher Block Chaining with Cipher Text Stealing (nopadding)
class CTS_CBC(Mode):
def __init__(self,cipher,IV,pad=nopadding):
super().__init__(cipher,pad)
assert len(IV)==self.len
self.IV = IV
# encryption mode
def enc(self,M):
n,p = divmod(len(M),self.len)
C = [self.IV]
for b in self.iterblocks(M[:n*self.len]):
x = self.xorstr(b,C[-1])
C.append(self._cipher.enc(x))
if p>0:
clast = C.pop()
            b = next(self.iterblocks(M[n*self.len:])).ljust(self.len,b'\0')
x = self.xorstr(b,clast)
C.append(self._cipher.enc(x))
C.append(clast[:p])
return b''.join(C)
# decryption mode
def dec(self,C):
l = self.len
n,p = divmod(len(C),l)
M = []
if p>0:
clast = C[-p:]
C = C[:-p]
cend = C[-l:]
C = C[:-l]
mend = self._cipher.dec(cend)
mprev = self._cipher.dec(clast+mend[p:])
M.insert(0,self.xorstr(clast,mend[:p]))
M.insert(0,self.xorstr(C[-l:],mprev))
C = self.IV+C
while len(C)>l:
c = C[-l:]
C = C[:-l]
M.insert(0,self.xorstr(C[-l:],self._cipher.dec(c)))
return b''.join(M)
# -----------------------------------------------------------------------------
# Counter mode with provided iterable counter (no padding)
class DefaultCounter:
def __init__(self,bytesize,iv=None):
self.bytesize = bytesize
if iv is not None:
x = bytesize//2
assert len(iv)==bytesize
self.setup(iv[0:x],iv[x:])
def setup(self,nonce=None,count=None):
l = self.bytesize
if nonce is None:
nonce = b'\0'*(l//2)
if count is None:
count = b'\0'*(l//2)
self.nonce = nonce
self.count0 = count
return self
def reset(self):
self.count = Bits(*unpack(self.count0,'>L'))
def __call__(self):
try:
res = pack(self.count,'>L')
self.count += 1
return self.nonce+res
except AttributeError:
print("setup and reset counter is needed")
class CTR(Mode):
def __init__(self,cipher,counter=None):
super().__init__(cipher)
if counter is None:
counter = DefaultCounter(self.len)
elif isinstance(counter,bytes):
counter = DefaultCounter(self.len,counter)
self.counter = counter
# encryption mode
def enc(self,M):
self.counter.reset()
self.pad.reset()
C = []
for b in self.iterblocks(M):
c = self.counter()
k = self._cipher.enc(c)
x = self.xorstr(b,k)
C.append(x)
return b''.join(C)
# decryption mode
def dec(self,C):
self.counter.reset()
self.pad.reset()
P = self.enc(C)
n,p = divmod(len(C),self.len)
if p>0:
assert len(P)==n+1
res = P[:-p]
else:
assert len(P)==n
res = P
return res
# -----------------------------------------------------------------------------
# Chain mode of Operation Core class for Digest algorithms, nopadding default
class Chain(object):
def __init__(self,cipherclass,pad=nopadding):
self._cipherclass = cipherclass
self.pad = pad
def iterblocks(self,M,**kargs):
for b in self.pad.iterblocks(M,**kargs):
yield b
# mandatory API:
def __call__(self,M):
raise NotImplementedError
# xor input byte strings (over min length):
def xorstr(self,a,b):
a = bytes(a)
b = bytes(b)
return bytes([x^y for (x,y) in zip(a,b)])
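# -----------------------------------------------------------------------------
# Illustrative round-trip sketch (not part of the library). _ToyCipher is only
# a stand-in exposing the minimal interface the modes above rely on (blocksize
# in bits plus enc/dec over a single block); it is NOT a real cipher.
class _ToyCipher(object):
    blocksize = 64  # bits, i.e. 8-byte blocks
    def enc(self, block):
        return bytes(x ^ 0x42 for x in block)
    def dec(self, block):
        return bytes(x ^ 0x42 for x in block)
def _mode_demo():
    msg = b'attack at dawn!'
    ecb = ECB(_ToyCipher())                  # pkcs7 padding by default
    assert ecb.dec(ecb.enc(msg)) == msg
    cbc = CBC(_ToyCipher(), IV=b'\x00' * 8)  # IV length must equal the block size
    assert cbc.dec(cbc.enc(msg)) == msg
    return True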
| gpl-2.0 | 8,923,609,616,704,921,000 | 29.263374 | 79 | 0.483682 | false |
OpenMined/PySyft | packages/syft/tests/syft/core/pointer/pointer_test.py | 1 | 6660 | # stdlib
from io import StringIO
import sys
from typing import Any
from typing import List
# third party
import pytest
import torch as th
# syft absolute
import syft as sy
from syft.core.node.common.client import AbstractNodeClient
from syft.core.pointer.pointer import Pointer
def validate_output(data: Any, data_ptr: Pointer) -> None:
old_stdout = sys.stdout
sys.stdout = newstdout = StringIO()
data_ptr.print()
sys.stdout = old_stdout
assert newstdout.getvalue().strip("\n") == str(repr(data))
def validate_permission_error(data_ptr: Pointer) -> None:
old_stdout = sys.stdout
sys.stdout = newstdout = StringIO()
data_ptr.print()
sys.stdout = old_stdout
assert newstdout.getvalue().startswith("No permission to print")
@pytest.mark.slow
@pytest.mark.parametrize("with_verify_key", [True, False])
def test_make_pointable(
with_verify_key: bool,
node: sy.VirtualMachine,
client: sy.VirtualMachineClient,
root_client: sy.VirtualMachineClient,
) -> None:
ten = th.tensor([1, 2])
ptr = ten.send(root_client, pointable=False)
assert len(client.store) == 0
if with_verify_key:
ptr.update_searchability(target_verify_key=client.verify_key)
else:
ptr.update_searchability()
assert len(client.store) == 1
@pytest.mark.slow
@pytest.mark.parametrize("with_verify_key", [True, False])
def test_make_unpointable(
with_verify_key: bool,
node: sy.VirtualMachine,
client: sy.VirtualMachineClient,
root_client: sy.VirtualMachineClient,
) -> None:
ten = th.tensor([1, 2])
ptr = ten.send(root_client, pointable=False)
if with_verify_key:
ptr.update_searchability(target_verify_key=client.verify_key)
else:
ptr.update_searchability()
assert len(client.store) == 1
if with_verify_key:
ptr.update_searchability(pointable=False, target_verify_key=client.verify_key)
else:
ptr.update_searchability(pointable=False)
assert len(client.store) == 0
@pytest.mark.slow
def test_pointable_property(
client: sy.VirtualMachineClient, root_client: sy.VirtualMachineClient
) -> None:
ten = th.tensor([1, 2])
ptr = ten.send(root_client, pointable=False)
assert len(client.store) == 0
ptr.pointable = False
assert len(client.store) == 0
ptr.pointable = True
assert len(client.store) == 1
ptr.pointable = True
assert len(client.store) == 1
ptr.pointable = False
assert len(client.store) == 0
@pytest.mark.slow
@pytest.mark.xfail
def test_tags(root_client: sy.VirtualMachineClient) -> None:
ten = th.tensor([1, 2])
ten = ten.tag("tag1", "tag1", "other")
assert ten.tags == ["tag1", "other"]
# .send without `tags` passed in
ptr = ten.send(root_client)
assert ptr.tags == ["tag1", "other"]
# .send with `tags` passed in
ptr = ten.send(root_client, tags=["tag2", "tag2", "other"])
assert ten.tags == ["tag2", "other"]
assert ptr.tags == ["tag2", "other"]
th.Tensor([1, 2, 3]).send(root_client, pointable=True, tags=["a"])
th.Tensor([1, 2, 3]).send(root_client, pointable=True, tags=["b"])
th.Tensor([1, 2, 3]).send(root_client, pointable=True, tags=["c"])
th.Tensor([1, 2, 3]).send(root_client, pointable=True, tags=["d"])
sy.lib.python.Int(2).send(root_client, pointable=True, tags=["e"])
sy.lib.python.List([1, 2, 3]).send(root_client, pointable=True, tags=["f"])
a = root_client.store["a"]
b = root_client.store["b"]
c = root_client.store["c"]
d = root_client.store["d"]
e = root_client.store["e"]
result_ptr = a.requires_grad
assert result_ptr.tags == ["a", "requires_grad"]
result_ptr = b.pow(e)
assert result_ptr.tags == ["b", "e", "pow"]
result_ptr = c.pow(exponent=e)
assert result_ptr.tags == ["c", "e", "pow"]
result_ptr = root_client.torch.pow(d, e)
assert result_ptr.tags == ["d", "e", "pow"]
result_ptr = root_client.torch.pow(d, 3)
assert result_ptr.tags == ["d", "pow"]
# __len__ auto gets if you have permission
f_root = root_client.store["f"]
assert len(f_root) == 3
def test_auto_approve_length_request(client: sy.VirtualMachineClient) -> None:
remote_list = sy.lib.python.List([1, 2, 3]).send(client)
result_len_ptr = remote_list.len()
assert result_len_ptr is not None
assert result_len_ptr.get() == 3
remote_list = client.syft.lib.python.List([1, 2, 3])
result_len_ptr = remote_list.len()
assert result_len_ptr is not None
assert result_len_ptr.get() == 3
def test_description(root_client: sy.VirtualMachineClient) -> None:
ten = th.tensor([1, 2])
ten = ten.describe("description 1")
assert ten.description == "description 1"
# .send without `description` passed in
ptr = ten.send(root_client)
assert ptr.description == "description 1"
# .send with `description` passed in
ptr = ten.send(root_client, description="description 2")
assert ten.description == "description 2"
assert ptr.description == "description 2"
def test_printing(
client: sy.VirtualMachineClient, root_client: sy.VirtualMachineClient
) -> None:
data_types = [
sy.lib.python.Int(1),
sy.lib.python.Float(1.5),
sy.lib.python.Bool(True),
sy.lib.python.List([1, 2, 3]),
sy.lib.python.Tuple((1, 2, 3)),
th.tensor([1, 2, 3]),
]
for data in data_types:
validate_output(data, data.send(root_client))
for data in data_types:
validate_permission_error(data.send(client))
@pytest.mark.slow
def test_printing_remote_creation(
client: sy.VirtualMachineClient, root_client: sy.VirtualMachineClient
) -> None:
def create_data_types(client: AbstractNodeClient) -> List[Pointer]:
return [
client.syft.lib.python.Int(1),
client.syft.lib.python.Float(1.5),
client.syft.lib.python.Bool(True),
client.syft.lib.python.List([1, 2, 3]),
client.syft.lib.python.Tuple((1, 2, 3)),
client.torch.Tensor([1, 2, 3]),
]
for elem in create_data_types(root_client):
out = elem.get(delete_obj=False)
validate_output(out, elem)
for idx, elem in enumerate(create_data_types(client)):
validate_permission_error(elem)
def test_exhausted(root_client: sy.VirtualMachineClient) -> None:
int_ptr = root_client.syft.lib.python.Int(0)
int_ptr.get() # ptr gets exhausted after this call
with pytest.raises(ReferenceError) as e:
int_ptr.get()
assert str(e.value) == "Object has already been deleted. This pointer is exhausted"
| apache-2.0 | 7,374,268,223,427,589,000 | 27.583691 | 87 | 0.644745 | false |
mikeshardmind/SinbadCogs | scheduler/converters.py | 1 | 4607 | from __future__ import annotations
import argparse
import dataclasses
from datetime import datetime, timedelta, timezone
from typing import NamedTuple, Optional, Tuple
from redbot.core.commands import BadArgument, Context
from .time_utils import parse_time, parse_timedelta
class NonNumeric(NamedTuple):
parsed: str
@classmethod
async def convert(cls, context: Context, argument: str):
if argument.isdigit():
raise BadArgument("Event names must contain at least 1 non-numeric value")
return cls(argument)
class NoExitParser(argparse.ArgumentParser):
def error(self, message):
raise BadArgument()
@dataclasses.dataclass()
class Schedule:
start: datetime
command: str
recur: Optional[timedelta] = None
quiet: bool = False
def to_tuple(self) -> Tuple[str, datetime, Optional[timedelta]]:
return self.command, self.start, self.recur
@classmethod
async def convert(cls, ctx: Context, argument: str):
start: datetime
command: Optional[str] = None
recur: Optional[timedelta] = None
command, *arguments = argument.split(" -- ")
if arguments:
argument = " -- ".join(arguments)
else:
command = None
parser = NoExitParser(description="Scheduler event parsing", add_help=False)
parser.add_argument(
"-q", "--quiet", action="store_true", dest="quiet", default=False
)
parser.add_argument("--every", nargs="*", dest="every", default=[])
if not command:
parser.add_argument("command", nargs="*")
at_or_in = parser.add_mutually_exclusive_group()
at_or_in.add_argument("--start-at", nargs="*", dest="at", default=[])
at_or_in.add_argument("--start-in", nargs="*", dest="in", default=[])
try:
vals = vars(parser.parse_args(argument.split(" ")))
except Exception as exc:
raise BadArgument() from exc
if not (vals["at"] or vals["in"]):
raise BadArgument("You must provide one of `--start-in` or `--start-at`")
if not command and not vals["command"]:
raise BadArgument("You have to provide a command to run")
command = command or " ".join(vals["command"])
for delta in ("in", "every"):
if vals[delta]:
parsed = parse_timedelta(" ".join(vals[delta]))
if not parsed:
raise BadArgument("I couldn't understand that time interval")
if delta == "in":
start = datetime.now(timezone.utc) + parsed
else:
recur = parsed
if recur.total_seconds() < 60:
raise BadArgument(
"You can't schedule something to happen that frequently, "
"I'll get ratelimited."
)
if vals["at"]:
try:
start = parse_time(" ".join(vals["at"]))
except Exception:
raise BadArgument("I couldn't understand that starting time.") from None
return cls(command=command, start=start, recur=recur, quiet=vals["quiet"])
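# Illustrative, hypothetical usage sketch: example `argument` strings that
# Schedule.convert above is written to handle (the "ping" command name is assumed).
#   "ping -- --start-in 2 hours --every 1 day"    -> run "ping" in ~2 hours, then daily
#   "ping -- --start-at 2021-01-01 12:00 --quiet" -> run "ping" once at a fixed time, quietly
# Everything after " -- " is handed to the argparse-based parser; "--every"
# intervals shorter than 60 seconds are rejected to avoid ratelimits.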
class TempMute(NamedTuple):
reason: Optional[str]
start: datetime
@classmethod
async def convert(cls, ctx: Context, argument: str):
start: datetime
reason: str
parser = NoExitParser(description="Scheduler event parsing", add_help=False)
parser.add_argument("reason", nargs="*")
at_or_in = parser.add_mutually_exclusive_group()
at_or_in.add_argument("--until", nargs="*", dest="until", default=[])
at_or_in.add_argument("--for", nargs="*", dest="for", default=[])
try:
vals = vars(parser.parse_args(argument.split()))
except Exception as exc:
raise BadArgument() from exc
if not (vals["until"] or vals["for"]):
raise BadArgument("You must provide one of `--until` or `--for`")
reason = " ".join(vals["reason"])
if vals["for"]:
parsed = parse_timedelta(" ".join(vals["for"]))
if not parsed:
raise BadArgument("I couldn't understand that time interval")
start = datetime.now(timezone.utc) + parsed
if vals["until"]:
try:
                start = parse_time(" ".join(vals["until"]))
except Exception:
raise BadArgument("I couldn't understand that unmute time.") from None
return cls(reason, start)
| mit | -8,876,680,537,058,773,000 | 32.384058 | 88 | 0.573692 | false |
luoxsbupt/ibus | ui/gtk/languagebar.py | 1 | 8475 | # vim:set et sts=4 sw=4:
#
# ibus - The Input Bus
#
# Copyright(c) 2007-2008 Huang Peng <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or(at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
import gtk
import gtk.gdk as gdk
import gobject
import ibus
import icon
from handle import Handle
from menu import menu_position
from engineabout import EngineAbout
from toolitem import ToolButton,\
ToggleToolButton, \
SeparatorToolItem, \
MenuToolButton
from gettext import dgettext
_ = lambda a : dgettext("ibus", a)
N_ = lambda a : a
ICON_SIZE = gtk.ICON_SIZE_MENU
class LanguageBar(gtk.Toolbar):
__gtype_name__ = "IBusLanguagePanel"
__gsignals__ = {
"property-activate" : (
gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_STRING, gobject.TYPE_INT)),
"get-im-menu" : (
gobject.SIGNAL_RUN_LAST,
gobject.TYPE_PYOBJECT,
()),
"show-engine-about" : (
gobject.SIGNAL_RUN_LAST,
gobject.TYPE_PYOBJECT,
()),
}
def __init__ (self):
super(LanguageBar, self).__init__()
self.__show = 1
self.__enabled = False
self.__has_focus = False
self.__show_im_name = False
self.__im_name = None
self.set_style(gtk.TOOLBAR_BOTH_HORIZ)
self.set_show_arrow(False)
self.set_property("icon-size", ICON_SIZE)
self.__create_ui()
self.__properties = []
self.__toplevel = gtk.Window(gtk.WINDOW_POPUP)
self.__toplevel.connect("size-allocate", self.__toplevel_size_allocate_cb)
self.__toplevel.add(self)
root = gdk.get_default_root_window()
try:
self.__work_area = root.property_get("_NET_WORKAREA")[2]
except:
w, h = root.get_size()
self.__work_area = 0, 0, w, h
self.__position = self.__work_area[0] + self.__work_area[2] - 20, self.__work_area[1] + self.__work_area[3] - 20
self.__toplevel.move(*self.__position)
def __create_ui(self):
# create move handle
self.__handle = gtk.ToolItem()
handle = Handle()
self.__handle.add(handle)
self.insert(self.__handle, -1)
handle.connect("move-end", self.__handle_move_end_cb)
# create input methods menu
# prop = ibus.Property(key = "", type = ibus.PROP_TYPE_TOGGLE, icon = "ibus", tooltip = _("Switch input method"))
self.__im_menu = gtk.ToggleToolButton()
self.__im_menu.set_homogeneous(False)
self.__im_menu.connect("toggled", self.__im_menu_toggled_cb)
self.insert(self.__im_menu, -1)
self.__about_button = gtk.ToolButton(gtk.STOCK_ABOUT)
self.__about_button.set_no_show_all(True)
self.__about_button.set_tooltip_text(_("About the Input Method"))
self.__about_button.connect("clicked", self.__about_button_clicked_cb)
self.insert(self.__about_button, -1)
def __im_menu_toggled_cb(self, widget):
if self.__im_menu.get_active():
menu = self.emit("get-im-menu")
menu.connect("deactivate", self.__im_menu_deactivate_cb)
menu.popup(None, None,
menu_position,
0,
gtk.get_current_event_time(),
widget)
def __about_button_clicked_cb(self, widget):
if self.__enabled:
self.emit("show-engine-about")
def __im_menu_deactivate_cb(self, menu):
self.__im_menu.set_active(False)
def __handle_move_end_cb(self, handle):
x, y = self.__toplevel.get_position()
w, h = self.__toplevel.get_size()
self.__position = x + w, y + h
def __toplevel_size_allocate_cb(self, toplevel, allocation):
x, y = self.__position
if x - self.__work_area[0] >= self.__work_area[2] - 80:
self.__toplevel.move(x - allocation.width, y - allocation.height)
def __remove_properties(self):
# reset all properties
map(lambda i: i.destroy(), self.__properties)
self.__properties = []
def __set_opacity(self, opacity):
if self.__toplevel.window == None:
self.__toplevel.realize()
self.__toplevel.window.set_opacity(opacity)
def do_show(self):
gtk.Toolbar.do_show(self)
def do_size_request(self, requisition):
gtk.Toolbar.do_size_request(self, requisition)
self.__toplevel.resize(1, 1)
def set_im_icon(self, icon_name):
widget = icon.IconWidget(icon_name, 18)
self.__im_menu.set_icon_widget(widget)
def set_show_im_name(self, show):
self.__show_im_name = show
self.set_im_name(self.__im_name)
self.__im_menu.set_is_important(show)
def set_im_name(self, text):
self.__im_name = text
if text:
self.__im_menu.set_tooltip_text(text)
self.__im_menu.set_label(text)
else:
self.__im_menu.set_tooltip_text(_("Switch input method"))
self.__im_menu.set_label("")
def reset(self):
self.__remove_properties()
def set_enabled(self, enabled):
self.__enabled = enabled
if self.__enabled:
self.__about_button.show()
self.__set_opacity(1.0)
if self.__has_focus:
if self.__show in (1, 2):
self.show_all()
else:
self.__about_button.hide()
self.__set_opacity(0.5)
if self.__show in (1, 0):
self.hide_all()
def is_enabled(self):
return self.__enabled
def set_show(self, show):
if show not in (0, 1, 2):
show = 1
self.__show = show
if self.__has_focus:
self.focus_in()
else:
self.focus_out()
def get_show(self):
return self.__show
def register_properties(self, props):
self.__remove_properties()
# create new properties
for i, prop in enumerate(props):
if prop.type == ibus.PROP_TYPE_NORMAL:
item = ToolButton(prop = prop)
elif prop.type == ibus.PROP_TYPE_TOGGLE:
item = ToggleToolButton(prop = prop)
elif prop.type == ibus.PROP_TYPE_MENU:
item = MenuToolButton(prop = prop)
            elif prop.type == ibus.PROP_TYPE_SEPARATOR:
item = SeparatorToolItem()
else:
                raise ibus.IBusException("Unknown property type = %d" % prop.type)
item.connect("property-activate",
lambda w, n, s: self.emit("property-activate", n, s))
item.set_sensitive(prop.sensitive)
item.set_no_show_all(True)
if prop.visible:
item.show()
else:
item.hide()
self.__properties.append(item)
self.insert(item, i + 2)
def update_property(self, prop):
map(lambda x: x.update_property(prop), self.__properties)
def show_all(self):
self.__toplevel.show_all()
self.__toplevel.window.raise_()
gtk.Toolbar.show_all(self)
def hide_all(self):
x, y = self.__toplevel.get_position()
self.__toplevel.hide_all()
gtk.Toolbar.hide_all(self)
# save bar position
self.__toplevel.move(x, y)
def focus_in(self):
self.__has_focus = True
self.__im_menu.set_sensitive(True)
if self.__enabled:
if self.__show in (1, 2):
self.show_all()
else:
self.hide_all()
def focus_out(self):
self.__has_focus = False
self.__im_menu.set_sensitive(False)
if self.__show in (0, 1):
self.hide_all()
else:
self.show_all()
| lgpl-2.1 | 1,944,587,740,358,431,500 | 31.224335 | 121 | 0.56649 | false |
DaveBuckingham/robosoft | record_mode.py | 1 | 8314 | """
Provides functions for
1) recording outputs to file
2) replaying outputs from files
"""
import global_data
import mctransmitter
import datetime
import os
import errno
import time
import threading
import ui_display
playback_file_tag = None
save_filename_prefix = 'botwurst_command_record_'
default_save_directory = 'botwurst_command_recordings'
save_file_extension = '.dat'
# TODO: set recording limit
def make_directory(directory_name):
try:
os.makedirs(directory_name + '/')
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def set_default_save_directory(directory_name):
    global default_save_directory
    default_save_directory = directory_name
# HELPER FUNCTION FOR LOOKING AT GLOBAL VARIABLES
def print_global_record_variables():
print "RECORDING VARIABLE SETTINGS"
print "===================="
print "Recording: ", global_data.record
print "Will store in file numbered: ", global_data.record_file_number, " in directory: ", default_save_directory
print "Initial time: ", global_data.record_start_time
print "Recording array is empty: ", (len(global_data.record_array) == 0)
print "===================="
# RECORDING FUNCTIONS
def initialize_record_mode(file_number):
"""
Sets all the global_data variables to reflect that we are now recording
Creates the specified directory in which the recording file is to be saved, if directory does not exist
:param file_number: Tag for file where recording will be stored
"""
    # If record_array is not empty, back it up to a file first.
    if global_data.record_array:
        file_tag = str(global_data.record_file_number) + "_backup"
        create_record_file(file_tag)
global_data.record = True
global_data.record_file_number = file_number
global_data.record_start_time = datetime.datetime.now()
# if save_directory already exists as subdirectory, nothing will happen
make_directory(default_save_directory)
def append_instruction(instruction):
"""
Appends the instruction to record array in global data with time step from 0
:param instruction: triple (PIN TYPE, PIN INDEX, VAL)
"""
time_stamp = datetime.datetime.now()
# TODO: look into note about datetime subtraction (is exact but may overflow)
time_diff = time_stamp - global_data.record_start_time
pin_type = instruction[0]
pin_index = instruction[1]
value = instruction[2]
record_instruction = (pin_type, pin_index, value, time_diff.total_seconds())
global_data.record_array.append(record_instruction)
# 2) CREATE A FILE FROM RECORD ARRAY
def create_record_file(file_tag=None, save_directory=None):
"""
Creates a file with the list of instructions in record_array
:param file_tag: defaults to file_number in global data
"""
if file_tag is None:
file_tag = global_data.record_file_number
if save_directory is None:
save_directory = default_save_directory
record_filename = save_directory + '/' + save_filename_prefix + str(file_tag) + save_file_extension
# Create new file, or overwrite file if it exists
with open(record_filename, 'w') as recording_file:
# Copy all commands to the file
for command in global_data.record_array:
recording_file.write(str(command) + '\n')
# Reinitialize all record variables
global_data.record = False
global_data.record_file_number = None
global_data.record_start_time = None
global_data.record_array = []
# 2) PLAYBACK FUNCTIONS
def clear_playback_array():
global_data.playback_array = []
def populate_playback_array_from_file(filename, is_file_tag=False, save_directory=None):
"""
Appends instructions from current file to playback array
:param filename: name of file containing recording information
:param is_file_tag: True if only using number to identify file (default False)
:param save_directory: default directory specified in global data
"""
if save_directory is None:
save_directory = default_save_directory
if is_file_tag:
filename = save_filename_prefix + str(filename)
playback_file = open(save_directory + '/' + str(filename) + save_file_extension, 'r')
playback_file_lines = playback_file.readlines()
for line in playback_file_lines:
global_data.playback_array.append((eval(line.rstrip())))
def playback_instruction(pin_type, pin_index, value):
if pin_type == 'd':
# print "DIGITAL, PIN_INDEX: ", pin_index, "VALUE: ", value
mctransmitter.tx_digital(pin_index, value)
elif pin_type == 'a':
# print "ANALOG, PIN_INDEX: ", pin_index, "VALUE: ", value
mctransmitter.tx_analog(pin_index, value)
class Playback_From_Array(threading.Thread):
def __init__(self, parent, queue):
threading.Thread.__init__(self)
self._queue = queue
self._parent = parent
self.start()
def run(self):
curr_time_stamp = 0
for instruction in self._queue:
while global_data.playback_paused:
if global_data.playback_cancel:
break
time.sleep(.1)
if global_data.playback_cancel:
break
temp_time_stamp = instruction[3]
time_diff = (temp_time_stamp - curr_time_stamp)
time.sleep(time_diff)
playback_instruction(instruction[0], instruction[1], instruction[2])
curr_time_stamp = temp_time_stamp
ui_display.update()
clear_playback_array()
global_data.playback = False
global_data.playback_file_number = None
ui_display.update()
def playback_from_file(filename, is_file_tag=False, save_directory=None):
clear_playback_array()
global_data.playback = True
global_data.playback_file_number = filename
populate_playback_array_from_file(filename, is_file_tag, save_directory)
playback_thread = Playback_From_Array(None, global_data.playback_array)
return playback_thread
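# Illustrative, hypothetical usage sketch; it assumes a microcontroller connection
# that mctransmitter can transmit to, and file tag 1 is an arbitrary example.
#   initialize_record_mode(1)                      # start recording under tag 1
#   append_instruction(('d', 0, True))             # record: digital pin 0 high
#   append_instruction(('a', 1, 128))              # record: analog pin 1 -> 128
#   create_record_file()                           # writes botwurst_command_record_1.dat
#   playback_thread = playback_from_file(1, is_file_tag=True)
#   playback_thread.join()                         # replays the recorded outputs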
# TESTING FUNCTIONS: TO REMOVE
# class Print_Hello_Every_Sec(threading.Thread):
# def __init__(self, parent, queue):
# threading.Thread.__init__(self)
# self._queue = queue
# self._parent = parent
# self.start()
#
# def run(self):
# for i in range(15):
# print "**********HELLO THERE**************"
# time.sleep(1)
#
# class Pause_Unpause(threading.Thread):
# def __init__(self, parent, queue):
# threading.Thread.__init__(self)
# self._queue = queue
# self._parent = parent
# self.start()
#
# def run(self):
# time.sleep(2)
# global_data.playback_paused = True
# print "PAUSING"
# time.sleep(5)
# global_data.playback_cancel = True
# print "CANCELLING"
# time.sleep(5)
# print "UNPAUSING"
# global_data.playback_paused = False
#
#
# def create_dummy_instruction_file(file_tag):
# short_delay = 0.1
# long_delay = 1
#
# initialize_record_mode(file_tag)
# print_global_record_variables()
#
# i = 1
# j = 0
#
# for iterator in range(10):
# i_is_even = (1 == i%2)
#
# digital_instruction = ('d', 0, i_is_even)
# append_instruction(digital_instruction)
#
# time.sleep(short_delay)
#
# digital_instruction = ('d', 1, not i_is_even)
# append_instruction(digital_instruction)
#
# time.sleep(short_delay)
#
# val = abs((j % 510) - 255)
#
# analog_instruction = ('a', 0, val)
# append_instruction(analog_instruction)
#
# time.sleep(short_delay)
#
# analog_instruction = ('a', 1, 255 - val)
# append_instruction(analog_instruction)
#
# time.sleep(long_delay)
#
# i = i + 1
# j = j + 20
#
# create_record_file()
#
# def main():
# test_file_tag = 5
# # create_dummy_instruction_file(test_file_tag)
#
# pause_thread = Pause_Unpause(None, None)
# playback_thread = playback_from_file(test_file_tag, True)
# print_hello_thread = Print_Hello_Every_Sec(None, None)
#
# print_hello_thread.join()
# playback_thread.join()
# pause_thread.join()
#
# print_global_record_variables()
#
#
# main()
| mit | 2,830,740,662,388,459,500 | 28.799283 | 116 | 0.636998 | false |
Kumapapa2012/Learning-Machine-Learning | Reversi/agent_lrelu_0_1.py | 1 | 15736 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import copy
import numpy as np
np.random.seed(0)
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import cuda
from chainer import optimizers
from rlglue.agent.Agent import Agent
from rlglue.agent import AgentLoader as AgentLoader
from rlglue.types import Action
from rlglue.types import Observation
from rlglue.utils import TaskSpecVRLGLUE3
# QNet
# Neural network class.
class QNet(chainer.Chain):
    # __init__(n_in, n_units, n_out)
    #  n_in: input layer size
    #  n_units: hidden layer size
    #  n_out: output layer size
def __init__(self, n_in, n_units, n_out):
super(QNet, self).__init__(
l1=L.Linear(n_in, n_units),
l20=L.Linear(n_units, n_units),
l21=L.Linear(n_units, n_units),
l22=L.Linear(n_units, n_units),
l23=L.Linear(n_units, n_units),
l24=L.Linear(n_units, n_units),
l25=L.Linear(n_units, n_units),
l26=L.Linear(n_units, n_units),
l27=L.Linear(n_units, n_units),
l3=L.Linear(n_units, n_out),
)
    # value(x)
    #  x: input layer values
    # Forward computation through the neural network.
    # Return: result of the output layer
def value(self, x):
h = F.leaky_relu(self.l1(x),slope=0.1) #slope=0.2(default)
h = F.leaky_relu(self.l20(h),slope=0.1)
h = F.leaky_relu(self.l21(h),slope=0.1)
h = F.leaky_relu(self.l22(h),slope=0.1)
h = F.leaky_relu(self.l23(h),slope=0.1)
h = F.leaky_relu(self.l24(h),slope=0.1)
h = F.leaky_relu(self.l25(h),slope=0.1)
h = F.leaky_relu(self.l26(h),slope=0.1)
h = F.leaky_relu(self.l27(h),slope=0.1)
return self.l3(h)
    # __call__(s_data, a_data, y_data)
    #  s_data: states
    #  a_data: actions
    #  y_data: teacher data (max Q value of the next action)
    # Training callback:
    #  1. forward-propagate s_data (Q, Q_data)
    #  2. copy Q_data into t_data
    #  3. replace t_data[i, a_data[i]] with the Q value in y_data[i] to build the teacher data (t)
    #  4. compute the squared error between Q and t
    #
    # Return: squared-error (loss) result
def __call__(self, s_data, a_data, y_data):
self.loss = None
s = chainer.Variable(self.xp.asarray(s_data))
Q = self.value(s)
Q_data = copy.deepcopy(Q.data)
if type(Q_data).__module__ != np.__name__:
Q_data = self.xp.asnumpy(Q_data)
t_data = copy.deepcopy(Q_data)
for i in range(len(y_data)):
t_data[i, a_data[i]] = y_data[i]
t = chainer.Variable(self.xp.asarray(t_data))
self.loss = F.mean_squared_error(Q, t)
print('Loss:', self.loss.data)
return self.loss
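# Illustrative, hypothetical sketch of how QNet is driven during learning, with
# assumed sizes for a 6x6 board, 9 stacked frames and a batch of 2:
#   q = QNet(n_in=6*6*4*9, n_units=6*6*4*9, n_out=6*6)
#   s = np.random.rand(2, 9, 6*6*4).astype(np.float32)  # two stacked-frame states
#   a = np.array([5, 17], dtype=np.int32)                # squares that were played
#   y = np.array([0.5, -1.0], dtype=np.float32)          # target Q values for those squares
#   loss = q(s, a, y)                                     # mean squared error to minimize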
# Agent class
class KmoriReversiAgent(Agent):
    # __init__(gpu, size)
    #  gpu: GPU number (0 or higher; -1 to use the CPU)
    #  size: length of one side of the square board (an even number, 6 or larger)
    # Initializes the agent and defines the learning setup.
def __init__(self, gpu, size):
        # The size must be an even number, 6 or larger.
        if size < 6 or size % 2 != 0: print("size must be even number and 6 or above!"); exit()
        # Board information (8 for standard Othello)
        self.n_rows = int(size)
        self.n_cols = self.n_rows
        # Input size for learning
        self.dim = self.n_rows * self.n_cols # board size = output layer size
        self.bdim = self.dim * 4 # size of the training data
self.gpu = gpu
        # Number of steps before learning starts
        self.learn_start = 5 * 10**3
        # Number of transitions to keep (changed)
        self.capacity = 2 * 10**4
        # eps = probability of choosing a move at random
        self.eps_start = 1.0
        self.eps_end = 0.001
        self.eps = self.eps_start
        # Number of past actions (frames) used as the learning input
        self.n_frames = 9
        # Amount of data used in one learning step
        self.batch_size = 128
self.replay_mem = []
self.last_state = None
self.last_action = None
self.reward = None
self.state = np.zeros((1, self.n_frames, self.bdim)).astype(np.float32)
self.step_counter = 0
self.update_freq = 1 * 10**4
self.r_win = 1.0
self.r_draw = -0.5
self.r_lose = -1.0
self.frozen = False
self.win_or_draw = 0
self.stop_learning = 200
    # agent_init(task_spec_str)
    #  task_spec_str: task information passed in from RL_Glue
    # Initializes the game information.
def agent_init(self, task_spec_str):
task_spec = TaskSpecVRLGLUE3.TaskSpecParser(task_spec_str)
if not task_spec.valid:
raise ValueError(
'Task spec could not be parsed: {}'.format(task_spec_str))
        self.gamma = task_spec.getDiscountFactor() # discount factor
        # Create the DQN
        #  Arg1: input layer size
        #  Arg2: number of hidden-layer nodes
        #  Arg3: output layer size
self.Q = QNet(self.bdim*self.n_frames, self.bdim*self.n_frames, self.dim)
if self.gpu >= 0:
cuda.get_device(self.gpu).use()
self.Q.to_gpu()
self.xp = np if self.gpu < 0 else cuda.cupy
self.targetQ = copy.deepcopy(self.Q)
self.optimizer = optimizers.RMSpropGraves(lr=0.00025, alpha=0.95,
momentum=0.0)
self.optimizer.setup(self.Q)
    # agent_start(observation)
    #  observation: game state (board state, etc.)
    # Called right after env_start in environment.py.
    # Decides and plays the first-move Action.
    # Returns the played Action to RL_Glue as the agent's information.
def agent_start(self, observation):
        # Increment the step counter
        self.step_counter += 1
        # kmori: update the state using our own observation format.
        # Partly follows the sample code; the rest was built in a different way.
        self.update_state(observation)
        self.update_targetQ()
        # Decide which move to play.
int_action = self.select_int_action()
action = Action()
action.intArray = [int_action]
        # Update eps, the probability of playing a random move.
        self.update_eps()
        # Save state (the board state) and action (the square that was played).
self.last_state = copy.deepcopy(self.state)
self.last_action = copy.deepcopy(int_action)
return action
    # agent_step(reward, observation)
    #  reward: reward
    #  observation: game state (board state, etc.)
    # Called for the agent's second and later moves, until the game ends.
    # (For Reversi the reward is always zero here.)
def agent_step(self, reward, observation):
        # Increment the step counter
        self.step_counter += 1
        self.update_state(observation)
        self.update_targetQ()
        # Decide which move to play.
        int_action = self.select_int_action() # a return value of -1 means pass.
action = Action()
action.intArray = [int_action]
self.reward = reward
        # Update eps
        self.update_eps()
        # Store the transition (state, action, reward, result)
        self.store_transition(terminal=False)
        if not self.frozen:
            # Run a learning step
if self.step_counter > self.learn_start:
self.replay_experience()
self.last_state = copy.deepcopy(self.state)
self.last_action = copy.deepcopy(int_action)
        # Pass the position of the played stone back as the agent's action
return action
    # agent_end(reward)
    #  reward: reward
    # Called when the game has ended.
    def agent_end(self, reward):
        # Reward received from the environment
self.reward = reward
if not self.frozen:
if self.reward >= self.r_draw:
self.win_or_draw += 1
else:
self.win_or_draw = 0
if self.win_or_draw == self.stop_learning:
self.frozen = True
f = open('result.txt', 'a')
f.writelines('Agent frozen\n')
f.close()
        # Store the transition (state, action, reward, result)
        self.store_transition(terminal=True)
        if not self.frozen:
            # Run a learning step
if self.step_counter > self.learn_start:
self.replay_experience()
def agent_cleanup(self):
        # (To be implemented.)
        # Should be called via RL_Cleanup.
        # Saving the model here would probably be a good idea.
pass
def agent_message(self, message):
pass
    # update_state(observation=None)
    #  observation: game state (board state, etc.)
    # Stores the game state into self.state.
    def update_state(self, observation=None):
        # Save the state used for learning.
        if observation is None:
            frame = np.zeros((1, 1, self.bdim)).astype(np.float32)
        else:
            # Build the learning data from the observation contents.
observation_binArray=[]
pageSize=self.n_rows*self.n_cols
            # Positions of the stones
            for i in range(0,pageSize):
                observation_binArray.append(int(observation.intArray[i]))
                observation_binArray.append(int(observation.intArray[pageSize+i]))
            # Squares where a stone can be placed
for i in range(0,pageSize):
observation_binArray.append(int(observation.intArray[2*pageSize+i]))
observation_binArray.append(int(observation.intArray[3*pageSize+i]))
frame = (np.asarray(observation_binArray).astype(np.float32)
.reshape(1, 1, -1))
self.state = np.hstack((self.state[:, 1:], frame))
    # update_eps()
    # Updates epsilon of the epsilon-greedy policy based on the total number of moves played.
def update_eps(self):
if self.step_counter > self.learn_start:
if len(self.replay_mem) < self.capacity:
self.eps -= ((self.eps_start - self.eps_end) /
(self.capacity - self.learn_start + 1))
    # update_targetQ()
    # Every update_freq steps, copies the current Q network into targetQ (the network used to estimate target Q values).
def update_targetQ(self):
if self.step_counter % self.update_freq == 0:
self.targetQ = copy.deepcopy(self.Q)
    # select_int_action()
    # Uses the DQN to choose a move from the current board state,
    # i.e. decides where to place a stone.
    def select_int_action(self):
        bits = self.state[0, -1] # the last element of state, i.e. the current board information
        # Collect the empty squares that can be played.
        # In this Othello the playable squares are included in the Observation, so use them.
free=[]
freeInBoard=bits[(2*self.n_rows*self.n_cols):]
for i in range(0, len(freeInBoard), 2) :
if int(freeInBoard[i]) == 1 :
free.append(i//2)
        # If there is nowhere to play, pass here.
if len(free)==0:
# pass...
return -1
        # Compute the Q values
s = chainer.Variable(self.xp.asarray(self.state))
Q = self.Q.value(s)
# Follow the epsilon greedy strategy
if np.random.rand() < self.eps:
int_action = free[np.random.randint(len(free))]
else:
            # Q values of the first (and only) batch element
Qdata = Q.data[0]
if type(Qdata).__module__ != np.__name__:
Qdata = self.xp.asnumpy(Qdata)
            # Decide the action:
            # among the squares where a stone can be placed, pick the one with the highest Q value.
for i in np.argsort(-Qdata):
if i in free:
int_action = i
break
return int_action
def store_transition(self, terminal=False):
if len(self.replay_mem) < self.capacity:
self.replay_mem.append(
(self.last_state, self.last_action, self.reward,
self.state, terminal))
else:
            # self.replay_mem[1:] drops the oldest element, then the new one is appended,
            # so the list rotates in FIFO fashion.
self.replay_mem = (self.replay_mem[1:] +
[(self.last_state, self.last_action, self.reward,
self.state, terminal)])
def replay_experience(self):
        # Randomly sample batch_size elements from the replay memory.
indices = np.random.randint(0, len(self.replay_mem), self.batch_size)
samples = np.asarray(self.replay_mem)[indices]
s, a, r, s2, t = [], [], [], [], []
for sample in samples:
s.append(sample[0])
a.append(sample[1])
r.append(sample[2])
s2.append(sample[3])
t.append(sample[4])
s = np.asarray(s).astype(np.float32)
a = np.asarray(a).astype(np.int32)
r = np.asarray(r).astype(np.float32)
s2 = np.asarray(s2).astype(np.float32)
t = np.asarray(t).astype(np.float32)
        # Use the target network targetQ to compute the Q values of s2
s2 = chainer.Variable(self.xp.asarray(s2))
Q = self.targetQ.value(s2)
Q_data = Q.data
if type(Q_data).__module__ == np.__name__:
max_Q_data = np.max(Q_data, axis=1)
else:
max_Q_data = np.max(self.xp.asnumpy(Q_data).astype(np.float32), axis=1)
        # Build the teacher data t from the Q values estimated by targetQ
t = np.sign(r) + (1 - t)*self.gamma*max_Q_data
self.optimizer.update(self.Q, s, a, t)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Deep Q-Learning')
parser.add_argument('--gpu', '-g', default=-1, type=int,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--size', '-s', default=6, type=int,
help='Reversi board size')
args = parser.parse_args()
AgentLoader.loadAgent(KmoriReversiAgent(args.gpu,args.size))
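# Hypothetical run procedure (the companion script names and exact commands are
# assumptions): this agent is meant to be launched under RL-Glue together with a
# matching Reversi environment and experiment program, roughly:
#   rl_glue                                        # start the RL-Glue core
#   python agent_lrelu_0_1.py --gpu -1 --size 6    # this agent on CPU, 6x6 board
#   python environment.py / python experiment.py   # companion scripts (names assumed)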
| mit | 504,017,417,974,920,600 | 30.820388 | 94 | 0.513312 | false |
PDIS/pdis-bot | SayIt-Discourse/SayitDiscourse.py | 1 | 2296 | # encoding: utf-8
from HTMLParser import HTMLParser
import time
import urllib2
import requests
import json
import yaml
from lxml import etree
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
try:
s = MLStripper()
s.feed(html)
return s.get_data()
except:
return html
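# Illustrative note: strip_tags above is defined but not called later in this
# script; it strips markup and returns the input unchanged if parsing fails, e.g.
#   strip_tags('<p>Hello <b>world</b></p>')  # -> 'Hello world'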
with open('config.json', 'r') as f:
config = json.load(f)
hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
request = urllib2.Request(config['sayit-url']+"speeches", headers=hdr)
response = urllib2.urlopen(request)
html = response.read()
#print html
page = etree.HTML(html)
for txt in page.xpath(u"//li/span/a"):
txt_title = txt.text
txt_url = config['sayit-url'] + txt.values()[0]
if txt.text[:6] == "2016-1":
txt_date = txt.text[:10]
raw = {}
raw['title'] = txt_title
raw['date'] = txt_date
raw['category'] = None
raw['tags'] = ['transcript']
raw['participants'] = ['PDIS']
raw['content'] = [
{"Transcript":txt_url}
]
# print yaml.dump(raw,default_flow_style=False)
# post to pdis discourse
url = "https://talk.pdis.nat.gov.tw/posts?api_key="+config['discourse-api-key']+\
"&api_username="+config['discourse-api-username']
post_details = {
"title": txt_title,
"raw": yaml.safe_dump(raw, encoding='utf-8', allow_unicode=True, default_flow_style=False),
"category": config['discourse-category-id']
}
resp = requests.post(url, data=post_details, allow_redirects=True, verify=False)
print post_details
time.sleep(3)
else:
print '{"url":"' + 'https://sayit.pdis.nat.gov.tw'+txt.values()[0]+'", "title":"'+txt.text + '"}'
| mit | -5,676,584,200,813,038,000 | 29.210526 | 128 | 0.577526 | false |