repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (15 classes) | hash (int64) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
dhazel/buck | bucking/stable/buck_1_4.py | 1 | 6187 | #
# based on Li = [0 0 0 0 0]
#
# while Li(1)
# check iteration number
#      either load start length or decrease length by one value
# calculate length price
#
# while Li(2)
# check iteration number
#        either load start length or decrease length by one value
# calculate length price
#
# while Li(3)
# check iteration number
#        either load start length or decrease length by one value
# calculate length price
#
# while Li(4)
# check iteration number
#          either load start length or decrease length by one value
# calculate length price
#
# while Li(5)
# check iteration number
#            either load start length or decrease length by one value
# calculate length price
#
# end
# end
# end
# end
# end
#
#
#from numarray import *
from copy import *
import sys
import os
import math
import interpolate
import buck1p
import logvolume_2
import buckPCh
def buck_1_4(L,log,log_dia,gui_mode):
prices = buckPCh.get_prices()
Li = [L,0,0,0,0,0] #length iterators
p16 = prices[0]
p30 = prices[1]
p36 = prices[2]
it = [0,0,0,0,0,0] #iteration tracker
p = [0,0,0,0,0,0] #price tracker
p1 = [0,0,0,0,0,0]
v = [0,0,0,0,0,0] #volume tracker
v1 = [0,0,0,0,0,0]
td = [0,0,0,0,0,0] #top diameter tracker
td1 = [0,0,0,0,0,0]
Lf = [0,0,0,0,0,0] #lengths tracker
Lf2 = [0,0,0,0,0,0] #secondary lengths tracker
lognum = 2 #log number control variable
s=0
while s >= 0:
if Li[s] <= (16 + (0.8333)):
it[s] = 0
s = s - 1
if it[s] == 0: #either load start length or
if Li[s] <= (40 + (0.8333)): #if log is within 40ft long
# use the total
Li[s] = round(Li[s] - ((0.8333) - 0.5))
#normalize the rounding to 10inch over
Li[s] = Li[s] - (1 - (0.8333))
#ensure length divisible by 2
if ((1e-5) <= (math.fmod(Li[s],2) - (0.8333))):
#set start log length
Li[s] = Li[s] - 1
else:
Li[s] = (40 + (0.8333))
else:
Li[s] = Li[s] - 2 #decrease length by one value
it[s] = it[s] + 1
# print 'log loop %i\n' %s
# print 'Li[s] = %0.4f\n' %Li[s]
#calculate length price
dia = interpolate.interpolate(sum(Li),log,log_dia)
dia = int(dia) #-->FIXME: Look at this later
td[s] = dia
v[s] = logvolume_2.logvolume_2(Li[s],dia)
p[s] = buck1p.buck1p(Li[s],v[s],p16,p30,p36)
Li[s+1] = L - sum(Li) #bump remaining length ahead
sum_p = sum(p)
if sum_p >= sum(p1):
p2 = copy(p1)
p1 = copy(p)
v2 = copy(v1)
v1 = copy(v)
td2 = copy(td1)
td1 = copy(td)
Lf2 = copy(Lf)
Lf = copy(Li)
elif sum_p > sum(p2):
p2 = copy(p)
v2 = copy(v)
td2 = copy(td)
Lf2 = copy(Li)
if s <= (lognum):
s = s + 1
while (((s >= 0) & (Li[s] <= 16.8333)) | (s == lognum)):
Li[s] = 0
#clear all previous log lengths from the top of the tree
if (s+1) < len(Li):
Li[s+1] = 0
p[s] = 0
v[s] = 0
td[s] = 0
it[s] = 0
s = s - 1
if gui_mode == 1 :
        # make grandiose graphical table of data...
file = open(sys.path[0]+os.sep+"output.txt",mode='w')
i = 0
for entry in v1: # clean up output to be more user-friendly (clarity)
if entry == 0:
Lf[i] = 0
i = i + 1
i = 0
for entry in v2: # clean up output to be more user-friendly (clarity)
if entry == 0:
Lf2[i] = 0
i = i + 1
print >>file
print >>file, "first choice..."
print >>file, "Lengths are: [%i, %i, %i, %i, %i]" %(Lf[0], Lf[1], Lf[2], Lf[3], Lf[4]), "total:", sum(Lf)
print >>file, "Volumes are:", v1, "total:", sum(v1)
print >>file, "Top diams are:", td1
print >>file, "Prices are: [%3.3f, %3.3f, %3.3f, %3.3f, %3.3f]" %(p1[0], p1[1], p1[2], p1[3], p1[4]), "total:", sum(p1)
print >>file
print >>file, "second choice..."
print >>file, "Lengths are: [%i, %i, %i, %i, %i]" %(Lf2[0], Lf2[1], Lf2[2], Lf2[3], Lf2[4]), "total:", sum(Lf2)
print >>file, "Volumes are:", v2, "total:", sum(v2)
print >>file, "Top diams are:", td2
print >>file, "Prices are: [%3.3f, %3.3f, %3.3f, %3.3f, %3.3f]" %(p2[0], p2[1], p2[2], p2[3], p2[4]), "total:", sum(p2)
print >>file
file.close()
os.system("kwrite "+sys.path[0]+os.sep+"output.txt &")
else:
print
print "first choice..."
print "Lengths are: [%i, %i, %i, %i, %i]" %(Lf[0], Lf[1], Lf[2], Lf[3], Lf[4]), "total:", sum(Lf)
print "Volumes are:", v1, "total:", sum(v1)
print "Top diams are:", td1
print "Prices are: [%3.3f, %3.3f, %3.3f, %3.3f, %3.3f]" %(p1[0], p1[1], p1[2], p1[3], p1[4]), "total:", sum(p1)
print
print "second choice..."
print "Lengths are: [%i, %i, %i, %i, %i]" %(Lf2[0], Lf2[1], Lf2[2], Lf2[3], Lf2[4]), "total:", sum(Lf2)
print "Volumes are:", v2, "total:", sum(v2)
print "Top diams are:", td2
print "Prices are: [%3.3f, %3.3f, %3.3f, %3.3f, %3.3f]" %(p2[0], p2[1], p2[2], p2[3], p2[4]), "total:", sum(p2)
print
| gpl-2.0 | 8,964,877,433,528,988,000 | 32.372222 | 127 | 0.431388 | false |
cjolowicz/wald | wald/backend.py | 1 | 1673 | # pylint: disable=too-few-public-methods
'''Backends for documents.'''
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship, backref
__base_class__ = declarative_base()
class Node(__base_class__):
'''A node in the document.'''
__tablename__ = 'nodes'
node_id = Column(Integer, primary_key=True)
parent_id = Column(Integer, ForeignKey('nodes.node_id'))
name = Column(String)
content = Column(String)
children = relationship(
'Node',
cascade='all',
backref=backref('parent', remote_side='Node.node_id'))
def __init__(self, name, *args, **kwargs):
super(Node, self).__init__(*args, name=name, **kwargs)
class Document(object):
'''A hierarchically structured document.'''
def __init__(self, filename=None):
self._filename = filename or ':memory:'
self._engine = create_engine('sqlite:///%s' % self._filename, echo=True)
session_class = sessionmaker(bind=self._engine)
__base_class__.metadata.create_all(self._engine)
self._session = session_class()
@property
def roots(self):
'''Return the root nodes of the document.'''
return self._session.query(Node).filter(
Node.parent_id.is_(None))
def add(self, node):
'''Add the node to the document.'''
self._session.add(node)
def remove(self, node):
'''Remove the node from the document.'''
self._session.delete(node)
def save(self):
'''Save the document.'''
self._session.commit()
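# Minimal usage sketch (filename is illustrative, not part of the library):
#   document = Document('example.db')
#   document.add(Node('root'))
#   document.save()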
| gpl-2.0 | 854,792,258,371,098,100 | 29.981481 | 80 | 0.62642 | false |
johnpeck/cgrlib | cgrlib/utils.py | 1 | 26763 | # utils.py
#
# Utility functions for use with the CGR-101 USB oscilloscope
import logging # The python logging module
import serial # Provides serial class Serial
import time # For making pauses
from datetime import datetime # For finding calibration time differences
import binascii # For hex string conversion
import pickle # For writing and reading calibration data
import sys # For sys.exit()
import os # For diagnosing exceptions
import collections # For rotatable lists
import shutil # For copying files
import termios # For catching termios exceptions
import ConfigParser # For writing and reading the config file
from configobj import ConfigObj # For writing and reading config file
utilnum = 47
# create logger
module_logger = logging.getLogger('root.utils')
module_logger.setLevel(logging.DEBUG)
# comports() returns a list of comports available in the system
from serial.tools.list_ports import comports
# Global variables
cmdterm = '\r\n' # Terminates each command
fresolution = 0.09313225746 # Frequency resolution (Hz)
def int8_to_dec(signed):
"""Return a signed decimal number given a signed 8-bit integer
Arguments:
signed -- Signed 8-bit integer (0-255)
"""
if (signed > 127):
decimal = signed - 256
else:
decimal = signed
return decimal
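# Quick check (illustrative values): int8_to_dec(255) == -1, int8_to_dec(100) == 100.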
def get_phasestr(frequency):
"""Return a string of four 8-bit numbers used to set the waveform
output frequency
Arguments:
frequency -- Floating point frequency in Hz
The function generator has a frequency resolution of 0.09313225746
Hz.
"""
pval = int(frequency/fresolution)
fthree = int(pval/(2**24))
ftwo = int((pval%(2**24))/(2**16))
fone = int((pval%(2**16))/(2**8))
fzero = int((pval%(2**8)))
retstr = (str(fthree) + ' ' +
str(ftwo) + ' ' +
str(fone) + ' ' +
str(fzero)
)
return(retstr)
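# Worked example (illustrative request, not from the original code): asking for
# 1000.0 Hz gives pval = int(1000.0 / 0.09313225746) = 10737, which splits into
# the four bytes 0, 0, 41, 241, so get_phasestr(1000.0) == '0 0 41 241'.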
def set_sine_frequency(handle, setfreq):
""" Return the actual frequency set on the hardware
Arguments:
handle -- Serial object for the CGR-101
setfreq -- The floating point frequency in Hz
"""
handle.open()
actfreq = int(setfreq / fresolution) * fresolution
phase_string = get_phasestr(actfreq)
sendcmd(handle,'W F ' + phase_string)
handle.close()
return actfreq
def set_arb_value(handle, address, value):
""" Set an output value in the arbitrary waveform output buffer
Arguments:
handle -- Serial object for the CGR-101
address -- Address of the value (0-255)
value -- Value of the arb (0-255)
"""
handle.open()
sendcmd(handle,'W S ' + str(address) + ' ' + str(value))
handle.close()
return
def set_output_amplitude(handle, amplitude):
""" Return the actual output amplitude set on the hardware
Arguments:
handle -- Serial object for the CGR-101
amplitude -- The requested amplitude in Volts
"""
handle.open()
if amplitude > 3:
module_logger.error('Requested amplitude ' + str(amplitude) + ' Vp. Maximum 3Vp')
amplitude = 3
azero = int(round(255 * float(amplitude)/3.0))
actamp = azero * 3.0/255
sendcmd(handle,'W A ' + str(azero))
handle.close()
return actamp
def write_cal(handle, calfile, caldict):
"""Write calibration constants to a file and to the eeprom.
See the caldict_default definition for the list of dictionary
entries. If the specified calfile exists, it will be saved as
calfile_old and a new calfile will be written.
Arguments:
handle -- Serial object for the CGR-101
calfile -- Filename for saving calibration constants.
caldict -- A dictionary of (calibration factor names) : values
"""
try:
with open(calfile):
# If this succeeds, the file already exists. See if
# anything has changed.
caldict_old = load_cal(handle, calfile)
calchanged = False
for key in caldict:
if (caldict[key] != caldict_old[key]):
calchanged = True
module_logger.debug('Cal factor ' + key + ' has changed')
module_logger.debug(str(caldict_old[key]) + ' --> ' +
str(caldict[key]))
if calchanged:
# The calibration has changed. Back up the old
# calibration file and write a new one.
calfile_old = (calfile.split('.')[0] + '_old.' +
calfile.split('.')[1])
module_logger.info(
'Backing up calibration file ' + calfile +
' to ' + calfile_old
)
shutil.copyfile(calfile,(
calfile.split('.')[0] + '_old.' + calfile.split('.')[1]
))
module_logger.info('Writing calibration to ' + calfile)
with open(calfile,'w') as fout:
pickle.dump(caldict,fout)
fout.close()
except IOError:
# The calfile doesn't exist, so write one.
module_logger.info('Writing calibration to ' + calfile)
with open(calfile,'w') as fout:
pickle.dump(caldict,fout)
fout.close()
# Write eeprom values
set_eeprom_offlist(
handle,
[caldict['chA_10x_eeprom'],caldict['chA_1x_eeprom'],
caldict['chB_10x_eeprom'],caldict['chB_1x_eeprom']]
)
"""Specify a default calibration dictionary.
This dictionary definition is also where all the calibration factors
are defined. If you want to add another factor, this is the place to
do it.
eeprom values are offsets to be stored in eeprom. Values are scaled
from their file-based values by the eeprom_scaler factor. If you
change this factor, you must remove the pickled calibration file and
recalibrate.
"""
caldict_default = {
'eeprom_scaler': 5.0,
'chA_1x_offset': 0,
'chA_1x_eeprom': 0,
'chA_1x_slope': 0.0445,
'chA_10x_offset': 0,
'chA_10x_eeprom':0,
'chA_10x_slope': 0.0445,
'chB_1x_offset': 0,
'chB_1x_eeprom': 0,
'chB_1x_slope': 0.0445,
'chB_10x_offset': 0,
'chB_10x_eeprom': 0,
'chB_10x_slope': 0.0445,
}
def load_cal(handle, calfile):
"""Load and return calibration constant dictionary.
If the calibration file exists, use the coefficients in it. If it
doesn't, load calibration offsets from the CGR unit. Use these
values in the caldict_default dictionary.
Arguments:
handle -- Serial object for the CGR-101
calfile -- Filename for calibration constants saved in Python's
pickle format.
"""
try:
# Try loading the calibration file
module_logger.info('Loading calibration file ' + calfile)
fin = open(calfile,'rb')
caldict = pickle.load(fin)
# Make sure all needed calibration factors are in the dictionary
for key in caldict_default:
if not key in caldict:
module_logger.info('Adding calibration value ' +
str(key) + ' to dictionary.'
)
caldict[key] = caldict_default[key]
fin.close()
except IOError:
# We didn't find the calibration file. Load constants from eeprom.
module_logger.warning(
'Failed to open calibration file...using defaults'
)
eeprom_list = get_eeprom_offlist(handle)
caldict = caldict_default
# Fill in offsets from eeprom values
caldict['chA_10x_offset'] = int8_to_dec(
eeprom_list[0]/caldict['eeprom_scaler']
)
caldict['chA_1x_offset'] = int8_to_dec(
eeprom_list[1]/caldict['eeprom_scaler']
)
caldict['chB_10x_offset'] = int8_to_dec(
eeprom_list[2]/caldict['eeprom_scaler']
)
caldict['chB_1x_offset'] = int8_to_dec(
eeprom_list[3]/caldict['eeprom_scaler']
)
return caldict
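# Typical usage sketch (handle and file name are illustrative):
#   cgr = get_cgr(config)
#   caldict = load_cal(cgr, 'cgrtool.cal')
# Falls back to caldict_default plus the eeprom offsets when the file is missing.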
def get_cgr(config):
""" Return a serial object for the cgr scope
Arguments:
config -- Configuration object read from configuration file.
"""
# The comports() function returns an iterable that yields tuples of
# three strings:
#
# 1. Port name as it can be passed to serial.Serial
# 2. Description in human readable form
# 3. Sort of hardware ID -- may contain VID:PID of USB-serial adapters.
portset = set(comports()) # Use set to prevent repeats
# Add undetectable serial ports here
for portnum in range(10):
portset.add(('/dev/ttyUSB' + str(portnum),
'ttyUSB' + str(portnum), 'n/a')
)
# Add the port specified in the configuration to the front of the
# list. We have to convert the set object to a list because set
# objects do not support indexing.
portlist = [(config['Connection']['port'],'','')] + list(portset)
for serport in portlist:
rawstr = ''
try:
cgr = serial.Serial()
cgr.baudrate = 230400
cgr.timeout = 0.1 # Set timeout to 100ms
cgr.port = serport[0]
module_logger.debug('Trying to connect to CGR-101 at ' + serport[0])
cgr.open()
# If the port can be configured, it might be a CGR. Check
# to make sure.
retnum = cgr.write("i\r\n") # Request the identity string
rawstr = cgr.read(10) # Read a small number of bytes
cgr.close()
if rawstr.count('Syscomp') == 1:
# Success! We found a CGR-101 unit!
module_logger.info('Connecting to CGR-101 at ' +
str(serport[0]))
# Write the successful connection port to the configuration
config['Connection']['port'] = str(serport[0])
config.write()
return cgr
else:
module_logger.info('Could not open ' + serport[0])
if serport == portlist[-1]: # This is the last port
module_logger.error(
'Did not find any CGR-101 units. Exiting.'
)
sys.exit()
# Catch exceptions caused by problems opening a filesystem node as
# a serial port, by problems caused by the node not existing, and
# general tty problems.
except (serial.serialutil.SerialException,
OSError, termios.error):
module_logger.debug('Could not open ' + serport[0])
if serport == portlist[-1]: # This is the last port
module_logger.error(
'Did not find any CGR-101 units. Exiting.'
)
sys.exit()
# This exception should never get handled. It's just for debugging.
except Exception as ex:
template = "An exception of type: {0} occured. Arguments:\n{1!r}"
message = template.format((type(ex).__module__ + '.' +
type(ex).__name__), ex.args)
module_logger.error(message)
sys.exit()
def flush_cgr(handle):
readstr = 'junk'
while (len(readstr) > 0):
readstr = handle.read(100)
module_logger.info('Flushed ' + str(len(readstr)) + ' characters')
def sendcmd(handle,cmd):
""" Send an ascii command string to the CGR scope.
Arguments:
handle -- Serial object for the CGR scope
cmd -- Command string
"""
handle.write(cmd + cmdterm)
module_logger.debug('Sent command ' + cmd)
time.sleep(0.1) # Can't run at full speed.
def get_samplebits(fsamp_req):
"""Returns a valid sample rate setting.
Given a sample rate in Hz, returns the closest possible hardware
sample rate setting. This setting goes in bits 0:3 of the control
register.
The sample rate is given by (20Ms/s)/2**N, where N is the 4-bit
value returned by this function.
Arguments:
fsamp_req -- The requested sample rate in Hz.
"""
baserate = 20e6 # Maximum sample rate
ratelist = []
for nval in range(2**4):
ratelist.append( (baserate / ( 2**nval )) )
fsamp_act = min(ratelist, key=lambda x:abs(x - fsamp_req))
setval = ratelist.index(fsamp_act)
return [setval,fsamp_act]
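# Worked example (illustrative): requesting 1 MHz picks the closest available
# rate, 20 MHz / 2**4 = 1.25 MHz, so get_samplebits(1e6) returns [4, 1250000.0].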
def askcgr(handle,cmd):
"""Send an ascii command to the CGR scope and return its reply.
Arguments:
handle -- Serial object for the CGR scope
cmd -- Command string
"""
sendcmd(handle,cmd)
try:
retstr = handle.readline()
return(retstr)
except:
return('No reply')
def get_state(handle):
""" Return the CGR's state string
Returned string Corresponding state
---------------------------------------
State 1 Idle
State 2 Initializing capture
State 3 Wait for trigger signal to reset
State 4 Armed, waiting for capture
State 5 Capturing
State 6 Done
Arguments:
handle -- Serial object for the CGR scope
"""
handle.open()
retstr = askcgr(handle,'S S')
print(retstr)
if (retstr == "No reply"):
print('getstat: no response')
handle.close()
return retstr
def get_timelist(fsamp):
"""Return a list of sample times
Arguments:
fsamp -- Sample rate in Hz
Remember that the CGR-101 takes 2048 samples, but this is a total
for both channels. Each channel will have 1024 samples. The
sample rate calculation is based on these 1024 samples -- not
2048.
"""
timelist = []
for samplenum in range(1024):
timelist.append( samplenum * (1.0/fsamp) )
return timelist
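# Worked example (illustrative): at the 1.25 MHz rate above, the 1024 samples
# span 1024 / 1.25e6 = 819.2 microseconds, and get_timelist(1.25e6)[1] == 8e-07.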
def get_eeprom_offlist(handle):
"""Returns the offsets stored in the CGR's eeprom. This will be a
list of signed integers:
[Channel A 10x range offset, Channel A 1x range offset,
Channel B 10x range offset, Channel B 1x range offset]
"""
handle.open()
sendcmd(handle,'S O')
retdata = handle.read(10)
handle.close()
hexdata = binascii.hexlify(retdata)[2:]
cha_hioff = int(hexdata[0:2],16)
cha_looff = int(hexdata[2:4],16)
chb_hioff = int(hexdata[4:6],16)
chb_looff = int(hexdata[6:8],16)
# Unsigned decimal list
udeclist = [cha_hioff, cha_looff, chb_hioff, chb_looff]
declist = []
for unsigned in udeclist:
if (unsigned > 127):
signed = unsigned - 256
else:
signed = unsigned
declist.append(signed)
return declist
def set_eeprom_offlist(handle,offlist):
"""Sets offsets in the CGR's eeprom.
Arguments:
handle -- Serial object for the CGR-101
offlist -- List of signed 8-bit integers:
[Channel A 10x range offset,
Channel A 1x range offset,
Channel B 10x range offset,
Channel B 1x range offset]
"""
unsigned_list = []
for offset in offlist:
if (offset < 0):
unsigned_list.append(offset + 256)
else:
unsigned_list.append(offset)
handle.open()
module_logger.debug('Writing chA 10x offset of ' + str(unsigned_list[0]) +
' to eeprom')
module_logger.debug('Writing chA 1x offset of ' + str(unsigned_list[1]) +
' to eeprom')
module_logger.debug('Writing chB 10x offset of ' + str(unsigned_list[2]) +
' to eeprom')
module_logger.debug('Writing chB 1x offset of ' + str(unsigned_list[3]) +
' to eeprom')
sendcmd(handle,('S F ' +
str(unsigned_list[0]) + ' ' +
str(unsigned_list[1]) + ' ' +
str(unsigned_list[2]) + ' ' +
str(unsigned_list[3]) + ' '
)
)
handle.close()
def set_trig_samples(handle,trigdict):
"""Set the number of samples to take after a trigger.
The unit always takes 1024 samples per channel. Setting the
post-trigger samples to a value less than 1024 means that samples
before the trigger will also be stored.
Arguments:
handle -- Serial object for the CGR-101
trigdict -- Dictionary of trigger settings. See get_trig_dict for
more details.
"""
handle.open()
totsamp = 1024
if (trigdict['trigpts'] <= totsamp):
setval_h = int((trigdict['trigpts']%(2**16))/(2**8))
setval_l = int((trigdict['trigpts']%(2**8)))
else:
setval_h = int((500%(2**16))/(2**8))
setval_l = int((500%(2**8)))
sendcmd(handle,('S C ' + str(setval_h) + ' ' + str(setval_l)))
handle.close()
def set_ctrl_reg(handle,fsamp_req,trigdict):
""" Sets the CGR-101's conrol register.
Arguments:
handle -- Serial object for the CGR-101
fsamp_req -- Requested sample rate in Hz. The actual rate will
be determined using those allowed for the unit.
trigdict -- Dictionary of trigger settings. See get_trig_dict
for more details.
"""
reg_value = 0
[reg_value,fsamp_act] = get_samplebits(fsamp_req) # Set sample rate
# Configure the trigger source
if trigdict['trigsrc'] == 0: # Trigger on channel A
reg_value += (0 << 4)
elif trigdict['trigsrc'] == 1: # Trigger on channel B
reg_value += (1 << 4)
elif trigdict['trigsrc'] == 2: # Trigger on external input
reg_value += (1 << 6)
# Configure the trigger polarity
if trigdict['trigpol'] == 0: # Rising edge
reg_value += (0 << 5)
elif trigdict['trigpol'] == 1: # Falling edge
reg_value += (1 << 5)
handle.open()
sendcmd(handle,('S R ' + str(reg_value)))
handle.close()
return [reg_value,fsamp_act]
def set_hw_gain(handle,gainlist):
"""Sets the CGR-101's hardware gain.
Arguments:
handle -- Serial object for the CGR-101.
gainlist -- [Channel A gain, Channel B gain]
...where the gain values are:
0: Set 1x gain
1: Set 10x gain (for use with a 10x probe)
"""
handle.open()
if gainlist[0] == 0: # Set channel A gain to 1x
sendcmd(handle,('S P A'))
elif gainlist[0] == 1: # Set channel A gain to 10x
sendcmd(handle,('S P a'))
if gainlist[1] == 0: # Set channel B gain to 1x
sendcmd(handle,('S P B'))
elif gainlist[1] == 1: # Set channel B gain to 10x
sendcmd(handle,('S P b'))
handle.close()
return gainlist
def get_trig_dict( trigsrc, triglev, trigpol, trigpts ):
"""Return a dictionary of trigger settings.
Arguments:
trigsrc -- Trigger source
0: Channel A
1: Channel B
2: External
3: Internal
triglev -- Trigger voltage (floating point volts)
trigpol -- Trigger slope
0: Rising
1: Falling
trigpts -- Points to acquire after trigger (0,1,2,...,1024)
"""
trigdict = {}
trigdict['trigsrc'] = trigsrc
trigdict['triglev'] = triglev
trigdict['trigpol'] = trigpol
trigdict['trigpts'] = trigpts
return trigdict
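# Example settings (illustrative): trigger on channel A at 1.0 V on a rising
# edge, keeping 512 post-trigger samples:
#   trigdict = get_trig_dict(0, 1.0, 0, 512)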
def set_trig_level(handle, caldict, gainlist, trigdict):
"""Sets the trigger voltage.
    Arguments:
handle -- Serial object for the CGR-101
caldict -- Dictionary of slope and offset values
gainlist -- [Channel A gain, Channel B gain]
trigdict -- Dictionary of trigger settings. See get_trig_dict
for more details.
"""
handle.open()
if (gainlist[0] == 0 and trigdict['trigsrc'] == 0):
# Channel A gain is 1x
trigcts = (511 - caldict['chA_1x_offset'] -
float(trigdict['triglev'])/caldict['chA_1x_slope'])
elif (gainlist[0] == 1 and trigdict['trigsrc'] == 0):
# Channel A gain is 10x
trigcts = (511 - caldict['chA_10x_offset'] -
float(trigdict['triglev'])/caldict['chA_10x_slope'])
elif (gainlist[1] == 0 and trigdict['trigsrc'] == 1):
# Channel B gain is 1x
trigcts = (511 - caldict['chB_1x_offset'] -
float(trigdict['triglev'])/caldict['chB_1x_slope'])
elif (gainlist[1] == 1 and trigdict['trigsrc'] == 1):
# Channel B gain is 10x
trigcts = (511 - caldict['chB_10x_offset'] -
float(trigdict['triglev'])/caldict['chB_10x_slope'])
else:
trigcts = 511 # 0V
trigcts_l = int(trigcts%(2**8))
trigcts_h = int((trigcts%(2**16))/(2**8))
sendcmd(handle,('S T ' + str(trigcts_h) + ' ' + str(trigcts_l)))
handle.close()
def get_uncal_triggered_data(handle, trigdict):
"""Return uncalibrated integer data.
If you just ask the CGR for data, you'll get its circular buffer
with the last point acquired somewhere in the middle. This
function rotates the buffer data so that the last point acquired
is the last point in the returned array.
Returned data is:
[ list of channel A integers, list of channel B integers ]
Arguments:
handle -- Serial object for the CGR-101.
trigdict -- Dictionary of trigger settings (see get_trig_dict
for more details.
"""
handle.open()
sendcmd(handle,'S G') # Start the capture
sys.stdout.write('Waiting for ' +
'{:0.2f}'.format(trigdict['triglev']) +
'V trigger at ')
if trigdict['trigsrc'] == 0:
print('input A...')
elif trigdict['trigsrc'] == 1:
print('input B...')
elif trigdict['trigsrc'] == 2:
print('external input...')
retstr = ''
# The unit will reply with 3 bytes when it's done capturing data:
# "A", high byte of last capture location, low byte
# Wait on those three bytes.
while (len(retstr) < 3):
retstr = handle.read(10)
lastpoint = int(binascii.hexlify(retstr)[2:],16)
module_logger.debug('Capture ended at address ' + str(lastpoint))
sendcmd(handle,'S B') # Query the data
retdata = handle.read(5000) # Read until timeout
hexdata = binascii.hexlify(retdata)[2:]
module_logger.debug('Got ' + str(len(hexdata)/2) + ' bytes')
handle.close()
bothdata = [] # Alternating data from both channels
adecdata = [] # A channel data
bdecdata = [] # B channel data
# Data returned from the unit has alternating words of channel A
# and channel B data. Each word is 16 bits (four hex characters)
for samplenum in range(2048):
sampleval = int(hexdata[(samplenum*4):(samplenum*4 + 4)],16)
bothdata.append(sampleval)
adecdata = collections.deque(bothdata[0::2])
adecdata.rotate(1024-lastpoint)
bdecdata = collections.deque(bothdata[1::2])
bdecdata.rotate(1024-lastpoint)
return [list(adecdata),list(bdecdata)]
def reset(handle):
""" Perform a hardware reset.
"""
handle.open()
sendcmd(handle,('S D 1' )) # Force the reset
sendcmd(handle,('S D 0' )) # Return to normal
handle.close()
def force_trigger(handle, ctrl_reg):
"""Force a trigger.
This sets bit 6 of the control register to configure triggering
via the external input, then sends a debug code to force the
trigger.
Arguments:
handle -- Serial object for the CGR-101.
ctrl_reg -- Value of the control register.
"""
old_reg = ctrl_reg
new_reg = ctrl_reg | (1 << 6)
handle.open()
sendcmd(handle,'S G') # Start the capture
sendcmd(handle,('S R ' + str(new_reg))) # Ready for forced trigger
module_logger.info('Forcing trigger')
sendcmd(handle,('S D 5')) # Force the trigger
sendcmd(handle,('S D 4')) # Return the trigger to normal mode
# Put the control register back the way it was
sendcmd(handle,('S R ' + str(old_reg)))
handle.close()
def get_uncal_forced_data(handle,ctrl_reg):
""" Returns uncalibrated data from the unit after a forced trigger.
Returned data is:
[ list of channel A integers, list of channel B integers ]
Arguments:
handle -- Serial object for the CGR-101.
ctrl_reg -- Value of the control register.
"""
force_trigger(handle, ctrl_reg)
handle.open()
sendcmd(handle,'S B') # Query the data
retdata = handle.read(5000)
hexdata = binascii.hexlify(retdata)[2:]
module_logger.debug('Got ' + str(len(hexdata)/2) + ' bytes')
handle.close()
# There is no last capture location for forced triggers. Setting
# lastpoint to zero doesn't rotate the data.
lastpoint = 0
bothdata = [] # Alternating data from both channels
adecdata = [] # A channel data
bdecdata = [] # B channel data
# Data returned from the unit has alternating words of channel A
# and channel B data. Each word is 16 bits (four hex characters)
for samplenum in range(2048):
sampleval = int(hexdata[(samplenum*4):(samplenum*4 + 4)],16)
bothdata.append(sampleval)
adecdata = collections.deque(bothdata[0::2])
adecdata.rotate(1024-lastpoint)
bdecdata = collections.deque(bothdata[1::2])
bdecdata.rotate(1024-lastpoint)
return [list(adecdata),list(bdecdata)]
def get_cal_data(caldict,gainlist,rawdata):
"""Return calibrated voltages.
Arguments:
caldict -- Dictionary of calibration constants. See
caldict_default for the keys in this dictionary.
gainlist -- List of gain settings:
[Channel A gain, Channel B gain]
rawdata -- List of uncalibrated data downloaded from CGR-101:
[Channel A data, Channel B data]
"""
if gainlist[0] == 0:
# Channel A has 1x gain
chA_slope = caldict['chA_1x_slope']
chA_offset = caldict['chA_1x_offset']
elif gainlist[0] == 1:
# Channel A has 10x gain
chA_slope = caldict['chA_10x_slope']
chA_offset = caldict['chA_10x_offset']
if gainlist[1] == 0:
# Channel B has 1x gain
chB_slope = caldict['chB_1x_slope']
chB_offset = caldict['chB_1x_offset']
elif gainlist[1] == 1:
# Channel B has 10x gain
chB_slope = caldict['chB_10x_slope']
chB_offset = caldict['chB_10x_offset']
# Process channel A data
cha_voltdata = []
for sample in rawdata[0]:
cha_voltdata.append((511 - (sample + chA_offset))*chA_slope)
# Process channel B data
chb_voltdata = []
for sample in rawdata[1]:
chb_voltdata.append((511 - (sample + chB_offset))*chB_slope)
return [cha_voltdata,chb_voltdata]
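# Example (illustrative): with both channels at 1x gain, convert a raw capture
# into calibrated volts:
#   [cha_volts, chb_volts] = get_cal_data(caldict, [0, 0], rawdata)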
| mit | 7,059,296,492,525,935,000 | 32.877215 | 89 | 0.59302 | false |
FreddieShoreditch/evernote_clone_to_folder | EvernoteSync.py | 1 | 11918 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import binascii
import collections
import copy
import getopt
import json
import os
import random
import re
import string
import subprocess
import sys
import xmltodict as xmltodict
from evernote.api.client import EvernoteClient, NoteStore
from evernote.edam.error import ttypes as Types
from evernote.edam.limits import constants as Limits
from flask.ext.api import FlaskAPI as Flask
from flask.ext.api.parsers import URLEncodedParser
from html5print import HTMLBeautifier
def EDAMError(error):
if error.errorCode is Types.EDAMErrorCode.RATE_LIMIT_REACHED:
print 'Rate Limit Exceeded:\tTry again in ', \
error.rateLimitDuration / 60, ' minutes, ', \
error.rateLimitDuration % 60, ' seconds.'
sys.exit(1)
else:
print error
def parse_query_string(auth_url):
uargs = auth_url.split('?')
vals = {}
if len(uargs) <= 1:
raise Exception('Invalid Authorisation URL')
for kv in uargs[1].split('&'):
key, value = kv.split('=', 1)
vals[key] = value
return vals
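# Example (hypothetical callback URL):
#   parse_query_string('http://localhost/?oauth_token=abc&oauth_verifier=xyz')
# returns {'oauth_token': 'abc', 'oauth_verifier': 'xyz'}.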
def get_os() :
p = sys.platform
if (p == 'darwin'):
return 'Mac OS X'
elif (p == 'win32'):
return 'Windows'
else:
return 'Linux or other'
def get_token():
if os.path.exists('token.json'):
try:
with open('token.json', 'r') as f:
return json.loads(f.read())
except ValueError as e:
return None
return None
def get_client() :
token = get_token()
if not token:
print 'Username:\t',
username = raw_input()
print 'Password:\t',
password = raw_input()
print '\n'
client = EvernoteClient(
consumer_key = 'freddieshoreditch-8876',
consumer_secret = '13801fb7745664f3',
sandbox = False
)
req_token = client.get_request_token('http://localhost/')
os_ = get_os()
url_ = client.get_authorize_url(req_token)
if (os_ == 'Mac OS X'):
command = 'open {}'.format(url_)
    elif (os_ == 'Windows'):
        print 'Unimplemented for Windows.'
        sys.exit(3)
    elif (os_ == 'Linux or other'):
print 'Unimplemented for Linux or other.'
sys.exit(3)
else:
print 'Unimplemented for your operating system.'
sys.exit(3)
tries = 0
exit_code = -1
while (exit_code != 0 and tries < 5):
tries += 1
exit_code = subprocess.call(command, shell=True)
if exit_code != 0:
print 'Could not open authorisation url, please open it manually:',
print url_
print '\n\nPASTE the URL after logging in:\t'
result = raw_input()
vals = parse_query_string(result)
token = client.get_access_token(
req_token['oauth_token'],
req_token['oauth_token_secret'],
vals['oauth_verifier']
)
with open('token.json', 'w') as f:
f.write(json.dumps(token))
return EvernoteClient(token=token), token
# Global variables
try:
# Authenticate with Evernote
client, token = get_client()
userStore = client.get_user_store()
# Get the notestore
noteStore = client.get_note_store()
notebooks = noteStore.listNotebooks()
except Types.EDAMSystemException as e:
EDAMError(e)
# Set a default HTML template
html_template = xmltodict.parse(
"<?xml version=\"1.0\" encoding=\"UTF-8\"?><html><body></body></html>"
)
def find_notebook_with_guid(guid):
nbooks = noteStore.listNotebooks()
for n in nbooks:
if n.guid and n.guid is guid:
return n
pass
def get_notes_from_notebook(notebook):
filter = NoteStore.NoteFilter()
filter.ascending = True
filter.notebookGuid = notebook.guid
spec = NoteStore.NotesMetadataResultSpec()
spec.includeTitle = True
spec.includeUpdated = True
noteList = noteStore.findNotesMetadata(filter, 0,
Limits.EDAM_USER_NOTES_MAX, spec)
while noteList.totalNotes > len(noteList.notes):
appendNoteList = noteStore.findNotesMetadata(filter,
len(noteList.notes),
Limits.EDAM_USER_NOTES_MAX,
spec)
noteList.notes += appendNoteList.notes
return noteList.notes
def add_filename_type(filename, mime):
if mime == 'image/png':
filename += '.png'
elif mime == 'application/json':
filename += '.json'
elif mime == 'application/pdf':
filename += '.pdf'
return filename
def find_replace_enmedia_hash(enmedia, resources):
if u'@hash' in enmedia:
for i in resources:
hexhash = binascii.hexlify(i.data.bodyHash)
if hexhash == enmedia[u'@hash']:
filename = i.attributes.fileName
if not filename:
if u'@alt' in enmedia:
filename = enmedia[u'@alt']
else:
filename = hexhash
if i.mime:
filename = add_filename_type(filename, i.mime)
elif u'@type' in enmedia:
filename = add_filename_type(filename, enmedia[
u'@type'])
i.attributes.fileName = filename
enmedia[u'@src'] = 'attachments/{filename}'.format(
filename=filename)
enmedia[u'@src'] = enmedia[u'@src'].decode('utf8')
del enmedia[u'@hash']
break
def render_files_in_xml(content, html, resources):
if isinstance(content, list):
for i in content:
render_files_in_xml(i, html, resources)
elif isinstance(content, collections.OrderedDict):
for i in content.keys():
if isinstance(content[i], str) or isinstance(content[i], unicode):
continue
elif i == u'en-media':
find_replace_enmedia_hash(content[i], resources)
content[u'img'] = content[i]
del content[i]
elif i == u'en-note':
body = html[u'html'][u'body']
render_files_in_xml(content[i], html, resources)
div = content[i]
if u'div' in body.keys():
if isinstance(body[u'div'], list):
body[u'div'].append(div)
else:
temp = body[u'div']
body[u'div'] = [temp, div]
else:
body[u'div'] = div
else:
render_files_in_xml(content[i], html, resources)
def process_enml_media(enml, resources):
content = xmltodict.parse(enml)
html = copy.deepcopy(html_template)
html[u'html'][u'body'] = collections.OrderedDict()
render_files_in_xml(content, html, resources)
return xmltodict.unparse(html, encoding='utf8')
def note_has_updated(n, dir):
if not os.path.exists(dir):
return True
elif not os.path.exists('{0}/info.json'.format(dir)):
return True
else:
try:
with open('{0}/info.json'.format(dir), 'r') as f:
data = json.loads(f.read(), encoding='utf8')
if u'updated' in data.keys():
return n.updated > data[u'updated'] or \
(u'success' not in data.keys())
return True
except:
return True
def validate_filename(title):
if title is not None:
title = re.sub(r'[\/\\\n\\\r\\\t]', r' ', title)
return title
def validate_filenames(data):
if data is None:
return
for d in data:
if hasattr(d, 'attributes'):
d.attributes.fileName = validate_filename(d.attributes.fileName)
def write(notebook, notes, out_dir=''):
notebook_name = notebook.name
count = 0
totalCount = len(notes)
for n in notes:
count += 1
title = n.title
print '\t\t{count} of {total}:\t{note}'.format(count=count,
total=totalCount,
note=title)
title = validate_filename(title)
dir = '{out_dir}{parent}/{child}'.format(parent=notebook_name,
child=title, out_dir=out_dir)
note_updated = note_has_updated(n, dir)
if note_updated is False:
continue
if not os.path.exists(dir):
os.makedirs(dir)
n = noteStore.getNote(token, n.guid, True, True, False, False)
enml = n.content
validate_filenames(n.resources)
resources = n.resources
tags = []
if n.tagGuids:
for i in n.tagGuids:
tag = noteStore.getTag(i)
tags.append(tag.name)
# Print information about the note to file
info = {"title": title, "created": n.created, "updated": n.updated,
"enml?": enml != None, "tags": tags}
outinfo = '{dir}/info.json'.format(dir=dir)
if (resources):
info['resources_count'] = len(resources)
with open(outinfo, 'w') as f:
f.write(json.dumps(info, indent=2, sort_keys=True))
if (enml):
html = process_enml_media(enml, resources)
html_pretty = HTMLBeautifier.beautify(html, 2)
with open('{dir}/content.html'.format(dir=dir), 'w') as f:
f.write(html_pretty.encode('utf8'))
if (resources):
dir = '{dir}/attachments'.format(dir=dir)
if not os.path.exists(dir):
os.makedirs(dir)
for r in resources:
filename = r.attributes.fileName
if not filename:
filename = ''.join(random.SystemRandom().choice(
string.ascii_uppercase + string.digits) for _ in
range(10))
filename = add_filename_type(filename, r.mime)
with open('{dir}/{filename}'.format(dir=dir,
filename=filename),
'wb') as f:
f.write(bytearray(r.data.body))
info['success'] = True
with open(outinfo, 'w') as f:
out = json.dumps(info, indent=2, sort_keys=True)
f.write(out.encode('utf8'))
def backup(settings):
try:
user = userStore.getUser()
print 'Backing up for user {0}...\n'.format(user.username)
print 'Notebooks backed up:'
for n in notebooks:
print '\r\t{name}'.format(name=n.name)
notes = get_notes_from_notebook(n)
write(n, notes, settings['out_dir'])
print
except Types.EDAMSystemException as e:
EDAMError(e)
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "o:v", ["output="])
except getopt.GetoptError as err:
# print help information and exit:
print(err) # will print something like "option -a not recognized"
sys.exit(2)
settings = {'verbose': False, 'out_dir': ''}
for o, a in opts:
if o == "-v":
settings['verbose'] = True
elif o in ("-o", "--output"):
out_dir = str(a) + "/"
settings['out_dir'] = out_dir
else:
assert False, "unhandled option"
print 'Welcome to the cloning CLI for Evernote.\n' \
'Use this program to clone and backup your Evernote notes and files.\n'
backup(settings)
if __name__ == '__main__':
main()
| mit | -7,427,740,082,707,575,000 | 31.652055 | 81 | 0.534066 | false |
Jozhogg/iris | lib/iris/tests/unit/plot/test_outline.py | 1 | 1466 | # (C) British Crown Copyright 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.plot.outline` function."""
from __future__ import (absolute_import, division, print_function)
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
from iris.tests.unit.plot import TestGraphicStringCoord
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
iplt.outline(self.cube, coords=('bar', 'str_coord'))
self.assertBoundsTickLabels('yaxis')
def test_xaxis_labels(self):
iplt.outline(self.cube, coords=('str_coord', 'bar'))
self.assertBoundsTickLabels('xaxis')
if __name__ == "__main__":
tests.main()
| lgpl-3.0 | 2,724,913,210,227,716,600 | 33.904762 | 74 | 0.731241 | false |
zapcoop/vertex | vertex_api/service/tasks.py | 1 | 5959 | import email
from django.core.exceptions import PermissionDenied
from django.template import Context
from django.db import transaction
from django_mailbox.models import Message
import html2text
from vertex.celery import app
import vertex.rules
from service.models import Ticket, TicketSubscriber, Update, EmailTemplate
from contacts.models import EmailAddress, EmailDomain
@app.task(routing_key='vertex', ignore_result=True)
def create_ticket_from_email_message(message_pk):
with transaction.atomic():
message = Message.objects.get(pk=message_pk)
sender = EmailAddress.objects.filter(email_address=message.from_address[0]).first()
team = message.mailbox.team
ticket = Ticket(
title=message.subject,
priority=5,
message_id=message.message_id
)
if not message.html:
ticket.description = message.text
ticket.description_is_plain = True
else:
text_maker = html2text.HTML2Text()
text_maker.escape_snob = True
ticket.description = text_maker.handle(message.html)
if sender and sender.person:
ticket.created_by = sender.person
ticket.signaled_by = sender.person
if sender and sender.organization:
ticket.organization = sender.organization
else:
ticket.organization = _organization_from_domain(message.from_address[0].split('@')[1])
ticket.save() # TODO: send a confirmation email
ticket.teams.add(team)
add_subscribers_from_email(message, ticket)
@app.task(routing_key='vertex', ignore_result=True)
def update_ticket_from_email_message(message_pk, hash_id):
message = Message.objects.get(pk=message_pk)
sender = EmailAddress.objects.filter(email_address=message.from_address[0]).first()
pk, = Ticket.HASHIDS.decode(hash_id)
ticket = Ticket.objects.filter(pk=pk).first()
if (ticket is None) or (sender is None):
template = EmailTemplate.objects.get(slug='no_such_ticket')
template.send_service_email_bilingual(
context=Context(),
from_mailbox=message.mailbox,
to=message.from_address[0],
cc=None,
in_reply_to_message=message
)
elif has_update_permission(sender, ticket):
with transaction.atomic():
if message.html:
text_maker = html2text.HTML2Text()
text_maker.escape_snob = True
body = text_maker.handle(message.html)
Update.objects.create(
ticket=ticket,
person=sender.person,
body=body,
body_is_plaintext=False,
message_id=message.message_id
)
else:
Update.objects.create(
ticket=ticket,
person=sender.person,
body=message.text,
body_is_plaintext=True,
message_id=message.message_id
)
add_subscribers_from_email(message, ticket)
elif has_view_permission(sender, ticket):
template = EmailTemplate.objects.get(slug='insufficient_permissions')
template.send_service_email_bilingual(
context=Context(),
from_mailbox=message.mailbox,
to=message.from_address[0],
cc=None,
in_reply_to_message=message
)
else:
template = EmailTemplate.objects.get(slug='no_such_ticket')
template.send_service_email_bilingual(
context=Context(),
from_mailbox=message.mailbox,
to=message.from_address[0],
cc=None,
in_reply_to_message=message
)
def has_update_permission(sender, ticket):
try:
subscription = TicketSubscriber.objects.get(email_address=sender, ticket=ticket)
except TicketSubscriber.DoesNotExist:
pass
else:
return subscription.can_update
if sender.person:
try:
return vertex.rules.has_perm('service.change_ticket', sender.person, ticket)
except PermissionDenied:
return False
elif sender.organization:
return ticket.organization == sender.organization
return False
def has_view_permission(sender, ticket):
try:
subscription = TicketSubscriber.objects.get(email_address=sender, ticket=ticket)
except TicketSubscriber.DoesNotExist:
pass
else:
return subscription.can_view
if sender.person:
try:
return vertex.rules.has_perm('service.view_ticket', sender.person, ticket)
except PermissionDenied:
return False
elif sender.organization:
return ticket.organization == sender.organization
return False
def _organization_from_domain(domain_name):
try:
domain = EmailDomain.objects.get(domain_name__iexact=domain_name)
return domain.organization
except EmailDomain.DoesNotExist:
return None
def add_subscribers_from_email(message, ticket):
subscriber_emails = subscribers_specified_in_message(message)
ticket_subscribers = list()
for email_address in subscriber_emails:
ticket_subscribers.append(
TicketSubscriber(email_address=email_address, ticket=ticket, can_view=True)
)
TicketSubscriber.objects.bulk_create(ticket_subscribers)
def subscribers_specified_in_message(msg): # TODO make this case-insensitive
return EmailAddress.objects.filter(email_address__in=(msg.from_address + get_cc_addresses(msg)))
def get_cc_addresses(msg):
cc_addresses = list()
cc_headers = msg.get_email_object().get_all('CC')
if cc_headers is None:
return []
for (name, address) in email.utils.getaddresses(cc_headers):
cc_addresses.append(address)
return cc_addresses
| agpl-3.0 | 1,361,053,991,817,043,000 | 32.477528 | 100 | 0.633999 | false |
jwkanggist/EveryBodyTensorFlow | tf_basic/ex_runTFmulmat.py | 1 | 1142 | #-*- coding: utf-8 -*-
"""
#---------------------------------------------
filename: ex_runTFmatmul.py
- Construct a computational graph which calculates
  a matrix multiplication in TensorFlow
- Use tf.constant() in a matrix form
Written by Jaewook Kang
2017 Aug.
#-------------------------------------------
"""
import tensorflow as tf
# computational TF graph construction ================================
# Create a Constant op that produces a 1x2 matrix.
# This op is added as a node to the default graph.
#
# The value returned by the constructor (matrix1) represents the output of the Constant op.
matrix1 = tf.constant([[3., 3.]]) # 1 by 2
# Create another Constant op that produces a 2x1 matrix.
matrix2 = tf.constant([[2.],[2.]]) # 2 by 1
# Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs.
# The returned value 'product' represents the result of the matrix multiplication.
product = tf.matmul(matrix1, matrix2)
# Create a session for the computational graph ==================
sess = tf.Session()
result = sess.run(product)
print(result)
# ==> [[ 12.]]
sess.close() | unlicense | 6,864,422,983,591,427,000 | 23.026316 | 70 | 0.572368 | false |
kmee/pySigepWeb | pysigepweb/resposta_rastreamento.py | 1 | 3331 | # -*- coding: utf-8 -*-
# #############################################################################
#
# Brazillian Carrier Correios Sigep WEB
# Copyright (C) 2015 KMEE (http://www.kmee.com.br)
# @author: Michell Stuttgart <[email protected]>
#
# Sponsored by Europestar www.europestar.com.br
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from xml.etree.ElementTree import ElementTree, fromstring
class Destino(object):
def __init__(self, root):
self.local = root.find('local').text
self.codigo = root.find('codigo').text
self.cidade = root.find('cidade').text
self.bairro = root.find('bairro').text
self.uf = root.find('uf').text
class Evento(object):
def __init__(self, root):
self.tipo = root.find('tipo').text
self.status = root.find('status').text
self.data = root.find('data').text
self.hora = root.find('hora').text
self.descricao = root.find('descricao').text
        aux = root.find('recebedor')
        self.recebedor = aux.text if aux is not None else None
        aux = root.find('documento')
        self.documento = aux.text if aux is not None else None
        aux = root.find('comentario')
        self.comentario = aux.text if aux is not None else None
self.local = root.find('local').text
self.codigo = root.find('codigo').text
self.cidade = root.find('cidade').text
self.uf = root.find('uf').text
self.sto = root.find('sto').text
root_destino = root.find('destino')
        self.destino = Destino(root_destino) if root_destino is not None and len(root_destino) else None
class Objeto(object):
def __init__(self, root):
self.numero = root.find('numero').text
self.eventos = []
for evento in root.findall('evento'):
self.eventos.append(Evento(evento))
class RespostaRastreamento(object):
def __init__(self, xml_retorno, etiquetas, backup_path=''):
        # XML root tag
root = fromstring(xml_retorno)
self.error = ''
self.versao = root.find('versao').text
self.qtd = 0
self.tipo_pesquisa = ''
self.tipo_resultado = ''
self.objetos = {}
self._parse(root)
def _parse(self, root):
self.error = root.find('error')
        if self.error is None:
self.qtd = root.find('qtd').text
self.tipo_pesquisa = root.find('TipoPesquisa').text
self.tipo_resultado = root.find('TipoResultado').text
for obj in root.findall('objeto'):
aux = Objeto(obj)
self.objetos[aux.numero] = aux
| agpl-3.0 | 1,957,256,924,137,587,700 | 31.339806 | 79 | 0.589613 | false |
t1g0r/ramey | src/backend/sensor/__init__.py | 1 | 1366 | import sys
sys.path.append("backend/command/")
import time
from GPIOHandler import GPIOHandler
import RPi.GPIO as gpio
from utils import Parameter
from pprint import pprint
class MotionSensor(object):
"""docstring for MotionSensor"""
def __init__(self, config):
super(MotionSensor, self).__init__()
self.active = True
self.config = config
self.callback = {}
self.counter = 0
self.params = {}
self.Pin = config["pin"]
self.dbconn = config["dbconn"]
self.params["gpio_setup"] = "in"
self.ghandler = GPIOHandler(self.Pin.split(","),self.params,gpio.PUD_DOWN)
self.ghandler.Add_Event_Handler(pin=self.Pin.split(","),gpiosetup=gpio.BOTH,callback=self.OnMotion,bounce_time=50)
buzzerpin = Parameter.getValuebyFieldname(self.dbconn,"sensor_motion","buzzer")
# print buzzerpin
self.params["gpio_setup"] = "out"
self.buzzerHandler = GPIOHandler(buzzerpin.split(","),self.params)
def OnMotion(self,channel):
if ((gpio.input(int(self.Pin))) and (self.active)):
self.buzzerHandler.echo()
if len(self.callback) > 0:
for call in self.callback:
self.callback[call]()
def Execute(self):
try:
while True:
time.sleep(1)
except KeyboardInterrupt, e:
print "Error happened!"
def AddCallback(self,callback):
self.callback[self.counter] = callback
self.counter += 1
def test(self):
print self.config | gpl-3.0 | -9,164,296,870,604,272,000 | 25.803922 | 116 | 0.702782 | false |
SKA-ScienceDataProcessor/integration-prototype | sip/tango_control/tango_master/app/sdp_master_ds.py | 1 | 1281 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""SIP SDP Tango Master Device server.
Run with:
```bash
python3 sdp_master_ds.py 1 -v4
```
"""
import sys
from tango import Database, DbDevInfo
from tango.server import run
from sdp_master_device import SDPMasterDevice
from sip_logging import init_logger
from release import LOG, __service_id__
def register_master():
"""Register the SDP Master device."""
tango_db = Database()
device = "sip_sdp/elt/master"
device_info = DbDevInfo()
device_info._class = "SDPMasterDevice"
device_info.server = "sdp_master_ds/1"
device_info.name = device
devices = tango_db.get_device_name(device_info.server, device_info._class)
if device not in devices:
LOG.info('Registering device "%s" with device server "%s"',
device_info.name, device_info.server)
tango_db.add_device(device_info)
def main(args=None, **kwargs):
"""Run the Tango SDP Master device server."""
LOG.info('Starting %s', __service_id__)
return run([SDPMasterDevice], verbose=True, msg_stream=sys.stdout,
args=args, **kwargs)
if __name__ == '__main__':
init_logger(logger_name='', show_log_origin=True)
init_logger(show_log_origin=True)
register_master()
main()
| bsd-3-clause | 7,163,578,524,767,934,000 | 26.255319 | 78 | 0.657299 | false |
rdevon/cortex | cortex/built_ins/datasets/utils.py | 1 | 1457 | """
Extra functions for build-in datasets
"""
import torchvision.transforms as transforms
def build_transforms(normalize=True, center_crop=None, image_size=None,
random_crop=None, flip=None, random_resize_crop=None,
random_sized_crop=None, use_sobel=False):
"""
Args:
        normalize: A ``transforms.Normalize`` instance or a (mean, std)
            pair; falsy values skip normalization.
        center_crop: Size for ``CenterCrop``.
        image_size: Target size (int or (height, width)) for ``Resize``.
        random_crop: Size for ``RandomCrop``.
        flip: If True, apply ``RandomHorizontalFlip``.
        random_resize_crop: Size for ``RandomResizedCrop``.
        random_sized_crop: Size for ``RandomSizedCrop``.
        use_sobel: Accepted for API compatibility; not used in this function.
    Returns:
        A ``transforms.Compose`` pipeline built from the selected options.
"""
transform_ = []
if random_resize_crop:
transform_.append(transforms.RandomResizedCrop(random_resize_crop))
elif random_crop:
transform_.append(transforms.RandomCrop(random_crop))
elif center_crop:
transform_.append(transforms.CenterCrop(center_crop))
elif random_sized_crop:
transform_.append(transforms.RandomSizedCrop(random_sized_crop))
if image_size:
if isinstance(image_size, int):
image_size = (image_size, image_size)
transform_.append(transforms.Resize(image_size))
if flip:
transform_.append(transforms.RandomHorizontalFlip())
transform_.append(transforms.ToTensor())
if normalize:
if isinstance(normalize, transforms.Normalize):
transform_.append(normalize)
else:
transform_.append(transforms.Normalize(*normalize))
transform = transforms.Compose(transform_)
return transform
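# Example usage (sizes and statistics are illustrative): centre-crop to 256,
# resize to 224, flip, and normalize with ImageNet statistics:
#   transform = build_transforms(
#       normalize=((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
#       center_crop=256, image_size=224, flip=True)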
| bsd-3-clause | 2,229,084,709,715,869,200 | 26.490566 | 75 | 0.638298 | false |
ilyapatrushev/isimage | isimage/select_images/check_cleared.py | 1 | 1563 | """ Utility module for command line script select_images
"""
# Author: Ilya Patrushev [email protected]
# License: GPL v2.0
import os
import numpy as np
import scipy.linalg as la
from cPickle import load
import cv2
def image_colour_distribution(img):
"""
Extract colour distribution parameters.
Parameters
----------
    img: array [height, width, 3]
The RGB image
Returns
-------
array[9]
colour distribution parameters
"""
lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
m = np.mean(lab.reshape(-1, 3), axis=0)
s = la.cholesky(np.cov(lab.reshape(-1, 3).T))
del lab
return np.hstack([ m, s[np.triu_indices(3)]])
def check_cleared(images, model_path=None):
"""
Classify images as cleared or un-cleared based on a GMM.
Parameters
----------
images: list
The RGB images
model_path: str
        Path to the pickled dump of the GMM
Returns
-------
list
True if the corresponding image is classified as cleared.
"""
if model_path is None:
return np.zeros_like(images, dtype=bool)
if len(images) == 0:
return []
X = np.asarray([image_colour_distribution(img) for img in images])
assert(os.path.exists(model_path))
model, mu, sig, gcl = load(open(model_path))
labels = model.predict((X - mu)/sig)
return labels == gcl
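# Example usage (model file name is hypothetical): classify a list of RGB
# images against a previously pickled GMM dump:
#   cleared = check_cleared(images, model_path='gmm_model.pkl')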
| gpl-2.0 | 4,469,173,557,604,382,700 | 20.708333 | 70 | 0.544466 | false |
codeWangHub/machineLearningAnywhere | neural network algorithm/1st_basic_knn/knn.py | 1 | 2797 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # this toolkit is needed for 3D plotting
# Load the dataset
def loadData(file):
with open(file) as f :
lines = f.readlines()
    size = len(lines) # number of lines in the file
    data = np.zeros((size,3)) # holds the feature data
    labels = [] # holds the class labels
    # parse each line
index = 0
for line in lines:
        line = line.strip() # strip whitespace from both ends
        terms = line.split('\t') # split on TAB
data[index,:] = terms[0:3]
mark = terms[-1][0]
if mark == 's': # smallDoses
labels.append(2)
elif mark == 'd': # didntLike
labels.append(1)
elif mark == 'l': # largeDoses
labels.append(3)
else :
print("error")
index += 1
return data,labels
# Plot the data distribution (3D)
def normalize(data) :
'data must be a numpy array not a list'
    max = data.max(0) # column-wise maximum
    min = data.min(0) # column-wise minimum
# new = (old - min) / (max-min)
newValue = (data - min) / (max-min)
return newValue,max,min
# Compute the distance between data and every element of dataSet
def distance(dataSet,data) :
return np.sum(np.square(dataSet - np.tile(data,(dataSet.shape[0],1))),
                  axis = 1) # sum of squares over the three coordinates
# Load and parse the data
data,labels = loadData('datingTestSet.txt')
# Normalize the data
normalized_data,max_value,min_value = normalize(data)
#ax=plt.subplot(111,projection='3d') #create a 3D plotting axes
#colors = ['r','g','b']
#for i in range(data.shape[0]) :
# ax.scatter(normalized_data[i,0],normalized_data[i,1],normalized_data[i,2],c=colors[labels[i]-1])
#plt.show()
# knn
def prodution(fly,play,ice,k):
classifiction = {}
    # normalize the query point
norm = ( np.array([fly,play,ice]) - min_value ) / (max_value-min_value)
#print('data nromal = ' , norm)
    dis = distance(normalized_data,norm) # distances between the query point and every element of the dataset
    #find the indices of the k nearest points, since we ultimately need their labels
    sorted_distance = dis.argsort() # indices of the elements in sorted order
for i in range(k):
        index = sorted_distance[i] # get: return the stored count if present, otherwise 0
classifiction[labels[index]] = classifiction.get(labels[index],0) + 1
return classifiction
def classifiction(class_dict) :
maxvlaue = 0
clazz_index = 0
#print(class_dict)
for key,value in class_dict.items() :
if maxvlaue < value :
maxvlaue = value
clazz_index = key
return clazz_index
# Compute the accuracy
def accuracy(datas,lables,K) :
rigth = 0
cnt = len(datas)
for i in range(cnt) :
clszz_dict = prodution(datas[i][0],datas[i][1],datas[i][2],K)
if classifiction(clszz_dict) == labels[i] :
rigth += 1
return rigth,rigth/cnt
right , acc = accuracy(data,labels,15)
print('right = {} , accuracy = {}'.format(str(right),str(acc)) )
| apache-2.0 | 8,553,475,345,737,452,000 | 20.034188 | 98 | 0.645672 | false |
xnmp/lambdaquery | LambdaQuery/functions.py | 1 | 16156 | from functools import wraps
from LambdaQuery.query import *
def asExpr(value):
if isinstance(value, Columns):
if len(value) > 1:
raise TypeError(f"Can't convert Columns with more than one Expr to Expr: {value}")
return value.asExpr()
elif isinstance(value, ConstExpr.allowedTypes):
return ConstExpr(value)
elif isinstance(value, Expr):
return value
raise TypeError(f"Cannot convert to Expr: {value} of type {type(value)}")
def labelResult(func, args):
return func.__name__.strip('_') + '_' + args.bind(lambda x: x.keys() if type(x) is Columns
else L(x.__class__.__name__.lower())).intersperse('_')
def augment(func):
# Columns -> Columns and Columns -> Query
# all we want to do is lift func to something that carries through the joinconds and the groupbys
# the complication is that we need it to addquery, or do we?
@wraps(func)
def mfunc(*args, **kwargs):
res = func(*args, **kwargs)
# if isinstance(res, Query): res = res.joinM()
colargs = L(*args).filter(lambda x: isinstance(x, Columns))
oldgroupbys = colargs.bind(lambda x: x.groupbys)
oldjc = colargs.fmap(lambda x: x.asQuery()).fold(lambda x, y: x | y)
if isinstance(res, Query) and type(res.columns) is not Columns:
for table in oldgroupbys.getTables():
table.derivatives += res.columns.getTables()[0]
res.groupbys = oldgroupbys + res.groupbys
# res.joincond @= colargs.fmap(lambda x: x.asQuery()).combine()
if isinstance(res, Columns):
res = addQuery(oldjc, res.asQuery(), addcols='right').asCols()
if type(res) is Columns: res = res.label(labelResult(func, colargs))
else:
res = addQuery(oldjc, res.asQuery(), addcols='right')
# this breaks things
# res.columns = res.columns.label(func.__name__)
return res
return mfunc
def lift(func):
"""
Lifts Expr -> Expr to Columns -> Columns. "Applicative instance for Columns"
"""
@wraps(func)
def colfunc(*args, **kwargs):
res = Columns()
colargs = L(*args).filter(lambda x: isinstance(x, Columns))
res[labelResult(func, colargs)] = func(*L(*args).fmap(asExpr), **kwargs)
# replica of augment logic
res.groupbys = colargs.bind(lambda x: x.groupbys)
# we're NOT using addQuery here
res.joincond &= colargs.fmap(lambda x: x.joincond).fold(lambda x, y: x & y, mzero=AndExpr())
# res.joincond @= colargs.fmap(lambda x: x.asQuery()).combine()
# oldjc = colargs.fmap(lambda x: x.asQuery()).fold(lambda x, y: x @ y)
# res = addQuery(oldjc, res.asQuery(), addcols='right').asCols()
return res
setattr(Columns, func.__name__, colfunc)
setattr(Expr, '_' + func.__name__, func)
return colfunc
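# Illustrative note: the Expr-level operators defined below (e.g. __eq__) are
# written against Exprs, and @lift also exposes them on Columns, so something
# like `col1 == col2` builds an EqExpr while carrying the joinconds/groupbys along.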
def injective(fname=None):
def decorator(func):
nonlocal fname
if fname is None:
fname = func.__name__
@property
def colfunc(self, *args, **kwargs):
if hasattr(self, fname + '_saved'):
return getattr(self, fname + '_saved')
res = func(self, *args, **kwargs)
# another replica of augment
res.groupbys = self.groupbys + res.groupbys
if type(res) is Query and type(res.columns) is not Columns:
for table in res.groupbys.getTables():
table.derivatives += res.columns.getTables()[0]
res = addQuery(self.asQuery(), res.asQuery(), addcols='right').one
# res.joincond &= self.joincond
# STOP using primary as a way to track where the column came from, that's the role of the group bys
object.__setattr__(self, fname + '_saved', res)
return res
setattr(Columns, func.__name__, colfunc)
return colfunc
return decorator
def sqlfunc(strfunc):
@lift
@wraps(strfunc)
def exprfunc(*exprs, **kwargs):
return FuncExpr(strfunc, *exprs, **kwargs)
return exprfunc
def aggfunc(strfunc):
@lift
@wraps(strfunc)
def exprfunc(*exprs, **kwargs):
return AggExpr(strfunc, *exprs, **kwargs)
@wraps(strfunc)
def qfunc(q0, colname=None, **kwargs):
q0 = copy(q0)
if colname is not None:
q0 = q0.fmap(lambda x: getattr(x, colname))
elif len(q0.columns) > 1:
# this is so you can do milestones().count() instead of milestones().count('trid')
q0.columns = q0.getPrimary()
# q0 = q0.fmap(lambda x: q0.getPrimary())
return q0.aggregate(lambda x: exprfunc(x, **kwargs))
setattr(Query, strfunc.__name__[:-1], qfunc)
return exprfunc
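# Illustrative note: each @aggfunc below is exposed both as a Columns method
# (e.g. Columns.count_) and, through qfunc above, as a Query method with the
# trailing underscore dropped (e.g. Query.count).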
def windowfunc(strfunc):
@lift
@wraps(strfunc)
def exprfunc(*exprs, **kwargs):
return WindowExpr(strfunc, *exprs, **kwargs)
@wraps(strfunc)
def qfunc(q0, *args, **kwargs):
args = L(*args).fmap(lambda x: getattr(q0.columns, x) if type(x) is str else x)
return q0.aggregate(lambda x: exprfunc(*args, **kwargs), ungroup=False)
setattr(Query, strfunc.__name__[:-1], qfunc)
@wraps(strfunc)
def colfunc(col, *args, **kwargs):
return qfunc(col.asQuery(), col, *args, **kwargs)
setattr(Columns, strfunc.__name__, colfunc)
return exprfunc
# this doesn't work...
def memoize(fname=None):
def decorator(func):
nonlocal fname
if fname is None:
fname = func.__name__
@wraps(func)
def mfunc(self, *args, **kwargs):
if hasattr(self, func.__name__ + '_saved'):
return getattr(self, func.__name__ + '_saved')
            res = func(self, *args, **kwargs)
object.__setattr__(self, func.__name__ + '_saved', res)
return res
return mfunc
return decorator
# def addJC(func):
# @wraps(func)
# def mfunc(*args, **kwargs):
# return func(*args, **kwargs).joinM() + Query.unit(*L(*args).filter(lambda x: isinstance(x, Columns)))
# return mfunc
class Lifted(object):
def __init__(self, func):
self.func = augment(func)
setattr(Columns, func.__name__, self.func)
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
class Kleisli(object):
def __init__(self, func):
self.func = augment(func)
setattr(Columns, func.__name__, self.func)
setattr(Query, func.__name__, lambda x: x.bind(self.func))
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
# %% ^━━━━━━━━━━━━━━━━ OPERATORS ━━━━━━━━━━━━━━━━━━━━^
@lift
def __eq__(self, other):
return EqExpr(self, other)
@lift
def __gt__(self, other):
return BinOpExpr(">", self, other)
@lift
def __ge__(self, other):
return BinOpExpr(">=", self, other)
@lift
def __lt__(self, other):
return BinOpExpr("<", self, other)
@lift
def __le__(self, other):
return BinOpExpr("<=", self, other)
@lift
def __ne__(self, other):
return BinOpExpr("!=", self, other)
@lift
def __add__(self, other):
if self.isTime() and type(other) is ConstExpr and isinstance(other.value, int):
other = ConstExpr(timedelta(days = other.value))
return BinOpExpr("+", self, other)
@lift
def __sub__(self, other):
if self.isTime() and other.isTime():
self = Expr._epoch_(self)
other = Expr._epoch_(other)
return BinOpExpr("-", self, other)
@lift
def __mul__(self, other):
return BinOpExpr("*", self, other)
@lift
def __truediv__(self, other):
return BinOpExpr(":: FLOAT /", self, other)
@lift
def __radd__(self, other):
return BinOpExpr("+", other, self)
@lift
def __rsub__(self, other):
return BinOpExpr("-", other, self)
@lift
def __rmul__(self, other):
return BinOpExpr("*", other, self)
@lift
def __rtruediv__(self, other):
return BinOpExpr(":: FLOAT /", other, self)
@lift
def __floordiv__(self, other):
return BinOpExpr("/", self, other)
@lift
def __or__(self, other):
return BinOpExpr("OR", self, other)
@lift
def __and__(self, other):
return self & other
@sqlfunc
def __invert__(expr):
if isinstance(expr, FuncExpr) and expr.func.__name__ == "__invert__":
return str(expr).replace("NOT ", "")
if isinstance(expr, FuncExpr) and expr.func.__name__ == "notnull_":
return str(expr).replace("NOT NULL", "NULL")
return f"NOT ({expr})"
@sqlfunc
def __neg__(expr):
return f"-{expr}"
# %% ^━━━━━━━━━━━━━━━━━━━ COLUMN FUNCTIONS ━━━━━━━━━━━━━━━━━^
@sqlfunc
def round_(expr, interval=86400/4):
return f"TIMESTAMP WITH TIME ZONE 'EPOCH' + FLOOR(EXTRACT(EPOCH FROM {expr}) / {interval}) * {interval} * INTERVAL '1 second'"
@sqlfunc
def zerodiv_(numer, denom):
return f"COALESCE({numer} :: FLOAT / NULLIF({denom}, 0), 0)"
@sqlfunc
def len_(expr):
return f"CHAR_LENGTH({expr})"
@sqlfunc
def in_(expr, *inlist):
return f"{expr} IN {inlist}"
@sqlfunc
def like_(expr1, likestr=None):
return f"{expr1} LIKE {likestr}"
@sqlfunc
def ilike_(expr1, likestr=None):
return f"{expr1} ILIKE \'%{likestr}%\'"
@sqlfunc
def epoch_(expr):
return f"EXTRACT(EPOCH FROM {expr})"
@sqlfunc
def randomize_(expr):
return f"STRTOL(SUBSTRING(MD5({expr}), 1, 8), 16) :: FLOAT / (STRTOL('ffffffff', 16))"
@sqlfunc
def if_(cond, expr1, expr2):
return f"CASE WHEN {cond} THEN {expr1} ELSE {expr2} END"
@sqlfunc
def ifen_(expr, cond):
if type(cond) is FuncExpr and cond.func.__name__ == "ifen_":
cond = cond.children[1] & cond.children[0]
if type(expr) is FuncExpr and expr.func.__name__ == "ifen_":
cond &= expr.children[1]
expr = expr.children[0]
return f"CASE WHEN {cond} THEN {expr} END"
@sqlfunc
def case_(*pairs):
finalexpr = pairs[-1]
res = "CASE "
for i in range(0, len(pairs) - 2, 2):
res += f"\n WHEN {pairs[i]} THEN {pairs[i+1]} "
else:
res += f"\n ELSE {finalexpr} \nEND"
return res
@sqlfunc
def roundnum_(expr, interval=1, up=False):
if not up:
return f"FLOOR({expr} :: FLOAT / {interval}) * {interval}"
else:
return f"CEILING({expr} :: FLOAT / {interval}) * {interval}"
@sqlfunc
def cast_(expr, sqltype):
strtype = str(sqltype)[1:-1]
return f"({expr}) :: {strtype}"
@sqlfunc
def roundtime_(expr, interval=86400):
# Expr._round_(Expr._epoch_(expr), interval)
return f"TIMESTAMP WITH TIME ZONE 'EPOCH' + FLOOR(EXTRACT(EPOCH FROM {expr}) / {interval}) * {interval} * INTERVAL '1 second'"
@sqlfunc
def coalesce_(*exprs):
return "COALESCE(" + ', '.join(map(str, exprs)) + ")"
@sqlfunc
def from_unixtime_(expr):
return f"TIMESTAMP WITH TIME ZONE 'EPOCH' + {expr} * INTERVAL '1 SECOND'"
@sqlfunc
def log_(expr):
return f"LN(GREATEST({expr}, 0) + 1)"
@sqlfunc
def exp_(expr):
return f"EXP({expr})"
@sqlfunc
def floor_(expr):
return f"FLOOR({expr})"
@sqlfunc
def isnull_(expr):
if type(expr) is FuncExpr and expr.func.__name__ == "ifen_":
return f'{expr.children[0]} IS NULL OR NOT {expr.children[1]}'
return f"{expr} IS NULL"
@sqlfunc
def notnull_(expr):
if type(expr) is FuncExpr and expr.func.__name__ == "ifen_":
return f'{expr.children[0]} IS NOT NULL AND {expr.children[1]}'
return f"{expr} IS NOT NULL"
@sqlfunc
def least_(*exprs):
parts = ', '.join(map(str, exprs))
return f"LEAST({parts})"
@sqlfunc
def zscore_(expr, partitionexpr):
return f"({expr} - AVG({expr}) OVER (PARTITION BY {partitionexpr})) :: FLOAT / STDDEV({expr}) OVER (PARTITION BY {partitionexpr})"
@sqlfunc
def greatest_(*exprs):
parts = ', '.join(map(str, exprs))
return f"GREATEST({parts})"
@sqlfunc
def row_(partitionexpr, orderexpr):
return f"ROW_NUMBER() OVER (PARTITION BY {partitionexpr} ORDER BY {orderexpr})"
# %% ^━━━━━━━━━━━━━━━━━━ AGGREGATES ━━━━━━━━━━━━━━━━━━━━^
@aggfunc
def count_(expr):
return f'COUNT(DISTINCT {expr})'
@aggfunc
def avg_(expr, ntile=None):
# expr = Expr._coalesce_(expr, 0)
# return f'AVG({expr})'
return f'AVG(COALESCE({expr}, 0))'
@aggfunc
def sum_(expr):
return f'SUM({expr})'
@aggfunc
def max_(expr):
return f'MAX({expr})'
@aggfunc
def min_(expr):
return f'MIN({expr})'
@aggfunc
def any_(expr):
return f'BOOL_OR({expr})'
@aggfunc
def all_(expr):
return f'BOOL_AND({expr})'
@aggfunc
def median_(expr, partitions=None):
return f'MEDIAN({expr})'
# %% ^━━━━━━━━━━━━━━━━━━━━ WINDOW FUNCTIONS ━━━━━━━━━━━━━━━━━━^
def partstr(partitions):
if partitions is not None:
        return 'OVER (PARTITION BY ' + ','.join(map(str, partitions)) + ')'
return ''
# @windowfunc
# def ntile_(perc=100, expr=None, partitions=None, order=None):
# return f'NTILE({perc}) OVER (PARTITION BY {partitions} ORDER BY {expr})'
@windowfunc
def listagg_(expr=None, order=None):
return f'LISTAGG({expr}) WITHIN GROUP (ORDER BY {expr})'
# @windowfunc
# def quantileof_(perc=100, expr=None, partitions=None):
# return f'PERCENTILE_DISC({perc/100}) WITHIN GROUP (ORDER BY {expr}){partitions}'
# @windowfunc
# def median_(expr, partitions=None):
# return f'MEDIAN({expr}) OVER (PARTITION BY {partitions})'
@windowfunc
def rank_(order=None, partitions=None):
return f'ROW_NUMBER() OVER (PARTITION BY {partitions} ORDER BY {order})'
@windowfunc
def first_(expr, *partitions, order):
parts = ','.join(map(str, partitions))
    return f'''FIRST_VALUE({expr}) OVER (PARTITION BY {parts} ORDER BY {order} ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)'''
@windowfunc
def last_(expr, *partitions, order):
parts = ','.join(map(str, partitions))
    return f'''LAST_VALUE({expr}) OVER (PARTITION BY {parts} ORDER BY {order} ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)'''
# @windowfunc
# def zscore_(expr, partitionexpr):
# return f"({expr} - AVG({expr}) OVER (PARTITION BY {partitionexpr})) :: FLOAT / STDDEV({expr}) OVER (PARTITION BY {partitionexpr})"
@windowfunc
def lag_(expr, order=None, partitions=None):
if partitions and not isinstance(partitions, list):
partitions = [partitions]
parts = ''
if partitions:
parts = ','.join(map(str, partitions))
parts = f'PARTITION BY {parts} '
return f'''LAG({expr}) OVER ({parts}ORDER BY {order})'''
# %% ^━━━━━━━━━━━━━━━━━━━━━ MISC FUNCTIONS ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━^
@Lifted
def ratio_(expr, cond):
return expr.ifen_(cond).count_().zerodiv_(expr.count_())
@Lifted
def between(self, bound, tlimit=None, ts='ts'):
# todo: add a decorator for this
try:
tsvar = getattr(self, ts)
except AttributeError:
tsvars = self.items().filter(lambda x: '_ts' in x[0]).fmap(lambda x: x[1])
if tsvars:
tsvar = tsvars[0]
else:
tsvar = self
# raise AttributeError(f"No timestamp in {self}")
if isinstance(tlimit, Columns) or isinstance(tlimit, str) or isinstance(tlimit, dt.datetime):
return (tsvar > bound) & (tsvar < tlimit)
elif isinstance(tlimit, int):
if tlimit > 0:
tlimit = timedelta(days=tlimit)
return (tsvar > bound) & (tsvar < bound + tlimit)
else:
tlimit = timedelta(days=-tlimit)
return (tsvar < bound) & (tsvar > bound - tlimit)
else:
raise TypeError("Between func: invalid upper limit")
# %% ^━━━━━━━━━━━━━━━━━ OTHER FUNCTIONS ━━━━━━━━━━━━━━━━━━^
| mit | 1,667,856,729,118,131,700 | 31.551867 | 141 | 0.590567 | false |
eltonsantos/quokka | quokka/core/admin/__init__.py | 1 | 1953 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from flask import request, session
from flask.ext.admin import Admin
from flask.ext.admin.contrib import fileadmin
from .models import ModelAdmin
from .views import IndexView
logger = logging.getLogger()
class QuokkaAdmin(Admin):
def register(self, model, view=None, *args, **kwargs):
View = view or ModelAdmin
try:
self.add_view(View(model, *args, **kwargs))
except Exception as e:
logger.warning(
"admin.register({}, {}, {}, {}) error: {}".format(
model, view, args, kwargs, e.message
)
)
def create_admin(app=None):
return QuokkaAdmin(app, index_view=IndexView())
def configure_admin(app, admin):
ADMIN = app.config.get(
'ADMIN',
{
'name': 'Quokka Admin',
'url': '/admin'
}
)
for k, v in list(ADMIN.items()):
setattr(admin, k, v)
babel = app.extensions.get('babel')
if babel:
try:
@babel.localeselector
def get_locale():
override = request.args.get('lang')
if override:
session['lang'] = override
return session.get('lang', 'en')
admin.locale_selector(get_locale)
except:
pass # Exception: Can not add locale_selector second time.
for entry in app.config.get('FILE_ADMIN', []):
try:
admin.add_view(
fileadmin.FileAdmin(
entry['path'],
entry['url'],
name=entry['name'],
category=entry['category'],
endpoint=entry['endpoint']
)
)
except:
            pass # TODO: check blueprint endpoint collision
if admin.app is None:
admin.init_app(app)
return admin
| mit | -2,768,285,031,541,726,700 | 24.363636 | 71 | 0.517665 | false |
bgris/ODL_bgris | lib/python3.5/site-packages/astroid/tests/unittest_regrtest.py | 1 | 12203 | # copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
import sys
import unittest
import textwrap
import six
from astroid import MANAGER, Instance, nodes
from astroid.bases import BUILTINS
from astroid.builder import AstroidBuilder
from astroid import exceptions
from astroid.raw_building import build_module
from astroid.manager import AstroidManager
from astroid.test_utils import require_version, extract_node
from astroid.tests import resources
from astroid import transforms
class NonRegressionTests(resources.AstroidCacheSetupMixin,
unittest.TestCase):
def setUp(self):
sys.path.insert(0, resources.find('data'))
MANAGER.always_load_extensions = True
MANAGER.astroid_cache[BUILTINS] = self._builtins
def tearDown(self):
# Since we may have created a brainless manager, leading
# to a new cache builtin module and proxy classes in the constants,
# clear out the global manager cache.
MANAGER.clear_cache(self._builtins)
MANAGER.always_load_extensions = False
sys.path.pop(0)
sys.path_importer_cache.pop(resources.find('data'), None)
def brainless_manager(self):
manager = AstroidManager()
# avoid caching into the AstroidManager borg since we get problems
# with other tests :
manager.__dict__ = {}
manager._failed_import_hooks = []
manager.astroid_cache = {}
manager._mod_file_cache = {}
manager._transform = transforms.TransformVisitor()
        manager.clear_cache() # trigger proper bootstrapping
return manager
def test_module_path(self):
man = self.brainless_manager()
mod = man.ast_from_module_name('package.import_package_subpackage_module')
package = next(mod.igetattr('package'))
self.assertEqual(package.name, 'package')
subpackage = next(package.igetattr('subpackage'))
self.assertIsInstance(subpackage, nodes.Module)
self.assertTrue(subpackage.package)
self.assertEqual(subpackage.name, 'package.subpackage')
module = next(subpackage.igetattr('module'))
self.assertEqual(module.name, 'package.subpackage.module')
def test_package_sidepackage(self):
manager = self.brainless_manager()
assert 'package.sidepackage' not in MANAGER.astroid_cache
package = manager.ast_from_module_name('absimp')
self.assertIsInstance(package, nodes.Module)
self.assertTrue(package.package)
subpackage = next(package.getattr('sidepackage')[0].infer())
self.assertIsInstance(subpackage, nodes.Module)
self.assertTrue(subpackage.package)
self.assertEqual(subpackage.name, 'absimp.sidepackage')
def test_living_property(self):
builder = AstroidBuilder()
builder._done = {}
builder._module = sys.modules[__name__]
builder.object_build(build_module('module_name', ''), Whatever)
def test_new_style_class_detection(self):
try:
import pygtk # pylint: disable=unused-variable
except ImportError:
self.skipTest('test skipped: pygtk is not available')
# XXX may fail on some pygtk version, because objects in
# gobject._gobject have __module__ set to gobject :(
builder = AstroidBuilder()
data = """
import pygtk
pygtk.require("2.6")
import gobject
class A(gobject.GObject):
pass
"""
astroid = builder.string_build(data, __name__, __file__)
a = astroid['A']
self.assertTrue(a.newstyle)
def test_pylint_config_attr(self):
try:
from pylint import lint # pylint: disable=unused-variable
except ImportError:
self.skipTest('pylint not available')
mod = MANAGER.ast_from_module_name('pylint.lint')
pylinter = mod['PyLinter']
expect = ['OptionsManagerMixIn', 'object', 'MessagesHandlerMixIn',
'ReportsHandlerMixIn', 'BaseTokenChecker', 'BaseChecker',
'OptionsProviderMixIn']
self.assertListEqual([c.name for c in pylinter.ancestors()],
expect)
self.assertTrue(list(Instance(pylinter).getattr('config')))
inferred = list(Instance(pylinter).igetattr('config'))
self.assertEqual(len(inferred), 1)
self.assertEqual(inferred[0].root().name, 'optparse')
self.assertEqual(inferred[0].name, 'Values')
def test_numpy_crash(self):
"""test don't crash on numpy"""
        # a crash occurred somewhere in the past, and an
# InferenceError instead of a crash was better, but now we even infer!
try:
import numpy # pylint: disable=unused-variable
except ImportError:
self.skipTest('test skipped: numpy is not available')
builder = AstroidBuilder()
data = """
from numpy import multiply
multiply(1, 2, 3)
"""
astroid = builder.string_build(data, __name__, __file__)
callfunc = astroid.body[1].value.func
inferred = callfunc.inferred()
self.assertEqual(len(inferred), 2)
@require_version('3.0')
def test_nameconstant(self):
# used to fail for Python 3.4
builder = AstroidBuilder()
astroid = builder.string_build("def test(x=True): pass")
default = astroid.body[0].args.args[0]
self.assertEqual(default.name, 'x')
self.assertEqual(next(default.infer()).value, True)
@require_version('2.7')
def test_with_infer_assignnames(self):
builder = AstroidBuilder()
data = """
with open('a.txt') as stream, open('b.txt'):
stream.read()
"""
astroid = builder.string_build(data, __name__, __file__)
# Used to crash due to the fact that the second
# context manager didn't use an assignment name.
list(astroid.nodes_of_class(nodes.Call))[-1].inferred()
def test_recursion_regression_issue25(self):
builder = AstroidBuilder()
data = """
import recursion as base
_real_Base = base.Base
class Derived(_real_Base):
pass
def run():
base.Base = Derived
"""
astroid = builder.string_build(data, __name__, __file__)
# Used to crash in _is_metaclass, due to wrong
# ancestors chain
classes = astroid.nodes_of_class(nodes.ClassDef)
for klass in classes:
# triggers the _is_metaclass call
klass.type # pylint: disable=pointless-statement
def test_decorator_callchain_issue42(self):
builder = AstroidBuilder()
data = """
def test():
def factory(func):
def newfunc():
func()
return newfunc
return factory
@test()
def crash():
pass
"""
astroid = builder.string_build(data, __name__, __file__)
self.assertEqual(astroid['crash'].type, 'function')
def test_filter_stmts_scoping(self):
builder = AstroidBuilder()
data = """
def test():
compiler = int()
class B(compiler.__class__):
pass
compiler = B()
return compiler
"""
astroid = builder.string_build(data, __name__, __file__)
test = astroid['test']
result = next(test.infer_call_result(astroid))
self.assertIsInstance(result, Instance)
base = next(result._proxied.bases[0].infer())
self.assertEqual(base.name, 'int')
def test_ancestors_patching_class_recursion(self):
node = AstroidBuilder().string_build(textwrap.dedent("""
import string
Template = string.Template
class A(Template):
pass
class B(A):
pass
def test(x=False):
if x:
string.Template = A
else:
string.Template = B
"""))
klass = node['A']
ancestors = list(klass.ancestors())
self.assertEqual(ancestors[0].qname(), 'string.Template')
def test_ancestors_yes_in_bases(self):
# Test for issue https://bitbucket.org/logilab/astroid/issue/84
        # This used to crash astroid with a TypeError, because a YES
# node was present in the bases
node = extract_node("""
def with_metaclass(meta, *bases):
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
import lala
class A(with_metaclass(object, lala.lala)): #@
pass
""")
ancestors = list(node.ancestors())
if six.PY3:
self.assertEqual(len(ancestors), 1)
self.assertEqual(ancestors[0].qname(),
"{}.object".format(BUILTINS))
else:
self.assertEqual(len(ancestors), 0)
def test_ancestors_missing_from_function(self):
# Test for https://www.logilab.org/ticket/122793
node = extract_node('''
def gen(): yield
GEN = gen()
next(GEN)
''')
self.assertRaises(exceptions.InferenceError, next, node.infer())
def test_unicode_in_docstring(self):
# Crashed for astroid==1.4.1
# Test for https://bitbucket.org/logilab/astroid/issues/273/
# In a regular file, "coding: utf-8" would have been used.
node = extract_node(u'''
from __future__ import unicode_literals
class MyClass(object):
def method(self):
"With unicode : %s "
instance = MyClass()
''' % u"\u2019")
next(node.value.infer()).as_string()
def test_binop_generates_nodes_with_parents(self):
node = extract_node('''
def no_op(*args):
pass
def foo(*args):
def inner(*more_args):
args + more_args #@
return inner
''')
inferred = next(node.infer())
self.assertIsInstance(inferred, nodes.Tuple)
self.assertIsNotNone(inferred.parent)
self.assertIsInstance(inferred.parent, nodes.BinOp)
def test_decorator_names_inference_error_leaking(self):
node = extract_node('''
class Parent(object):
@property
def foo(self):
pass
class Child(Parent):
@Parent.foo.getter
def foo(self): #@
return super(Child, self).foo + ['oink']
''')
inferred = next(node.infer())
self.assertEqual(inferred.decoratornames(), set())
def test_ssl_protocol(self):
node = extract_node('''
import ssl
ssl.PROTOCOL_TLSv1
''')
inferred = next(node.infer())
self.assertIsInstance(inferred, nodes.Const)
def test_uninferable_string_argument_of_namedtuple(self):
node = extract_node('''
import collections
collections.namedtuple('{}'.format("a"), '')()
''')
next(node.infer())
@require_version(maxver='3.0')
def test_reassignment_in_except_handler(self):
node = extract_node('''
import exceptions
try:
{}["a"]
except KeyError, exceptions.IndexError:
pass
IndexError #@
''')
self.assertEqual(len(node.inferred()), 1)
class Whatever(object):
a = property(lambda x: x, lambda x: x)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -4,269,198,178,739,831,300 | 32.401099 | 82 | 0.609276 | false |
maleficarium/youtube-dl | youtube_dl/postprocessor/ffmpeg.py | 1 | 22575 | from __future__ import unicode_literals
import io
import os
import subprocess
import time
from .common import AudioConversionError, PostProcessor
from ..compat import (
compat_subprocess_get_DEVNULL,
)
from ..utils import (
encodeArgument,
encodeFilename,
get_exe_version,
is_outdated_version,
PostProcessingError,
prepend_extension,
shell_quote,
subtitles_filename,
dfxp2srt,
ISO639Utils,
)
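# Maps output file extensions to the corresponding ffmpeg format (muxer) names,
# used where the extension alone does not match ffmpeg's -f format name.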
EXT_TO_OUT_FORMATS = {
"aac": "adts",
"m4a": "ipod",
"mka": "matroska",
"mkv": "matroska",
"mpg": "mpeg",
"ogv": "ogg",
"ts": "mpegts",
"wma": "asf",
"wmv": "asf",
}
class FFmpegPostProcessorError(PostProcessingError):
pass
class FFmpegPostProcessor(PostProcessor):
def __init__(self, downloader=None):
PostProcessor.__init__(self, downloader)
self._determine_executables()
def check_version(self):
if not self.available:
raise FFmpegPostProcessorError('ffmpeg or avconv not found. Please install one.')
required_version = '10-0' if self.basename == 'avconv' else '1.0'
if is_outdated_version(
self._versions[self.basename], required_version):
warning = 'Your copy of %s is outdated, update %s to version %s or newer if you encounter any errors.' % (
self.basename, self.basename, required_version)
if self._downloader:
self._downloader.report_warning(warning)
@staticmethod
def get_versions(downloader=None):
return FFmpegPostProcessor(downloader)._versions
def _determine_executables(self):
programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
prefer_ffmpeg = False
self.basename = None
self.probe_basename = None
self._paths = None
self._versions = None
if self._downloader:
prefer_ffmpeg = self._downloader.params.get('prefer_ffmpeg', False)
location = self._downloader.params.get('ffmpeg_location')
if location is not None:
if not os.path.exists(location):
self._downloader.report_warning(
'ffmpeg-location %s does not exist! '
'Continuing without avconv/ffmpeg.' % (location))
self._versions = {}
return
elif not os.path.isdir(location):
basename = os.path.splitext(os.path.basename(location))[0]
if basename not in programs:
self._downloader.report_warning(
'Cannot identify executable %s, its basename should be one of %s. '
'Continuing without avconv/ffmpeg.' %
(location, ', '.join(programs)))
self._versions = {}
return None
location = os.path.dirname(os.path.abspath(location))
if basename in ('ffmpeg', 'ffprobe'):
prefer_ffmpeg = True
self._paths = dict(
(p, os.path.join(location, p)) for p in programs)
self._versions = dict(
(p, get_exe_version(self._paths[p], args=['-version']))
for p in programs)
if self._versions is None:
self._versions = dict(
(p, get_exe_version(p, args=['-version'])) for p in programs)
self._paths = dict((p, p) for p in programs)
if prefer_ffmpeg:
prefs = ('ffmpeg', 'avconv')
else:
prefs = ('avconv', 'ffmpeg')
for p in prefs:
if self._versions[p]:
self.basename = p
break
if prefer_ffmpeg:
prefs = ('ffprobe', 'avprobe')
else:
prefs = ('avprobe', 'ffprobe')
for p in prefs:
if self._versions[p]:
self.probe_basename = p
break
@property
def available(self):
return self.basename is not None
@property
def executable(self):
return self._paths[self.basename]
@property
def probe_available(self):
return self.probe_basename is not None
@property
def probe_executable(self):
return self._paths[self.probe_basename]
def run_ffmpeg_multiple_files(self, input_paths, out_path, opts):
self.check_version()
oldest_mtime = min(
os.stat(encodeFilename(path)).st_mtime for path in input_paths)
opts += self._configuration_args()
files_cmd = []
for path in input_paths:
files_cmd.extend([
encodeArgument('-i'),
encodeFilename(self._ffmpeg_filename_argument(path), True)
])
cmd = ([encodeFilename(self.executable, True), encodeArgument('-y')] +
files_cmd +
[encodeArgument(o) for o in opts] +
[encodeFilename(self._ffmpeg_filename_argument(out_path), True)])
if self._downloader.params.get('verbose', False):
self._downloader.to_screen('[debug] ffmpeg command line: %s' % shell_quote(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
stderr = stderr.decode('utf-8', 'replace')
msg = stderr.strip().split('\n')[-1]
raise FFmpegPostProcessorError(msg)
self.try_utime(out_path, oldest_mtime, oldest_mtime)
def run_ffmpeg(self, path, out_path, opts):
self.run_ffmpeg_multiple_files([path], out_path, opts)
def _ffmpeg_filename_argument(self, fn):
# Always use 'file:' because the filename may contain ':' (ffmpeg
# interprets that as a protocol) or can start with '-' (-- is broken in
# ffmpeg, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details)
# Also leave '-' intact in order not to break streaming to stdout.
return 'file:' + fn if fn != '-' else fn
class FFmpegExtractAudioPP(FFmpegPostProcessor):
def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
FFmpegPostProcessor.__init__(self, downloader)
if preferredcodec is None:
preferredcodec = 'best'
self._preferredcodec = preferredcodec
self._preferredquality = preferredquality
self._nopostoverwrites = nopostoverwrites
def get_audio_codec(self, path):
if not self.probe_available:
raise PostProcessingError('ffprobe or avprobe not found. Please install one.')
try:
cmd = [
encodeFilename(self.probe_executable, True),
encodeArgument('-show_streams'),
encodeFilename(self._ffmpeg_filename_argument(path), True)]
if self._downloader.params.get('verbose', False):
self._downloader.to_screen('[debug] %s command line: %s' % (self.basename, shell_quote(cmd)))
handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE, stdin=subprocess.PIPE)
output = handle.communicate()[0]
if handle.wait() != 0:
return None
except (IOError, OSError):
return None
audio_codec = None
for line in output.decode('ascii', 'ignore').split('\n'):
if line.startswith('codec_name='):
audio_codec = line.split('=')[1].strip()
elif line.strip() == 'codec_type=audio' and audio_codec is not None:
return audio_codec
return None
def run_ffmpeg(self, path, out_path, codec, more_opts):
if codec is None:
acodec_opts = []
else:
acodec_opts = ['-acodec', codec]
opts = ['-vn'] + acodec_opts + more_opts
try:
FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
except FFmpegPostProcessorError as err:
raise AudioConversionError(err.msg)
def run(self, information):
path = information['filepath']
filecodec = self.get_audio_codec(path)
if filecodec is None:
raise PostProcessingError('WARNING: unable to obtain file audio codec with ffprobe')
more_opts = []
if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']:
# Lossless, but in another container
acodec = 'copy'
extension = 'm4a'
more_opts = ['-bsf:a', 'aac_adtstoasc']
elif filecodec in ['aac', 'mp3', 'vorbis', 'opus']:
# Lossless if possible
acodec = 'copy'
extension = filecodec
if filecodec == 'aac':
more_opts = ['-f', 'adts']
if filecodec == 'vorbis':
extension = 'ogg'
else:
# MP3 otherwise.
acodec = 'libmp3lame'
extension = 'mp3'
more_opts = []
if self._preferredquality is not None:
if int(self._preferredquality) < 10:
more_opts += ['-q:a', self._preferredquality]
else:
more_opts += ['-b:a', self._preferredquality + 'k']
else:
# We convert the audio (lossy)
acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'opus': 'opus', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
extension = self._preferredcodec
more_opts = []
if self._preferredquality is not None:
# The opus codec doesn't support the -aq option
if int(self._preferredquality) < 10 and extension != 'opus':
more_opts += ['-q:a', self._preferredquality]
else:
more_opts += ['-b:a', self._preferredquality + 'k']
if self._preferredcodec == 'aac':
more_opts += ['-f', 'adts']
if self._preferredcodec == 'm4a':
more_opts += ['-bsf:a', 'aac_adtstoasc']
if self._preferredcodec == 'vorbis':
extension = 'ogg'
if self._preferredcodec == 'wav':
extension = 'wav'
more_opts += ['-f', 'wav']
prefix, sep, ext = path.rpartition('.') # not os.path.splitext, since the latter does not work on unicode in all setups
new_path = prefix + sep + extension
# If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly.
if (new_path == path or
(self._nopostoverwrites and os.path.exists(encodeFilename(new_path)))):
self._downloader.to_screen('[ffmpeg] Post-process file %s exists, skipping' % new_path)
return [], information
try:
self._downloader.to_screen('[ffmpeg] Destination: ' + new_path)
self.run_ffmpeg(path, new_path, acodec, more_opts)
except AudioConversionError as e:
raise PostProcessingError(
'audio conversion failed: ' + e.msg)
except Exception:
raise PostProcessingError('error running ' + self.basename)
# Try to update the date time for extracted audio file.
if information.get('filetime') is not None:
self.try_utime(
new_path, time.time(), information['filetime'],
errnote='Cannot update utime of audio file')
information['filepath'] = new_path
information['ext'] = extension
return [path], information
class FFmpegVideoConvertorPP(FFmpegPostProcessor):
def __init__(self, downloader=None, preferedformat=None):
super(FFmpegVideoConvertorPP, self).__init__(downloader)
self._preferedformat = preferedformat
def run(self, information):
path = information['filepath']
if information['ext'] == self._preferedformat:
self._downloader.to_screen('[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
return [], information
options = []
if self._preferedformat == 'avi':
options.extend(['-c:v', 'libxvid', '-vtag', 'XVID'])
prefix, sep, ext = path.rpartition('.')
outpath = prefix + sep + self._preferedformat
self._downloader.to_screen('[' + 'ffmpeg' + '] Converting video from %s to %s, Destination: ' % (information['ext'], self._preferedformat) + outpath)
self.run_ffmpeg(path, outpath, options)
information['filepath'] = outpath
information['format'] = self._preferedformat
information['ext'] = self._preferedformat
return [path], information
class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
def run(self, information):
if information['ext'] not in ('mp4', 'webm', 'mkv'):
self._downloader.to_screen('[ffmpeg] Subtitles can only be embedded in mp4, webm or mkv files')
return [], information
subtitles = information.get('requested_subtitles')
if not subtitles:
self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to embed')
return [], information
filename = information['filepath']
ext = information['ext']
sub_langs = []
sub_filenames = []
webm_vtt_warn = False
for lang, sub_info in subtitles.items():
sub_ext = sub_info['ext']
if ext != 'webm' or ext == 'webm' and sub_ext == 'vtt':
sub_langs.append(lang)
sub_filenames.append(subtitles_filename(filename, lang, sub_ext))
else:
if not webm_vtt_warn and ext == 'webm' and sub_ext != 'vtt':
webm_vtt_warn = True
self._downloader.to_screen('[ffmpeg] Only WebVTT subtitles can be embedded in webm files')
if not sub_langs:
return [], information
input_files = [filename] + sub_filenames
opts = [
'-map', '0',
'-c', 'copy',
# Don't copy the existing subtitles, we may be running the
# postprocessor a second time
'-map', '-0:s',
]
if information['ext'] == 'mp4':
opts += ['-c:s', 'mov_text']
for (i, lang) in enumerate(sub_langs):
opts.extend(['-map', '%d:0' % (i + 1)])
lang_code = ISO639Utils.short2long(lang)
if lang_code is not None:
opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
temp_filename = prepend_extension(filename, 'temp')
self._downloader.to_screen('[ffmpeg] Embedding subtitles in \'%s\'' % filename)
self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return sub_filenames, information
class FFmpegMetadataPP(FFmpegPostProcessor):
def run(self, info):
metadata = {}
def add(meta_list, info_list=None):
if not info_list:
info_list = meta_list
if not isinstance(meta_list, (list, tuple)):
meta_list = (meta_list,)
if not isinstance(info_list, (list, tuple)):
info_list = (info_list,)
for info_f in info_list:
if info.get(info_f) is not None:
for meta_f in meta_list:
metadata[meta_f] = info[info_f]
break
add('title', ('track', 'title'))
add('date', 'upload_date')
add(('description', 'comment'), 'description')
add('purl', 'webpage_url')
add('track', 'track_number')
add('artist', ('artist', 'creator', 'uploader', 'uploader_id'))
add('genre')
add('album')
add('album_artist')
add('disc', 'disc_number')
if not metadata:
self._downloader.to_screen('[ffmpeg] There isn\'t any metadata to add')
return [], info
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
if info['ext'] == 'm4a':
options = ['-vn', '-acodec', 'copy']
else:
options = ['-c', 'copy']
for (name, value) in metadata.items():
options.extend(['-metadata', '%s=%s' % (name, value)])
self._downloader.to_screen('[ffmpeg] Adding metadata to \'%s\'' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegMergerPP(FFmpegPostProcessor):
def run(self, info):
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
temp_filename = encodeFilename(temp_filename, True).decode()
args = ['-c', 'copy', '-map', '0:v:0', '-map', '1:a:0']
self._downloader.to_screen('[ffmpeg] Merging formats into "%s"' % filename)
self.run_ffmpeg_multiple_files(info['__files_to_merge'], temp_filename, args)
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return info['__files_to_merge'], info
def can_merge(self):
# TODO: figure out merge-capable ffmpeg version
if self.basename != 'avconv':
return True
required_version = '10-0'
if is_outdated_version(
self._versions[self.basename], required_version):
warning = ('Your copy of %s is outdated and unable to properly mux separate video and audio files, '
'youtube-dl will download single file media. '
'Update %s to version %s or newer to fix this.') % (
self.basename, self.basename, required_version)
if self._downloader:
self._downloader.report_warning(warning)
return False
return True
class FFmpegFixupStretchedPP(FFmpegPostProcessor):
def run(self, info):
stretched_ratio = info.get('stretched_ratio')
if stretched_ratio is None or stretched_ratio == 1:
return [], info
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
options = ['-c', 'copy', '-aspect', '%f' % stretched_ratio]
self._downloader.to_screen('[ffmpeg] Fixing aspect ratio in "%s"' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegFixupM4aPP(FFmpegPostProcessor):
def run(self, info):
if info.get('container') != 'm4a_dash':
return [], info
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
options = ['-c', 'copy', '-f', 'mp4']
self._downloader.to_screen('[ffmpeg] Correcting container in "%s"' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegFixupM3u8PP(FFmpegPostProcessor):
def run(self, info):
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
options = ['-c', 'copy', '-f', 'mp4', '-bsf:a', 'aac_adtstoasc']
        self._downloader.to_screen('[ffmpeg] Fixing malformed aac bitstream in "%s"' % filename)
self.run_ffmpeg(filename, temp_filename, options)
os.remove(encodeFilename(filename))
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return [], info
class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor):
def __init__(self, downloader=None, format=None):
super(FFmpegSubtitlesConvertorPP, self).__init__(downloader)
self.format = format
def run(self, info):
subs = info.get('requested_subtitles')
filename = info['filepath']
new_ext = self.format
new_format = new_ext
if new_format == 'vtt':
new_format = 'webvtt'
if subs is None:
self._downloader.to_screen('[ffmpeg] There aren\'t any subtitles to convert')
return [], info
self._downloader.to_screen('[ffmpeg] Converting subtitles')
sub_filenames = []
for lang, sub in subs.items():
ext = sub['ext']
if ext == new_ext:
self._downloader.to_screen(
                    '[ffmpeg] Subtitle file for %s is already in the requested '
'format' % new_ext)
continue
old_file = subtitles_filename(filename, lang, ext)
sub_filenames.append(old_file)
new_file = subtitles_filename(filename, lang, new_ext)
if ext == 'dfxp' or ext == 'ttml' or ext == 'tt':
self._downloader.report_warning(
'You have requested to convert dfxp (TTML) subtitles into another format, '
'which results in style information loss')
dfxp_file = old_file
srt_file = subtitles_filename(filename, lang, 'srt')
with io.open(dfxp_file, 'rt', encoding='utf-8') as f:
srt_data = dfxp2srt(f.read())
with io.open(srt_file, 'wt', encoding='utf-8') as f:
f.write(srt_data)
old_file = srt_file
subs[lang] = {
'ext': 'srt',
'data': srt_data
}
if new_ext == 'srt':
continue
else:
sub_filenames.append(srt_file)
self.run_ffmpeg(old_file, new_file, ['-f', new_format])
with io.open(new_file, 'rt', encoding='utf-8') as f:
subs[lang] = {
'ext': new_ext,
'data': f.read(),
}
return sub_filenames, info
| unlicense | 2,100,544,247,190,943,200 | 37.855422 | 157 | 0.558051 | false |
saeki-masaki/cinder | cinder/tests/unit/test_hds_iscsi.py | 1 | 16404 | # Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Self test for Hitachi Unified Storage (HUS-HNAS) platform.
"""
import os
import tempfile
import mock
from oslo_log import log as logging
import six
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.hds import iscsi
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
HNASCONF = """<?xml version="1.0" encoding="UTF-8" ?>
<config>
<hnas_cmd>ssc</hnas_cmd>
<chap_enabled>True</chap_enabled>
<mgmt_ip0>172.17.44.15</mgmt_ip0>
<username>supervisor</username>
<password>supervisor</password>
<svc_0>
<volume_type>default</volume_type>
<iscsi_ip>172.17.39.132</iscsi_ip>
<hdp>fs2</hdp>
</svc_0>
<svc_1>
<volume_type>silver</volume_type>
<iscsi_ip>172.17.39.133</iscsi_ip>
<hdp>fs2</hdp>
</svc_1>
</config>
"""
HNAS_WRONG_CONF1 = """<?xml version="1.0" encoding="UTF-8" ?>
<config>
<hnas_cmd>ssc</hnas_cmd>
<mgmt_ip0>172.17.44.15</mgmt_ip0>
<username>supervisor</username>
<password>supervisor</password>
<volume_type>default</volume_type>
<hdp>172.17.39.132:/cinder</hdp>
</svc_0>
</config>
"""
HNAS_WRONG_CONF2 = """<?xml version="1.0" encoding="UTF-8" ?>
<config>
<hnas_cmd>ssc</hnas_cmd>
<mgmt_ip0>172.17.44.15</mgmt_ip0>
<username>supervisor</username>
<password>supervisor</password>
<svc_0>
<volume_type>default</volume_type>
</svc_0>
<svc_1>
<volume_type>silver</volume_type>
</svc_1>
</config>
"""
# The following information is passed on to tests, when creating a volume
_VOLUME = {'name': 'testvol', 'volume_id': '1234567890', 'size': 128,
'volume_type': 'silver', 'volume_type_id': '1',
'provider_location': None, 'id': 'abcdefg',
'host': 'host1@hnas-iscsi-backend#silver'}
class SimulatedHnasBackend(object):
"""Simulation Back end. Talks to HNAS."""
# these attributes are shared across object instances
start_lun = 0
init_index = 0
target_index = 0
hlun = 0
def __init__(self):
self.type = 'HNAS'
self.out = ''
self.volumes = []
# iSCSI connections
self.connections = []
def deleteVolume(self, name):
LOG.info("delVolume: name %s", name)
volume = self.getVolume(name)
if volume:
LOG.info("deleteVolume: deleted name %s provider %s",
volume['name'], volume['provider_location'])
self.volumes.remove(volume)
return True
else:
return False
def deleteVolumebyProvider(self, provider):
LOG.info("delVolumeP: provider %s", provider)
volume = self.getVolumebyProvider(provider)
if volume:
LOG.info("deleteVolumeP: deleted name %s provider %s",
volume['name'], volume['provider_location'])
self.volumes.remove(volume)
return True
else:
return False
def getVolumes(self):
return self.volumes
def getVolume(self, name):
LOG.info("getVolume: find by name %s", name)
if self.volumes:
for volume in self.volumes:
if str(volume['name']) == name:
LOG.info("getVolume: found name %s provider %s",
volume['name'], volume['provider_location'])
return volume
else:
LOG.info("getVolume: no volumes")
LOG.info("getVolume: not found")
return None
def getVolumebyProvider(self, provider):
LOG.info("getVolumeP: find by provider %s", provider)
if self.volumes:
for volume in self.volumes:
if str(volume['provider_location']) == provider:
LOG.info("getVolumeP: found name %s provider %s",
volume['name'], volume['provider_location'])
return volume
else:
LOG.info("getVolumeP: no volumes")
LOG.info("getVolumeP: not found")
return None
def createVolume(self, name, provider, sizeMiB, comment):
LOG.info("createVolume: name %s provider %s comment %s",
name, provider, comment)
new_vol = {'additionalStates': [],
'adminSpace': {'freeMiB': 0,
'rawReservedMiB': 384,
'reservedMiB': 128,
'usedMiB': 128},
'baseId': 115,
'copyType': 1,
'creationTime8601': '2012-10-22T16:37:57-07:00',
'creationTimeSec': 1350949077,
'failedStates': [],
'id': 115,
'provider_location': provider,
'name': name,
'comment': comment,
'provisioningType': 1,
'readOnly': False,
'sizeMiB': sizeMiB,
'state': 1,
'userSpace': {'freeMiB': 0,
'rawReservedMiB': 41984,
'reservedMiB': 31488,
'usedMiB': 31488},
'usrSpcAllocLimitPct': 0,
'usrSpcAllocWarningPct': 0,
'uuid': '1e7daee4-49f4-4d07-9ab8-2b6a4319e243',
'wwn': '50002AC00073383D'}
self.volumes.append(new_vol)
def create_lu(self, cmd, ip0, user, pw, hdp, size, name):
vol_id = name
_out = ("LUN: %d HDP: fs2 size: %s MB, is successfully created" %
(self.start_lun, size))
self.createVolume(name, vol_id, size, "create-lu")
self.start_lun += 1
return _out
def delete_lu(self, cmd, ip0, user, pw, hdp, lun):
_out = ""
id = "myID"
LOG.info("Delete_Lu: check lun %s id %s", lun, id)
        if not self.deleteVolumebyProvider(id + '.' + str(lun)):
LOG.warning("Delete_Lu: failed to delete lun %s id %s", lun, id)
return _out
def create_dup(self, cmd, ip0, user, pw, src_lun, hdp, size, name):
_out = ("LUN: %s HDP: 9 size: %s MB, is successfully created" %
(self.start_lun, size))
id = name
LOG.info("HNAS Create_Dup: %d", self.start_lun)
self.createVolume(name, id + '.' + str(self.start_lun), size,
"create-dup")
self.start_lun += 1
return _out
def add_iscsi_conn(self, cmd, ip0, user, pw, lun, hdp,
port, iqn, initiator):
ctl = ""
conn = (self.hlun, lun, initiator, self.init_index, iqn,
self.target_index, ctl, port)
_out = ("H-LUN: %d mapped. LUN: %s, iSCSI Initiator: %s @ index: %d, \
and Target: %s @ index %d is successfully paired @ CTL: %s, \
Port: %s" % conn)
self.init_index += 1
self.target_index += 1
self.hlun += 1
LOG.debug("Created connection %d", self.init_index)
self.connections.append(conn)
return _out
def del_iscsi_conn(self, cmd, ip0, user, pw, port, iqn, initiator):
self.connections.pop()
_out = ("H-LUN: successfully deleted from target")
return _out
def extend_vol(self, cmd, ip0, user, pw, hdp, lu, size, name):
_out = ("LUN: %s successfully extended to %s MB" % (lu, size))
id = name
self.out = _out
LOG.info("extend_vol: lu: %s %d -> %s", lu, int(size), self.out)
v = self.getVolumebyProvider(id + '.' + str(lu))
if v:
v['sizeMiB'] = size
LOG.info("extend_vol: out %s %s", self.out, self)
return _out
def get_luns(self):
return len(self.alloc_lun)
def get_conns(self):
return len(self.connections)
def get_out(self):
return str(self.out)
def get_version(self, cmd, ver, ip0, user, pw):
self.out = "Array_ID: 18-48-A5-A1-80-13 (3080-G2) " \
"version: 11.2.3319.09 LU: 256" \
" RG: 0 RG_LU: 0 Utility_version: 11.1.3225.01"
return self.out
def get_iscsi_info(self, cmd, ip0, user, pw):
self.out = "CTL: 0 Port: 4 IP: 172.17.39.132 Port: 3260 Link: Up\n" \
"CTL: 1 Port: 5 IP: 172.17.39.133 Port: 3260 Link: Up"
return self.out
def get_hdp_info(self, cmd, ip0, user, pw, fslabel=None):
self.out = "HDP: 1024 272384 MB 33792 MB 12 % LUs: " \
"70 Normal fs1\n" \
"HDP: 1025 546816 MB 73728 MB 13 % LUs: 194 Normal fs2"
return self.out
def get_targetiqn(self, cmd, ip0, user, pw, id, hdp, secret):
self.out = """iqn.2013-08.cinderdomain:vs61.cindertarget"""
return self.out
def set_targetsecret(self, cmd, ip0, user, pw, target, hdp, secret):
self.out = """iqn.2013-08.cinderdomain:vs61.cindertarget"""
return self.out
def get_targetsecret(self, cmd, ip0, user, pw, target, hdp):
self.out = """wGkJhTpXaaYJ5Rv"""
return self.out
class HNASiSCSIDriverTest(test.TestCase):
"""Test HNAS iSCSI volume driver."""
def __init__(self, *args, **kwargs):
super(HNASiSCSIDriverTest, self).__init__(*args, **kwargs)
@mock.patch.object(iscsi, 'factory_bend')
def setUp(self, _factory_bend):
super(HNASiSCSIDriverTest, self).setUp()
self.backend = SimulatedHnasBackend()
_factory_bend.return_value = self.backend
(handle, self.config_file) = tempfile.mkstemp('.xml')
os.write(handle, HNASCONF)
os.close(handle)
self.configuration = mock.Mock(spec=conf.Configuration)
self.configuration.hds_hnas_iscsi_config_file = self.config_file
self.configuration.hds_svc_iscsi_chap_enabled = True
self.driver = iscsi.HDSISCSIDriver(configuration=self.configuration)
self.driver.do_setup("")
self.addCleanup(self._clean)
def _clean(self):
os.remove(self.config_file)
def _create_volume(self):
loc = self.driver.create_volume(_VOLUME)
vol = _VOLUME.copy()
vol['provider_location'] = loc['provider_location']
return vol
@mock.patch('__builtin__.open')
@mock.patch.object(os, 'access')
def test_read_config(self, m_access, m_open):
# Test exception when file is not found
m_access.return_value = False
m_open.return_value = six.StringIO(HNASCONF)
self.assertRaises(exception.NotFound, iscsi._read_config, '')
# Test exception when config file has parsing errors
# due to missing <svc> tag
m_access.return_value = True
m_open.return_value = six.StringIO(HNAS_WRONG_CONF1)
self.assertRaises(exception.ConfigNotFound, iscsi._read_config, '')
# Test exception when config file has parsing errors
# due to missing <hdp> tag
m_open.return_value = six.StringIO(HNAS_WRONG_CONF2)
self.configuration.hds_hnas_iscsi_config_file = ''
self.assertRaises(exception.ParameterNotFound, iscsi._read_config, '')
def test_create_volume(self):
loc = self.driver.create_volume(_VOLUME)
self.assertNotEqual(loc, None)
self.assertNotEqual(loc['provider_location'], None)
# cleanup
self.backend.deleteVolumebyProvider(loc['provider_location'])
def test_get_volume_stats(self):
stats = self.driver.get_volume_stats(True)
self.assertEqual(stats["vendor_name"], "HDS")
self.assertEqual(stats["storage_protocol"], "iSCSI")
self.assertEqual(len(stats['pools']), 2)
def test_delete_volume(self):
vol = self._create_volume()
self.driver.delete_volume(vol)
# should not be deletable twice
prov_loc = self.backend.getVolumebyProvider(vol['provider_location'])
self.assertTrue(prov_loc is None)
def test_extend_volume(self):
vol = self._create_volume()
new_size = _VOLUME['size'] * 2
self.driver.extend_volume(vol, new_size)
# cleanup
self.backend.deleteVolumebyProvider(vol['provider_location'])
@mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol')
def test_create_snapshot(self, m_id_to_vol):
vol = self._create_volume()
m_id_to_vol.return_value = vol
svol = vol.copy()
svol['volume_size'] = svol['size']
loc = self.driver.create_snapshot(svol)
self.assertNotEqual(loc, None)
svol['provider_location'] = loc['provider_location']
# cleanup
self.backend.deleteVolumebyProvider(svol['provider_location'])
self.backend.deleteVolumebyProvider(vol['provider_location'])
@mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol')
def test_create_clone(self, m_id_to_vol):
src_vol = self._create_volume()
m_id_to_vol.return_value = src_vol
src_vol['volume_size'] = src_vol['size']
dst_vol = self._create_volume()
dst_vol['volume_size'] = dst_vol['size']
loc = self.driver.create_cloned_volume(dst_vol, src_vol)
self.assertNotEqual(loc, None)
# cleanup
self.backend.deleteVolumebyProvider(src_vol['provider_location'])
self.backend.deleteVolumebyProvider(loc['provider_location'])
@mock.patch.object(iscsi.HDSISCSIDriver, '_id_to_vol')
def test_delete_snapshot(self, m_id_to_vol):
svol = self._create_volume()
lun = svol['provider_location']
m_id_to_vol.return_value = svol
self.driver.delete_snapshot(svol)
self.assertTrue(self.backend.getVolumebyProvider(lun) is None)
def test_create_volume_from_snapshot(self):
svol = self._create_volume()
svol['volume_size'] = svol['size']
vol = self.driver.create_volume_from_snapshot(_VOLUME, svol)
self.assertNotEqual(vol, None)
# cleanup
self.backend.deleteVolumebyProvider(svol['provider_location'])
self.backend.deleteVolumebyProvider(vol['provider_location'])
@mock.patch.object(iscsi.HDSISCSIDriver, '_update_vol_location')
def test_initialize_connection(self, m_update_vol_location):
connector = {}
connector['initiator'] = 'iqn.1993-08.org.debian:01:11f90746eb2'
connector['host'] = 'dut_1.lab.hds.com'
vol = self._create_volume()
conn = self.driver.initialize_connection(vol, connector)
self.assertTrue('3260' in conn['data']['target_portal'])
# cleanup
self.backend.deleteVolumebyProvider(vol['provider_location'])
@mock.patch.object(iscsi.HDSISCSIDriver, '_update_vol_location')
def test_terminate_connection(self, m_update_vol_location):
connector = {}
connector['initiator'] = 'iqn.1993-08.org.debian:01:11f90746eb2'
connector['host'] = 'dut_1.lab.hds.com'
vol = self._create_volume()
vol['provider_location'] = "portal," +\
connector['initiator'] +\
",18-48-A5-A1-80-13.0,ctl,port,hlun"
conn = self.driver.initialize_connection(vol, connector)
num_conn_before = self.backend.get_conns()
self.driver.terminate_connection(vol, conn)
num_conn_after = self.backend.get_conns()
self.assertNotEqual(num_conn_before, num_conn_after)
# cleanup
self.backend.deleteVolumebyProvider(vol['provider_location'])
@mock.patch.object(volume_types, 'get_volume_type_extra_specs',
return_value={'key': 'type', 'service_label': 'silver'})
def test_get_pool(self, m_ext_spec):
label = self.driver.get_pool(_VOLUME)
self.assertEqual('silver', label)
| apache-2.0 | -4,555,529,968,918,826,000 | 35.052747 | 79 | 0.586503 | false |
uannight/reposan | plugin.video.tvalacarta/lib/youtube_dl/extractor/common.py | 1 | 118491 | # coding: utf-8
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import random
import re
import socket
import sys
import time
import math
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_etree_fromstring,
compat_getpass,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
)
from ..downloader.f4m import remove_encrypted_media
from ..utils import (
NO_DEFAULT,
age_restricted,
base_url,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
determine_protocol,
error_to_compat_str,
ExtractorError,
extract_attributes,
fix_xml_ampersands,
float_or_none,
GeoRestrictedError,
GeoUtils,
int_or_none,
js_to_json,
mimetype2ext,
orderedSet,
parse_codecs,
parse_duration,
parse_iso8601,
parse_m3u8_attributes,
RegexNotFoundError,
sanitized_Request,
sanitize_filename,
unescapeHTML,
unified_strdate,
unified_timestamp,
update_Request,
update_url_query,
urljoin,
url_basename,
xpath_element,
xpath_text,
xpath_with_ns,
)
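# A purely illustrative sketch (hypothetical site and URLs, kept as a comment so
# it does not affect this module): a site-specific extractor subclasses
# InfoExtractor below, declares a _VALID_URL pattern and returns the info
# dictionary described in the class docstring from _real_extract(), e.g.:
#
#     class ExampleIE(InfoExtractor):
#         _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'
#
#         def _real_extract(self, url):
#             video_id = self._match_id(url)
#             webpage = self._download_webpage(url, video_id)
#             return {
#                 'id': video_id,
#                 'title': self._og_search_title(webpage),
#                 'url': 'https://videos.example.com/%s.mp4' % video_id,
#                 'ext': 'mp4',
#             }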
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
    information, possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* manifest_url
The URL of the manifest file in case of
fragmented media (DASH, hls, hds)
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* fragment_base_url
Base URL for fragments. Each fragment's path
value (if present) will be relative to
this URL.
* fragments A list of fragments of a fragmented media.
Each fragment entry must contain either an url
or a path. If an url is present it should be
considered by a client. Otherwise both path and
fragment_base_url must be present. Here is
the list of all potential fields:
* "url" - fragment's URL
* "path" - fragment's path relative to
fragment_base_url
* "duration" (optional, int or float)
* "filesize" (optional, int)
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height"},
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{tag: subformats}. "tag" is usually a language code, and
"subformats" is a list sorted from lower to higher
preference, each element is a dictionary with the "ext"
entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series, programme or podcast:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title", "description" and "id" attributes
with the same semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
_GEO_BYPASS attribute may be set to False in order to disable
geo restriction bypass mechanisms for a particular extractor.
Though it won't disable explicit geo restriction bypass based on
country code provided with geo_bypass_country. (experimental)
_GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
countries for this extractor. One of these countries will be used by
geo restriction bypass mechanism right away in order to bypass
geo restriction, of course, if the mechanism is not disabled. (experimental)
NB: both these geo attributes are experimental and may change in future
or be completely removed.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_x_forwarded_for_ip = None
_GEO_BYPASS = True
_GEO_COUNTRIES = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self._x_forwarded_for_ip = None
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
self._initialize_geo_bypass(self._GEO_COUNTRIES)
if not self._ready:
self._real_initialize()
self._ready = True
def _initialize_geo_bypass(self, countries):
"""
        Initialize the geo restriction bypass mechanism.
        This method initializes the geo bypass mechanism, which is based on
        faking the X-Forwarded-For HTTP header. A random country from the
        provided country list is selected and a random IP belonging to that
        country is generated. This IP is then sent as the X-Forwarded-For
        header in all subsequent HTTP requests.
        The method is called for the initial geo bypass setup during instance
        initialization with _GEO_COUNTRIES.
        You may also call it manually from an extractor's code if the geo
        countries information is not available beforehand (e.g. it is obtained
        during extraction) or for some other reason.
"""
if not self._x_forwarded_for_ip:
country_code = self._downloader.params.get('geo_bypass_country', None)
# If there is no explicit country for geo bypass specified and
# the extractor is known to be geo restricted let's fake IP
# as X-Forwarded-For right away.
if (not country_code and
self._GEO_BYPASS and
self._downloader.params.get('geo_bypass', True) and
countries):
country_code = random.choice(countries)
if country_code:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
if self._downloader.params.get('verbose', False):
self._downloader.to_stdout(
'[debug] Using fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country_code.upper()))
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
for _ in range(2):
try:
self.initialize()
ie_result = self._real_extract(url)
if self._x_forwarded_for_ip:
ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
return ie_result
except GeoRestrictedError as e:
if self.__maybe_fake_ip_and_retry(e.countries):
continue
raise
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occurred.', cause=e)
def __maybe_fake_ip_and_retry(self, countries):
if (not self._downloader.params.get('geo_bypass_country', None) and
self._GEO_BYPASS and
self._downloader.params.get('geo_bypass', True) and
not self._x_forwarded_for_ip and
countries):
country_code = random.choice(countries)
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
if self._x_forwarded_for_ip:
self.report_warning(
'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country_code.upper()))
return True
return False
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return compat_str(cls.__name__[:-2])
@property
def IE_NAME(self):
return compat_str(type(self).__name__[:-2])
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
""" Returns the response handle """
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
if isinstance(url_or_request, compat_urllib_request.Request):
url_or_request = update_Request(
url_or_request, data=data, headers=headers, query=query)
else:
if query:
url_or_request = update_url_query(url_or_request, query)
if data is not None or headers:
url_or_request = sanitized_Request(url_or_request, data, headers)
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}):
""" Returns a tuple (page content as string, URL handle) """
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
        # Some sites check the X-Forwarded-For HTTP header in order to figure
        # out the origin of the client behind a proxy. This allows bypassing
        # geo restriction by faking this header's value with an IP that belongs
        # to some geo unrestricted country. We will do so once we encounter any
        # geo restriction error.
if self._x_forwarded_for_ip:
if 'X-Forwarded-For' not in headers:
headers['X-Forwarded-For'] = self._x_forwarded_for_ip
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
def __check_blocked(self, content):
first_block = content[:512]
if ('<title>Access to this site is blocked</title>' in content and
'Websense' in first_block):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in first_block:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content and
'blocklist.rkn.gov.ru' in content):
raise ExtractorError(
'Access to this webpage has been blocked by decision of the Russian government. '
'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
expected=True)
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self._downloader.params.get('dump_intermediate_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
self.to_screen('Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
basen = '%s_%s' % (video_id, url)
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if compat_os_name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
self.__check_blocked(content)
return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}):
""" Returns the data of the page as a string """
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml(self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query)
if xml_string is False:
return xml_string
if transform_source:
xml_string = transform_source(xml_string)
return compat_etree_fromstring(xml_string.encode('utf-8'))
def _download_json(self, url_or_request, video_id,
note='Downloading JSON metadata',
errnote='Unable to download JSON metadata',
transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={}):
json_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query)
if (not fatal) and json_string is False:
return None
return self._parse_json(
json_string, video_id, transform_source=transform_source, fatal=fatal)
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
raise ExtractorError(
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)
@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None):
raise GeoRestrictedError(msg, countries=countries)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
def playlist_from_matches(self, matches, video_id, video_title, getter=None, ie=None):
urlrs = orderedSet(
self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
for m in matches)
return self.playlist_result(
urlrs, playlist_id=video_id, playlist_title=video_title)
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
        In case of failure return a default value, report a warning or raise a
        RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_netrc_login_info(self, netrc_machine=None):
username = None
password = None
netrc_machine = netrc_machine or self._NETRC_MACHINE
if self._downloader.params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(netrc_machine)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError(
'No authenticators for %s' % netrc_machine)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning(
'parsing .netrc: %s' % error_to_compat_str(err))
return username, password
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
"""
Get the login info as (username, password)
First look for the manually specified credentials using username_option
        and password_option as keys in the params dictionary. If no such
        credentials are available, look in the netrc file using the
        netrc_machine or _NETRC_MACHINE value.
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get(username_option) is not None:
username = downloader_params[username_option]
password = downloader_params[password_option]
else:
username, password = self._get_netrc_login_info(netrc_machine)
return username, password
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if not isinstance(prop, (list, tuple)):
prop = [prop]
if name is None:
name = 'OpenGraph %s' % prop[0]
og_regexes = []
for p in prop:
og_regexes.extend(self._og_regexes(p))
escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if not isinstance(name, (list, tuple)):
name = [name]
if display_name is None:
display_name = name[0]
return self._html_search_regex(
[self._meta_regex(n) for n in name],
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta('isFamilyFriendly', html)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
json_ld = self._search_regex(
r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
html, 'JSON-LD', group='json_ld', **kwargs)
default = kwargs.get('default', NO_DEFAULT)
if not json_ld:
return default if default is not NO_DEFAULT else {}
# JSON-LD may be malformed and thus `fatal` should be respected.
# At the same time `default` may be passed that assumes `fatal=False`
# for _search_regex. Let's simulate the same behavior here as well.
fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
return self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
if isinstance(json_ld, compat_str):
json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
if not json_ld:
return {}
info = {}
if not isinstance(json_ld, (list, tuple, dict)):
return info
if isinstance(json_ld, dict):
json_ld = [json_ld]
def extract_video_object(e):
assert e['@type'] == 'VideoObject'
info.update({
'url': e.get('contentUrl'),
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'thumbnail': e.get('thumbnailUrl') or e.get('thumbnailURL'),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('uploadDate')),
'filesize': float_or_none(e.get('contentSize')),
'tbr': int_or_none(e.get('bitrate')),
'width': int_or_none(e.get('width')),
'height': int_or_none(e.get('height')),
})
for e in json_ld:
if e.get('@context') == 'http://schema.org':
item_type = e.get('@type')
if expected_type is not None and expected_type != item_type:
return info
if item_type == 'TVEpisode':
info.update({
'episode': unescapeHTML(e.get('name')),
'episode_number': int_or_none(e.get('episodeNumber')),
'description': unescapeHTML(e.get('description')),
})
part_of_season = e.get('partOfSeason')
if isinstance(part_of_season, dict) and part_of_season.get('@type') == 'TVSeason':
info['season_number'] = int_or_none(part_of_season.get('seasonNumber'))
part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
if isinstance(part_of_series, dict) and part_of_series.get('@type') == 'TVSeries':
info['series'] = unescapeHTML(part_of_series.get('name'))
elif item_type == 'Article':
info.update({
'timestamp': parse_iso8601(e.get('datePublished')),
'title': unescapeHTML(e.get('headline')),
'description': unescapeHTML(e.get('articleBody')),
})
elif item_type == 'VideoObject':
extract_video_object(e)
elif item_type == 'WebPage':
video = e.get('video')
if isinstance(video, dict) and video.get('@type') == 'VideoObject':
extract_video_object(video)
break
return dict((k, v) for k, v in info.items() if v is not None)
@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)(<input[^>]+>)', html):
attrs = extract_attributes(input)
if not input:
continue
if attrs.get('type') not in ('hidden', 'submit'):
continue
name = attrs.get('name') or attrs.get('id')
value = attrs.get('value')
if name and value is not None:
hidden_inputs[name] = value
return hidden_inputs
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
for f in formats:
# Automatically determine tbr when missing based on abr and vbr (improves
# formats sorting in some cases)
if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
f['tbr'] = f['abr'] + f['vbr']
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
if isinstance(field_preference, (list, tuple)):
return tuple(
f.get(field)
if f.get(field) is not None
else ('' if field == 'format_id' else -1)
for field in field_preference)
preference = f.get('preference')
if preference is None:
preference = 0
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
protocol = f.get('protocol') or determine_protocol(f)
proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)
if f.get('vcodec') == 'none': # audio only
preference -= 50
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if f.get('acodec') == 'none': # video only
preference -= 40
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
proto_preference,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video', headers={}):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
return True
except ExtractorError as e:
if isinstance(e.cause, compat_urllib_error.URLError):
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, item))
return False
raise
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source,
fatal=fatal)
if manifest is False:
return []
return self._parse_f4m_formats(
manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
# currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
if akamai_pv is not None and ';' in akamai_pv.text:
playerVerificationChallenge = akamai_pv.text.split(';')[0]
if playerVerificationChallenge.strip() != '':
return []
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
# Remove unsupported DRM protected media from final formats
# rendition (see https://github.com/rg3/youtube-dl/issues/8573).
media_nodes = remove_encrypted_media(media_nodes)
if not media_nodes:
return formats
base_url = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
'base URL', default=None)
if base_url:
base_url = base_url.strip()
bootstrap_info = xpath_element(
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
'bootstrap info', default=None)
vcodec = None
mime_type = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
'base URL', default=None)
if mime_type and mime_type.startswith('audio/'):
vcodec = 'none'
for i, media_el in enumerate(media_nodes):
tbr = int_or_none(media_el.attrib.get('bitrate'))
width = int_or_none(media_el.attrib.get('width'))
height = int_or_none(media_el.attrib.get('height'))
format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
# If <bootstrapInfo> is present, the specified f4m is a
# stream-level manifest, and only set-level manifests may refer to
# external resources. See section 11.4 and section 4 of F4M spec
if bootstrap_info is None:
media_url = None
# @href is introduced in 2.0, see section 11.6 of F4M spec
if manifest_version == '2.0':
media_url = media_el.attrib.get('href')
if media_url is None:
media_url = media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
            # If media_url is itself an f4m manifest do the recursive extraction
# since bitrates in parent manifest (this one) and media_url manifest
# may differ leading to inability to resolve the format by requested
# bitrate in f4m downloader
ext = determine_ext(manifest_url)
if ext == 'f4m':
f4m_formats = self._extract_f4m_formats(
manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal)
# Sometimes stream-level manifest contains single media entry that
# does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
# At the same time parent's media entry in set-level manifest may
# contain it. We will copy it from parent in such cases.
if len(f4m_formats) == 1:
f = f4m_formats[0]
f.update({
'tbr': f.get('tbr') or tbr,
'width': f.get('width') or width,
'height': f.get('height') or height,
'format_id': f.get('format_id') if not tbr else format_id,
'vcodec': vcodec,
})
formats.extend(f4m_formats)
continue
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', preference=preference,
m3u8_id=m3u8_id, fatal=fatal))
continue
formats.append({
'format_id': format_id,
'url': manifest_url,
'manifest_url': manifest_url,
'ext': 'flv' if bootstrap_info is not None else None,
'tbr': tbr,
'width': width,
'height': height,
'vcodec': vcodec,
'preference': preference,
})
return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 100 if preference else -100,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True, live=False):
res = self._download_webpage_handle(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal)
if res is False:
return []
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
return self._parse_m3u8_formats(
m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
preference=preference, m3u8_id=m3u8_id, live=live)
def _parse_m3u8_formats(self, m3u8_doc, m3u8_url, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, live=False):
if '#EXT-X-FAXS-CM:' in m3u8_doc: # Adobe Flash Access
return []
formats = [self._m3u8_meta_format(m3u8_url, ext, preference, m3u8_id)]
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
# References:
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
# 2. https://github.com/rg3/youtube-dl/issues/12211
# We should try extracting formats only from master playlists [1, 4.3.4],
# i.e. playlists that describe available qualities. On the other hand
# media playlists [1, 4.3.3] should be returned as is since they contain
# just the media without qualities renditions.
# Fortunately, master playlist can be easily distinguished from media
# playlist based on particular tags availability. As of [1, 4.3.3, 4.3.4]
        # master playlist tags MUST NOT appear in a media playlist and vice versa.
# As of [1, 4.3.3.1] #EXT-X-TARGETDURATION tag is REQUIRED for every
# media playlist and MUST NOT appear in master playlist thus we can
# clearly detect media playlist with this criterion.
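        # For instance (illustrative snippets, not real manifests), a media
        # playlist contains lines like:
        #     #EXT-X-TARGETDURATION:10
        #     #EXTINF:9.009,
        #     segment0.ts
        # while a master playlist contains lines like:
        #     #EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION=640x360
        #     low/index.m3u8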
if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is
return [{
'url': m3u8_url,
'format_id': m3u8_id,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}]
groups = {}
last_stream_inf = {}
def extract_media(x_media_line):
media = parse_m3u8_attributes(x_media_line)
# As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
if not (media_type and group_id and name):
return
groups.setdefault(group_id, []).append(media)
if media_type not in ('VIDEO', 'AUDIO'):
return
media_url = media.get('URI')
if media_url:
format_id = []
for v in (group_id, name):
if v:
format_id.append(v)
f = {
'format_id': '-'.join(format_id),
'url': format_url(media_url),
'language': media.get('LANGUAGE'),
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
if media_type == 'AUDIO':
f['vcodec'] = 'none'
formats.append(f)
def build_stream_name():
            # Although the specification does not mention a NAME attribute for
            # the EXT-X-STREAM-INF tag, it may still sometimes be present (see [1]
            # or the vidio test in TestInfoExtractor.test_parse_m3u8_formats)
# 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
stream_name = last_stream_inf.get('NAME')
if stream_name:
return stream_name
# If there is no NAME in EXT-X-STREAM-INF it will be obtained
# from corresponding rendition group
stream_group_id = last_stream_inf.get('VIDEO')
if not stream_group_id:
return
stream_group = groups.get(stream_group_id)
if not stream_group:
return stream_group_id
rendition = stream_group[0]
return rendition.get('NAME') or stream_group_id
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_stream_inf = parse_m3u8_attributes(line)
elif line.startswith('#EXT-X-MEDIA:'):
extract_media(line)
elif line.startswith('#') or not line.strip():
continue
else:
tbr = float_or_none(
last_stream_inf.get('AVERAGE-BANDWIDTH') or
last_stream_inf.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
stream_name = build_stream_name()
# Bandwidth of live streams may differ over time thus making
# format_id unpredictable. So it's better to keep provided
# format_id intact.
if not live:
format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
manifest_url = format_url(line.strip())
f = {
'format_id': '-'.join(format_id),
'url': manifest_url,
'manifest_url': manifest_url,
'tbr': tbr,
'ext': ext,
'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
'protocol': entry_protocol,
'preference': preference,
}
resolution = last_stream_inf.get('RESOLUTION')
if resolution:
mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
if mobj:
f['width'] = int(mobj.group('width'))
f['height'] = int(mobj.group('height'))
# Unified Streaming Platform
mobj = re.search(
r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
if mobj:
abr, vbr = mobj.groups()
abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
f.update({
'vbr': vbr,
'abr': abr,
})
codecs = parse_codecs(last_stream_inf.get('CODECS'))
f.update(codecs)
audio_group_id = last_stream_inf.get('AUDIO')
# As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
# references a rendition group MUST have a CODECS attribute.
# However, this is not always respected, for example, [2]
# contains EXT-X-STREAM-INF tag which references AUDIO
                # rendition group but does not have CODECS and, despite
                # referencing an audio group, still represents
                # a complete (with audio and video) format. So, for such cases
# we will ignore references to rendition groups and treat them
# as complete formats.
if audio_group_id and codecs and f.get('vcodec') != 'none':
audio_group = groups.get(audio_group_id)
if audio_group and audio_group[0].get('URI'):
# TODO: update acodec for audio only formats with
# the same GROUP-ID
f['acodec'] = 'none'
formats.append(f)
last_stream_inf = {}
return formats
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
namespace = self._parse_smil_namespace(smil)
formats = self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
video_id = os.path.splitext(url_basename(smil_url))[0]
title = None
description = None
upload_date = None
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
name = meta.attrib.get('name')
content = meta.attrib.get('content')
if not name or not content:
continue
if not title and name == 'title':
title = content
elif not description and name in ('description', 'abstract'):
description = content
elif not upload_date and name == 'date':
upload_date = unified_strdate(content)
thumbnails = [{
'id': image.get('type'),
'url': image.get('src'),
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
return {
'id': video_id,
'title': title or video_id,
'description': description,
'upload_date': upload_date,
'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
}
def _parse_smil_namespace(self, smil):
return self._search_regex(
r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
base = smil_url
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
b = meta.get('base') or meta.get('httpBase')
if b:
base = b
break
formats = []
rtmp_count = 0
http_count = 0
m3u8_count = 0
srcs = []
media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
for medium in media:
src = medium.get('src')
if not src or src in srcs:
continue
srcs.append(src)
bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
width = int_or_none(medium.get('width'))
height = int_or_none(medium.get('height'))
proto = medium.get('proto')
ext = medium.get('ext')
src_ext = determine_ext(src)
streamer = medium.get('streamer') or base
if proto == 'rtmp' or streamer.startswith('rtmp'):
rtmp_count += 1
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
if transform_rtmp_url:
streamer, src = transform_rtmp_url(streamer, src)
formats[-1].update({
'url': streamer,
'play_path': src,
})
continue
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
src_url = src_url.strip()
if proto == 'm3u8' or src_ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
if len(m3u8_formats) == 1:
m3u8_count += 1
m3u8_formats[0].update({
'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
})
formats.extend(m3u8_formats)
continue
if src_ext == 'f4m':
f4m_url = src_url
if not f4m_params:
f4m_params = {
'hdcore': '3.2.0',
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse_urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
continue
if src_url.startswith('http') and self._is_valid_url(src, video_id):
http_count += 1
formats.append({
'url': src_url,
'ext': ext or src_ext or 'flv',
'format_id': 'http-%d' % (bitrate or http_count),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
continue
return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
urls = []
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
if not src or src in urls:
continue
urls.append(src)
ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
'ext': ext,
})
return subtitles
def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
xspf = self._download_xml(
playlist_url, playlist_id, 'Downloading xpsf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(xspf, playlist_id)
def _parse_xspf(self, playlist, playlist_id):
NS_MAP = {
'xspf': 'http://xspf.org/ns/0/',
's1': 'http://static.streamone.nl/player/ns/0',
}
entries = []
for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
title = xpath_text(
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
description = xpath_text(
track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
thumbnail = xpath_text(
track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
duration = float_or_none(
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
formats = [{
'url': location.text,
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
} for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
self._sort_formats(formats)
entries.append({
'id': playlist_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
})
return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
res = self._download_webpage_handle(
mpd_url, video_id,
note=note or 'Downloading MPD manifest',
errnote=errnote or 'Failed to download MPD manifest',
fatal=fatal)
if res is False:
return []
mpd, urlh = res
mpd_base_url = base_url(urlh.geturl())
return self._parse_mpd_formats(
compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url,
formats_dict=formats_dict, mpd_url=mpd_url)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
"""
Parse formats from MPD manifest.
References:
1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
"""
if mpd_doc.get('type') == 'dynamic':
return []
namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return element.find(_add_ns('ContentProtection')) is not None
def extract_multisegment_info(element, ms_parent_info):
ms_info = ms_parent_info.copy()
# As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
            # common attributes and elements. We will only extract the ones
            # relevant for us.
def extract_common(source):
segment_timeline = source.find(_add_ns('SegmentTimeline'))
if segment_timeline is not None:
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
ms_info['s'] = []
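                        # Each <S> entry describes a run of segments: @t is the start
                        # time, @d the duration and @r how many extra times it repeats.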
for s in s_e:
r = int(s.get('r', 0))
ms_info['total_number'] += 1 + r
ms_info['s'].append({
't': int(s.get('t', 0)),
# @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
'd': int(s.attrib['d']),
'r': r,
})
start_number = source.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
timescale = source.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = source.get('duration')
if segment_duration:
ms_info['segment_duration'] = int(segment_duration)
def extract_Initialization(source):
initialization = source.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
segment_list = element.find(_add_ns('SegmentList'))
if segment_list is not None:
extract_common(segment_list)
extract_Initialization(segment_list)
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if segment_template is not None:
extract_common(segment_template)
media = segment_template.get('media')
if media:
ms_info['media'] = media
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization'] = initialization
else:
extract_Initialization(segment_template)
return ms_info
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = parse_duration(period.get('duration')) or mpd_duration
period_ms_info = extract_multisegment_info(period, {
'start_number': 1,
'timescale': 1,
})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
if is_drm_protected(adaptation_set):
continue
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
if is_drm_protected(representation):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
# According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if content_type == 'text':
# TODO implement WebVTT downloading
pass
elif content_type in ('video', 'audio'):
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if base_url_e is not None:
base_url = base_url_e.text + base_url
if re.match(r'^https?://', base_url):
break
if mpd_base_url and not re.match(r'^https?://', base_url):
if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
mpd_base_url += '/'
base_url = mpd_base_url + base_url
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
bandwidth = int_or_none(representation_attrib.get('bandwidth'))
f = {
'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
'url': base_url,
'manifest_url': mpd_url,
'ext': mimetype2ext(mime_type),
'width': int_or_none(representation_attrib.get('width')),
'height': int_or_none(representation_attrib.get('height')),
'tbr': float_or_none(bandwidth, 1000),
'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
'fps': int_or_none(representation_attrib.get('frameRate')),
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
'format_note': 'DASH %s' % content_type,
'filesize': filesize,
}
f.update(parse_codecs(representation_attrib.get('codecs')))
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
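                        # DASH URL templates carry $Number$/$Time$/$Bandwidth$ placeholders,
                        # optionally with a printf-style width (e.g. $Number%05d$); rewrite
                        # them as Python %-format fields (%(Number)05d) so the loops below
                        # can simply apply a dict of values.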
def prepare_template(template_name, identifiers):
t = representation_ms_info[template_name]
t = t.replace('$RepresentationID$', representation_id)
t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
                            t = t.replace('$$', '$')
return t
# @initialization is a regular template like @media one
# so it should be handled just the same way (see
# https://github.com/rg3/youtube-dl/issues/11605)
if 'initialization' in representation_ms_info:
initialization_template = prepare_template(
'initialization',
# As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
# $Time$ shall not be included for @initialization thus
# only $Bandwidth$ remains
('Bandwidth', ))
representation_ms_info['initialization_url'] = initialization_template % {
'Bandwidth': bandwidth,
}
if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
# As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
# can't be used at the same time
if '%(Number' in media_template and 's' not in representation_ms_info:
segment_duration = None
                                if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
representation_ms_info['fragments'] = [{
'url': media_template % {
'Number': segment_number,
'Bandwidth': bandwidth,
},
'duration': segment_duration,
} for segment_number in range(
representation_ms_info['start_number'],
representation_ms_info['total_number'] + representation_ms_info['start_number'])]
else:
# $Number*$ or $Time$ in media template with S list available
# Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
# Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
representation_ms_info['fragments'] = []
segment_time = 0
segment_d = None
segment_number = representation_ms_info['start_number']
def add_segment_url():
segment_url = media_template % {
'Time': segment_time,
'Bandwidth': bandwidth,
'Number': segment_number,
}
representation_ms_info['fragments'].append({
'url': segment_url,
'duration': float_or_none(segment_d, representation_ms_info['timescale']),
})
for num, s in enumerate(representation_ms_info['s']):
segment_time = s.get('t') or segment_time
segment_d = s['d']
add_segment_url()
segment_number += 1
for r in range(s.get('r', 0)):
segment_time += segment_d
add_segment_url()
segment_number += 1
segment_time += segment_d
elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
# No media template
# Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
# or any YouTube dashsegments video
fragments = []
segment_index = 0
timescale = representation_ms_info['timescale']
for s in representation_ms_info['s']:
duration = float_or_none(s['d'], timescale)
for r in range(s.get('r', 0) + 1):
fragments.append({
'url': representation_ms_info['segment_urls'][segment_index],
'duration': duration,
})
segment_index += 1
representation_ms_info['fragments'] = fragments
# NB: MPD manifest may contain direct URLs to unfragmented media.
# No fragments key is present in this case.
if 'fragments' in representation_ms_info:
f.update({
'fragments': [],
'protocol': 'http_dash_segments',
})
if 'initialization_url' in representation_ms_info:
initialization_url = representation_ms_info['initialization_url']
if not f.get('url'):
f['url'] = initialization_url
f['fragments'].append({'url': initialization_url})
f['fragments'].extend(representation_ms_info['fragments'])
for fragment in f['fragments']:
fragment['url'] = urljoin(base_url, fragment['url'])
try:
existing_format = next(
fo for fo in formats
if fo['format_id'] == representation_id)
except StopIteration:
full_info = formats_dict.get(representation_id, {}).copy()
full_info.update(f)
formats.append(full_info)
else:
existing_format.update(f)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True):
res = self._download_webpage_handle(
ism_url, video_id,
note=note or 'Downloading ISM manifest',
errnote=errnote or 'Failed to download ISM manifest',
fatal=fatal)
if res is False:
return []
ism, urlh = res
return self._parse_ism_formats(
compat_etree_fromstring(ism.encode('utf-8')), urlh.geturl(), ism_id)
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
return []
duration = int(ism_doc.attrib['Duration'])
timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000
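        # Smooth Streaming defaults to a timescale of 10,000,000 ticks per second (100ns units)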
formats = []
for stream in ism_doc.findall('StreamIndex'):
stream_type = stream.get('Type')
if stream_type not in ('video', 'audio'):
continue
url_pattern = stream.attrib['Url']
stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
stream_name = stream.get('Name')
for track in stream.findall('QualityLevel'):
fourcc = track.get('FourCC')
# TODO: add support for WVC1 and WMAP
if fourcc not in ('H264', 'AVC1', 'AACL'):
self.report_warning('%s is not a supported codec' % fourcc)
continue
tbr = int(track.attrib['Bitrate']) // 1000
width = int_or_none(track.get('MaxWidth'))
height = int_or_none(track.get('MaxHeight'))
sampling_rate = int_or_none(track.get('SamplingRate'))
track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
fragments = []
fragment_ctx = {
'time': 0,
}
stream_fragments = stream.findall('c')
for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
if not fragment_ctx['duration']:
try:
                            next_fragment_time = int(stream_fragments[stream_fragment_index + 1].attrib['t'])
except IndexError:
next_fragment_time = duration
fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
for _ in range(fragment_repeat):
fragments.append({
'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
'duration': fragment_ctx['duration'] / stream_timescale,
})
fragment_ctx['time'] += fragment_ctx['duration']
format_id = []
if ism_id:
format_id.append(ism_id)
if stream_name:
format_id.append(stream_name)
format_id.append(compat_str(tbr))
formats.append({
'format_id': '-'.join(format_id),
'url': ism_url,
'manifest_url': ism_url,
'ext': 'ismv' if stream_type == 'video' else 'isma',
'width': width,
'height': height,
'tbr': tbr,
'asr': sampling_rate,
'vcodec': 'none' if stream_type == 'audio' else fourcc,
'acodec': 'none' if stream_type == 'video' else fourcc,
'protocol': 'ism',
'fragments': fragments,
'_download_params': {
'duration': duration,
'timescale': stream_timescale,
'width': width or 0,
'height': height or 0,
'fourcc': fourcc,
'codec_private_data': track.get('CodecPrivateData'),
'sampling_rate': sampling_rate,
'channels': int_or_none(track.get('Channels', 2)),
'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
},
})
return formats
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None):
def absolute_url(video_url):
return compat_urlparse.urljoin(base_url, video_url)
def parse_content_type(content_type):
if not content_type:
return {}
ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
if ctr:
mimetype, codecs = ctr.groups()
f = parse_codecs(codecs)
f['ext'] = mimetype2ext(mimetype)
return f
return {}
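        # Expand a src/<source> URL into formats: HLS and DASH manifests are
        # resolved into their variant formats, anything else is kept as a single
        # direct URL (is_plain_url tells the caller which case applied).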
def _media_formats(src, cur_media_type):
full_url = absolute_url(src)
ext = determine_ext(full_url)
if ext == 'm3u8':
is_plain_url = False
formats = self._extract_m3u8_formats(
full_url, video_id, ext='mp4',
entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
preference=preference)
elif ext == 'mpd':
is_plain_url = False
formats = self._extract_mpd_formats(
full_url, video_id, mpd_id=mpd_id)
else:
is_plain_url = True
formats = [{
'url': full_url,
'vcodec': 'none' if cur_media_type == 'audio' else None,
}]
return is_plain_url, formats
entries = []
media_tags = [(media_tag, media_type, '')
for media_tag, media_type
in re.findall(r'(?s)(<(video|audio)[^>]*/>)', webpage)]
media_tags.extend(re.findall(
# We only allow video|audio followed by a whitespace or '>'.
# Allowing more characters may end up in significant slow down (see
# https://github.com/rg3/youtube-dl/issues/11979, example URL:
# http://www.porntrex.com/maps/videositemap.xml).
r'(?s)(<(?P<tag>video|audio)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>', webpage))
for media_tag, media_type, media_content in media_tags:
media_info = {
'formats': [],
'subtitles': {},
}
media_attributes = extract_attributes(media_tag)
src = media_attributes.get('src')
if src:
_, formats = _media_formats(src, media_type)
media_info['formats'].extend(formats)
media_info['thumbnail'] = media_attributes.get('poster')
if media_content:
for source_tag in re.findall(r'<source[^>]+>', media_content):
source_attributes = extract_attributes(source_tag)
src = source_attributes.get('src')
if not src:
continue
is_plain_url, formats = _media_formats(src, media_type)
if is_plain_url:
f = parse_content_type(source_attributes.get('type'))
f.update(formats[0])
media_info['formats'].append(f)
else:
media_info['formats'].extend(formats)
for track_tag in re.findall(r'<track[^>]+>', media_content):
track_attributes = extract_attributes(track_tag)
kind = track_attributes.get('kind')
if not kind or kind in ('subtitles', 'captions'):
src = track_attributes.get('src')
if not src:
continue
lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
media_info['subtitles'].setdefault(lang, []).append({
'url': absolute_url(src),
})
if media_info['formats'] or media_info['subtitles']:
entries.append(media_info)
return entries
def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
formats = []
hdcore_sign = 'hdcore=3.7.0'
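        # Akamai HDS URLs are derived from the HLS master URL (/i/ -> /z/,
        # master.m3u8 -> manifest.f4m) and require an hdcore= query parameter,
        # which also has to be carried over to every fragment request.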
        f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
hds_host = hosts.get('hds')
if hds_host:
f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
if 'hdcore=' not in f4m_url:
f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
f4m_formats = self._extract_f4m_formats(
f4m_url, video_id, f4m_id='hds', fatal=False)
for entry in f4m_formats:
entry.update({'extra_param_to_segment_url': hdcore_sign})
formats.extend(f4m_formats)
m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
hls_host = hosts.get('hls')
if hls_host:
m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
return formats
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
url_base = self._search_regex(r'(?:https?|rtmp|rtsp)(://[^?]+)', url, 'format url')
http_base_url = 'http' + url_base
formats = []
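        # Wowza exposes the same stream over several protocols at well-known
        # manifest paths; probe each one unless explicitly skipped.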
if 'm3u8' not in skip_protocols:
formats.extend(self._extract_m3u8_formats(
http_base_url + '/playlist.m3u8', video_id, 'mp4',
m3u8_entry_protocol, m3u8_id='hls', fatal=False))
if 'f4m' not in skip_protocols:
formats.extend(self._extract_f4m_formats(
http_base_url + '/manifest.f4m',
video_id, f4m_id='hds', fatal=False))
if 'dash' not in skip_protocols:
formats.extend(self._extract_mpd_formats(
http_base_url + '/manifest.mpd',
video_id, mpd_id='dash', fatal=False))
if re.search(r'(?:/smil:|\.smil)', url_base):
if 'smil' not in skip_protocols:
rtmp_formats = self._extract_smil_formats(
http_base_url + '/jwplayer.smil',
video_id, fatal=False)
for rtmp_format in rtmp_formats:
rtsp_format = rtmp_format.copy()
rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
del rtsp_format['play_path']
del rtsp_format['ext']
rtsp_format.update({
'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
'protocol': 'rtsp',
})
formats.extend([rtmp_format, rtsp_format])
else:
for protocol in ('rtmp', 'rtsp'):
if protocol not in skip_protocols:
formats.append({
'url': protocol + url_base,
'format_id': protocol,
'protocol': protocol,
})
return formats
def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
mobj = re.search(
r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\).*?\.setup\s*\((?P<options>[^)]+)\)',
webpage)
if mobj:
try:
jwplayer_data = self._parse_json(mobj.group('options'),
video_id=video_id,
transform_source=transform_source)
except ExtractorError:
pass
else:
if isinstance(jwplayer_data, dict):
return jwplayer_data
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
jwplayer_data = self._find_jwplayer_data(
webpage, video_id, transform_source=js_to_json)
return self._parse_jwplayer_data(
jwplayer_data, video_id, *args, **kwargs)
def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
# JWPlayer backward compatibility: flattened playlists
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
if 'playlist' not in jwplayer_data:
jwplayer_data = {'playlist': [jwplayer_data]}
entries = []
# JWPlayer backward compatibility: single playlist item
# https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
if not isinstance(jwplayer_data['playlist'], list):
jwplayer_data['playlist'] = [jwplayer_data['playlist']]
for video_data in jwplayer_data['playlist']:
# JWPlayer backward compatibility: flattened sources
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
if 'sources' not in video_data:
video_data['sources'] = [video_data]
this_video_id = video_id or video_data['mediaid']
formats = self._parse_jwplayer_formats(
video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)
self._sort_formats(formats)
subtitles = {}
tracks = video_data.get('tracks')
if tracks and isinstance(tracks, list):
for track in tracks:
if track.get('kind') != 'captions':
continue
track_url = urljoin(base_url, track.get('file'))
if not track_url:
continue
subtitles.setdefault(track.get('label') or 'en', []).append({
'url': self._proto_relative_url(track_url)
})
entries.append({
'id': this_video_id,
'title': video_data['title'] if require_title else video_data.get('title'),
'description': video_data.get('description'),
'thumbnail': self._proto_relative_url(video_data.get('image')),
'timestamp': int_or_none(video_data.get('pubdate')),
'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
'subtitles': subtitles,
'formats': formats,
})
if len(entries) == 1:
return entries[0]
else:
return self.playlist_result(entries)
def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
urls = []
formats = []
for source in jwplayer_sources_data:
source_url = self._proto_relative_url(source.get('file'))
if not source_url:
continue
if base_url:
source_url = compat_urlparse.urljoin(base_url, source_url)
if source_url in urls:
continue
urls.append(source_url)
source_type = source.get('type') or ''
ext = mimetype2ext(source_type) or determine_ext(source_url)
if source_type == 'hls' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
source_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id=m3u8_id, fatal=False))
elif ext == 'mpd':
formats.extend(self._extract_mpd_formats(
source_url, video_id, mpd_id=mpd_id, fatal=False))
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
source_url, video_id, fatal=False))
# https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
elif source_type.startswith('audio') or ext in (
'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
formats.append({
'url': source_url,
'vcodec': 'none',
'ext': ext,
})
else:
height = int_or_none(source.get('height'))
if height is None:
# Often no height is provided but there is a label in
# format like "1080p", "720p SD", or 1080.
height = int_or_none(self._search_regex(
r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''),
'height', default=None))
a_format = {
'url': source_url,
'width': int_or_none(source.get('width')),
'height': height,
'tbr': int_or_none(source.get('bitrate')),
'ext': ext,
}
if source_url.startswith('rtmp'):
a_format['ext'] = 'flv'
# See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
# of jwplayer.flash.swf
rtmp_url_parts = re.split(
r'((?:mp4|mp3|flv):)', source_url, 1)
if len(rtmp_url_parts) == 3:
rtmp_url, prefix, play_path = rtmp_url_parts
a_format.update({
'url': rtmp_url,
'play_path': prefix + play_path,
})
if rtmp_params:
a_format.update(rtmp_params)
formats.append(a_format)
return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None):
cookie = compat_cookiejar.Cookie(
0, name, value, None, None, domain, None,
None, '/', True, False, expire_time, '', None, None, None)
self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies.SimpleCookie with the cookies for the url """
req = sanitized_Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if tc.get('playlist', []):
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False) or
self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
""" Merge two subtitle dictionaries, language by language. """
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
if (self._downloader.params.get('mark_watched', False) and
(self._get_login_info()[0] is not None or
self._downloader.params.get('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
headers = {}
geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
def _generic_id(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
def _generic_title(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError('This method must be implemented by subclasses')
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
| gpl-2.0 | -1,317,547,598,578,859,800 | 45.640945 | 163 | 0.526691 | false |
benedictpaten/marginPhase | toil/src/toil_marginphase/scripts/bam_stats.py | 1 | 26073 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import glob
import sys
import numpy as np
import pysam
import math
# read keys
R_ID = "id"
R_START_POS = "start_pos"
R_END_POS = "end_pos"
R_LENGTH = "length"
R_SECONDARY = "secondary_alignment"
R_SUPPLEMENTARY = "supplementary_alignment"
R_MAPPING_QUALITY = "mapping_quality"
R_CHROMOSOME = "chromosome"
#bam summary
B_READ_COUNT = "read_count"
B_SECONDARY_COUNT = "secondary_count"
B_SUPPLEMENTARY_COUNT = "supplementary_count"
B_FILTERED_READ_COUNT = "filtered_read_count"
B_CHROMOSOME = "chromosome"
B_MEDIAN_QUAL = "median_quality"
#length summary
L_LOG_LENGTH_BUCKETS = "log_length_buckets"
L_LOG_LENGTH_BUCKETS_ALT = "log_length_buckets_alt"
L_MIN = "min_length"
L_MAX = "max_length"
L_AVG = "avg_length"
L_MED = 'median_len'
L_STD = "std_length"
L_N50 = "N50"
L_LOG_BASE = "log_base"
L_LOG_BASE_ALT = "log_base_alt"
L_LOG_MAX = "log_max"
L_ALL_LENGTHS = "all_lengths"
# depth summary
D_MAX = "max_depth"
D_MIN = "min_depth"
D_MED = "median_depth"
D_AVG = "avg_depth"
D_STD = "std_depth"
D_ALL_DEPTHS = "all_depths"
D_ALL_DEPTH_POSITIONS = "all_depth_positions"
D_ALL_DEPTH_MAP = "all_depth_map"
D_ALL_DEPTH_BINS = "all_depth_bins"
D_SPACING = "depth_spacing"
D_START_IDX = "depth_start_idx"
D_RANGE = "depth_range"
# misc
GENOME_KEY = "genome"
LENGTH_LOG_BASE_DEFAULT=2
LENGTH_LOG_BASE_ALT=10
LENGTH_LOG_MAX_DEFAULT=32
percent = lambda small, big: int(100.0 * small / big)
def parse_args(args = None):
parser = argparse.ArgumentParser("Provides statistics on a BAM/SAM file")
parser.add_argument('--input_glob', '-i', dest='input_glob', default=None, required=True, type=str,
help='Glob matching SAM or BAM file(s)')
parser.add_argument('--generic_stats', '-g', dest='generic_stats', action='store_true', default=False,
help='Print generic stats for all files')
parser.add_argument('--read_length', '-l', dest='read_length', action='store_true', default=False,
help='Print statistics on read length for all files')
parser.add_argument('--read_depth', '-d', dest='read_depth', action='store_true', default=False,
help='Print statistics on read depth for all files')
parser.add_argument('--genome_only', dest='genome_only', action='store_true', default=False,
help='Print only statistics for the whole genome (do not print stats for individual chroms)')
parser.add_argument('--verbose', '-v', dest='verbose', action='store_true', default=False,
help='Print histograms for length and depth')
parser.add_argument('--silent', '-V', dest='silent', action='store_true', default=False,
help='Print nothing')
parser.add_argument('--depth_spacing', '-s', dest='depth_spacing', action='store', default=1000, type=int,
help='How far to sample read data')
parser.add_argument('--depth_range', '-r', dest='depth_range', action='store', default=None,
help='Whether to only calculate depth within a range, ie: \'100000-200000\'')
parser.add_argument('--filter_secondary', dest='filter_secondary', action='store_true', default=False,
help='Filter secondary alignments out')
parser.add_argument('--filter_supplementary', dest='filter_supplementary', action='store_true', default=False,
help='Filter supplemenary alignments out')
parser.add_argument('--filter_read_length_min', dest='read_length_min', action='store', default=None, type=int,
help='Removes reads with length below this')
parser.add_argument('--filter_read_length_max', dest='read_length_max', action='store', default=None, type=int,
help='Removes reads with length above this')
parser.add_argument('--filter_alignment_threshold_min', dest='min_alignment_threshold', action='store',
default=None, type=int, help='Minimum alignment quality threshold')
parser.add_argument('--produce_read_length_tsv', dest='read_length_tsv', action='store',
default=None, type=str, help='Produce a TSV with read lengths, named as this parameter')
parser.add_argument('--read_length_bucket_size', dest='read_length_bucket_size', action='store',
default=50000, type=int, help='Bucket size for read length TSV')
return parser.parse_args() if args is None else parser.parse_args(args)
def get_read_summary(read):
summary = {
R_START_POS: read.reference_start,
R_END_POS: read.reference_end,
R_LENGTH: read.reference_length,
R_ID: read.query_name,
R_SECONDARY: read.is_secondary,
R_SUPPLEMENTARY: read.is_supplementary,
R_MAPPING_QUALITY: read.mapping_quality,
R_CHROMOSOME: read.reference_name
}
return summary
def calculate_n50(lengths):
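    # N50: the largest read length L such that reads of length >= L together
    # contain at least half of all sequenced bases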
half_total_length = sum(lengths) / 2
lengths.sort(reverse=True)
for l in lengths:
half_total_length -= l
if half_total_length <= 0:
return l
def get_read_length_summary(read_summaries, length_log_base=LENGTH_LOG_BASE_DEFAULT,
length_log_max=LENGTH_LOG_MAX_DEFAULT, length_log_base_alt=LENGTH_LOG_BASE_ALT):
# what we look for
log_length_bins = [0 for _ in range(length_log_max)]
log_lenght_alt_bins = [0 for _ in range(length_log_base_alt)] if length_log_base_alt is not None else None
all_lengths = []
for read_summary in read_summaries:
if read_summary[R_LENGTH] is None:
#todo
continue
log_length_bins[int(math.log(read_summary[R_LENGTH], length_log_base))] += 1.0
if length_log_base_alt is not None: log_lenght_alt_bins[int(math.log(read_summary[R_LENGTH], length_log_base_alt))] += 1.0
all_lengths.append(read_summary[R_LENGTH])
summary = {
L_LOG_LENGTH_BUCKETS: log_length_bins,
L_LOG_LENGTH_BUCKETS_ALT: log_lenght_alt_bins,
L_MAX: max(all_lengths),
L_MIN: min(all_lengths),
L_AVG: np.mean(all_lengths),
L_MED: np.median(all_lengths),
L_STD: np.std(all_lengths),
        L_N50: calculate_n50(all_lengths),
L_LOG_BASE: length_log_base,
L_LOG_BASE_ALT: length_log_base_alt,
L_LOG_MAX: length_log_max,
L_ALL_LENGTHS: all_lengths
}
return summary
def graph_read_length_summary(summary, title, save_name=None):
log_lengths = list(summary.keys())
log_lengths.sort()
x = log_lengths
y = [summary[l] for l in log_lengths]
import matplotlib.pyplot as plt
plt.bar(x, y, color='g')
plt.xlabel("Read Length (Log {})".format(LENGTH_LOG_BASE_DEFAULT))
plt.ylabel("Count")
plt.title(title)
if save_name is not None:
plt.savefig(save_name)
else:
plt.show()
def print_generic_read_stats(summary, verbose=False, genome_only=False):
keys = list(summary.keys())
keys.sort()
#print genome last
if GENOME_KEY in keys:
keys.remove(GENOME_KEY)
keys.append(GENOME_KEY)
#print them all
for chrom in keys:
# if we only have one chrom, skip genome reporting
if len(keys) == 2 and chrom == GENOME_KEY and not genome_only: continue
# if genome_only, skip chroms
if genome_only and chrom != GENOME_KEY: continue
print("\tGENERIC STATS: {}".format(chrom))
print("\t\tcount : {}".format(summary[chrom][B_READ_COUNT]))
print("\t\tsecondary : {} ({}%)".format(summary[chrom][B_SECONDARY_COUNT],
percent(summary[chrom][B_SECONDARY_COUNT],
summary[chrom][B_READ_COUNT])))
print("\t\tsupplenatary : {} ({}%)".format(summary[chrom][B_SUPPLEMENTARY_COUNT],
percent(summary[chrom][B_SUPPLEMENTARY_COUNT],
summary[chrom][B_READ_COUNT])))
print("\t\tmedian qual : {}".format(summary[chrom][B_MEDIAN_QUAL]))
def print_log_binned_data(log_bins, indent_count=3):
max_bucket = max(list(filter(lambda x: log_bins[x] != 0, [x for x in range(len(log_bins))])))
min_bucket = min(list(filter(lambda x: log_bins[x] != 0, [x for x in range(len(log_bins))])))
max_bucket_size = max(log_bins)
total_bucket_size = sum(log_bins)
total_bucket_size_left = total_bucket_size
for bucket in range(min_bucket, max_bucket + 1):
id = "%3d:" % bucket
count = int(log_bins[bucket])
pound_count = int(32.0 * count / max_bucket_size)
of_total = 1.0 * count / total_bucket_size
at_least = 1.0 * total_bucket_size_left / total_bucket_size
total_bucket_size_left -= count
print("{} {} {}{} {:6d}\t({:.3f} of total)\t({:.3f} at least)".format(
'\t'*indent_count, id, "#"*pound_count, " "*(32 - pound_count), count, of_total, at_least))
def print_read_length_summary(summary, verbose=False, genome_only=False):
keys = list(summary.keys())
keys.sort()
#print genome last
if GENOME_KEY in keys:
keys.remove(GENOME_KEY)
keys.append(GENOME_KEY)
#print them all
for chrom in keys:
# if we only have one chrom, skip genome reporting
if len(keys) == 2 and chrom == GENOME_KEY and not genome_only: continue
# if genome_only, skip chroms
if genome_only and chrom != GENOME_KEY: continue
print("\tREAD LENGTHS: {}".format(chrom))
print("\t\tmin: {}".format(summary[chrom][L_MIN]))
print("\t\tmax: {}".format(summary[chrom][L_MAX]))
print("\t\tmed: {}".format(summary[chrom][L_MED]))
print("\t\tavg: {}".format(int(summary[chrom][L_AVG])))
print("\t\tstd: {}".format(int(summary[chrom][L_STD])))
print("\t\tN50: {}".format(int(summary[chrom][L_N50])))
if verbose:
print("\t\tread length log_{}:".format(summary[chrom][L_LOG_BASE]))
print_log_binned_data(summary[chrom][L_LOG_LENGTH_BUCKETS])
if L_LOG_LENGTH_BUCKETS_ALT in summary[chrom] and summary[chrom][L_LOG_LENGTH_BUCKETS_ALT] is not None:
print("\t\tread length log_{}:".format(summary[chrom][L_LOG_BASE_ALT]))
print_log_binned_data(summary[chrom][L_LOG_LENGTH_BUCKETS_ALT])
def get_read_depth_summary(read_summaries, spacing, included_range=None):
S, E = 's', 'e'
# get reads which start or end on spacing interval
positions = []
for summary in read_summaries:
if summary[R_LENGTH] is None:
#todo
continue
positions.append((S, int(summary[R_START_POS]/spacing)))
positions.append((E, int(summary[R_END_POS]/spacing)))
# sort them: we iterate from the high end by popping off
positions.sort(key=lambda x: x[1])
start_idx = positions[0][1]
end_idx = positions[-1][1]
# data we care about
depths = [0 for _ in range(end_idx - start_idx + 1)]
depth_positions = []
# iterate over all read starts and ends
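    # sweep from the highest bucket down: passing a read's end (E) increases the
    # running depth, passing its start (S) decreases it again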
depth = 0
idx = end_idx
while idx >= start_idx:
curr = positions.pop()
while curr[1] == idx:
if curr[0] == E: depth += 1
if curr[0] == S: depth -= 1
# iterate
if len(positions) == 0: break
else: curr = positions.pop()
positions.append(curr)
# save and iterate
pos = idx - start_idx
depths[pos] = depth
depth_positions.append(idx)
idx -= 1
#todo I don't like that I don't know why I need to do this
depth_positions.reverse()
assert depth == 0
assert len(positions) == 1
assert len(depths) == len(depth_positions)
depth_map = {pos: depth for pos, depth in zip(depth_positions, depths)}
# check range before outputting summary
if included_range is not None:
# get range
included_range = list(map(int, included_range.split("-")))
if len(included_range) != 2:
raise Exception("Malformed depth range: '{}'".format("-".join(map(str, included_range))))
range_start = int(included_range[0]/spacing)
range_end = int(included_range[1]/spacing)
# sanity check
if range_start > end_idx or range_end < start_idx or range_start >= range_end:
raise Exception("Range {} outside of bounds of chunks: {}".format("-".join(map(str, included_range)),
"-".join(map(str, [start_idx*spacing, end_idx*spacing]))))
new_depths = list()
new_depth_positions = list()
new_depth_map = dict()
for i in range(range_start, range_end):
new_depth_positions.append(i)
new_depths.append(depth_map[i])
new_depth_map[i] = depth_map[i]
# update values
depths = new_depths
depth_positions = new_depth_positions
depth_map = new_depth_map
start_idx = max(start_idx, range_start)
assert len(depths) > 0
assert len(new_depths) == len(new_depth_positions)
# get read depth log value
log_depth_bins = [0 for _ in range(16)]
for depth in depths:
if depth == 0:
log_depth_bins[0] += 1
else:
log_depth_bins[int(math.log(depth, 2))] += 1
# get depth summary
summary = {
D_MAX: max(depths),
D_MIN: min(depths),
D_MED: np.median(depths),
D_AVG: np.mean(depths),
D_STD: np.std(depths),
D_ALL_DEPTHS: depths,
D_ALL_DEPTH_POSITIONS: depth_positions,
D_ALL_DEPTH_MAP: depth_map,
D_ALL_DEPTH_BINS: log_depth_bins,
D_SPACING: spacing,
D_START_IDX: start_idx,
D_RANGE: included_range
}
return summary
def get_genome_depth_summary(summaries):
depths = list()
for summary in summaries.values():
depths.extend(summary[D_ALL_DEPTHS])
summary = {
D_MAX: max(depths),
D_MIN: min(depths),
D_MED: np.median(depths),
D_AVG: np.mean(depths),
D_STD: np.std(depths),
D_ALL_DEPTHS: None,
D_ALL_DEPTH_POSITIONS: None,
D_ALL_DEPTH_MAP: None,
D_ALL_DEPTH_BINS: None,
D_SPACING: None,
D_START_IDX: None,
D_RANGE: None
}
return summary
def write_read_length_tsv(reads, filename, bucket_size=50000):
length_to_bucket = lambda x: int(1.0 * x / bucket_size)
read_lengths = dict()
for read in reads:
bucket = length_to_bucket(read[R_LENGTH])
while len(read_lengths) <= bucket:
read_lengths[len(read_lengths)] = 0
read_lengths[bucket] += 1
with open(filename, 'w') as output:
output.write("#min_length\tmax_length\tread_count\n")
started = False
for i in range(len(read_lengths)):
if not started and read_lengths[i] == 0:
continue
started = True
output.write("{}\t{}\t{}\n".format(bucket_size * i, bucket_size * i + bucket_size - 1, read_lengths[i]))
def print_read_depth_summary(summary, verbose=False, genome_only=False):
keys = list(summary.keys())
keys.sort()
#print genome last
if GENOME_KEY in keys:
keys.remove(GENOME_KEY)
keys.append(GENOME_KEY)
for chrom in keys:
# if we only have one chrom, skip genome reporting
if len(keys) == 2 and chrom == GENOME_KEY and not genome_only: continue
# if genome_only, skip chroms
if genome_only and chrom != GENOME_KEY: continue
print("\tREAD DEPTHS: {}".format(chrom))
print("\t\tmax: {}".format(summary[chrom][D_MAX]))
print("\t\tmin: {}".format(summary[chrom][D_MIN]))
print("\t\tmed: {}".format(summary[chrom][D_MED]))
print("\t\tavg: {}".format(summary[chrom][D_AVG]))
print("\t\tstd: {}".format(summary[chrom][D_STD]))
if chrom != GENOME_KEY and summary[chrom][D_ALL_DEPTH_BINS] is not None:
log_depth_bins = summary[chrom][D_ALL_DEPTH_BINS]
total_depths = sum(log_depth_bins)
log_depth_pairs = [(i, log_depth_bins[i]) for i in range(len(log_depth_bins))]
log_depth_pairs.sort(key=lambda x: x[1], reverse=True)
print("\t\tmost frequent read depths [floor(log2(depth))]:")
for i in range(0,min(len(list(filter(lambda x: x[1] != 0, log_depth_pairs))), 3)):
print("\t\t\t#{}: depth:{} count:{} ({}%)".format(i + 1, log_depth_pairs[i][0], log_depth_pairs[i][1],
int(100.0 * log_depth_pairs[i][1] / total_depths)))
if verbose:
if chrom != GENOME_KEY and summary[chrom][D_RANGE] is not None:
print("\t\tdepths with spacing {}{}:".format(summary[chrom][D_SPACING],
"" if summary[chrom][D_RANGE] is None else
", and range {}".format(summary[chrom][D_RANGE])))
for idx in summary[chrom][D_ALL_DEPTH_POSITIONS]:
depth = summary[chrom][D_ALL_DEPTH_MAP][idx]
id = "%4d:" % idx
pound_count = int(32.0 * depth / summary[chrom][D_MAX])
print("\t\t\t{} {} {}".format(id, '#' * pound_count, depth))
if chrom != GENOME_KEY and summary[chrom][D_ALL_DEPTH_BINS] is not None:
print("\t\tread depth log_2 at above intervals:")
print_log_binned_data(log_depth_bins)
# max_bucket = max(list(filter(lambda x: log_depth_bins[x] != 0, [x for x in range(16)])))
# min_bucket = min(list(filter(lambda x: log_depth_bins[x] != 0, [x for x in range(16)])))
# max_bucket_size = max(log_depth_bins)
# for bucket in range(min_bucket, max_bucket + 1):
# id = "%3d:" % bucket
# count = log_depth_bins[bucket]
# pound_count = int(32.0 * count / max_bucket_size)
# print("\t\t\t{} {} {}".format(id, "#" * pound_count, count))
def main(args = None):
# get our arguments
args = parse_args() if args is None else parse_args(args)
if True not in [args.generic_stats, args.read_depth, args.read_length]:
args.generic_stats, args.read_depth, args.read_length = True, True, True
# get filenames, sanity check
in_alignments = glob.glob(args.input_glob)
if len(in_alignments) == 0:
print("No files matching {}".format(args.input_glob))
return 1
else:
if not args.silent: print("Analyzing {} files".format(len(in_alignments)))
# data we care about
bam_summaries = dict()
length_summaries = dict()
depth_summaries = dict()
all_read_summaries = list()
# iterate over all alignments
for alignment_filename in in_alignments:
# sanity check
if not (alignment_filename.endswith("sam") or alignment_filename.endswith("bam")):
print("Matched file {} has unexpected filetype".format(alignment_filename))
continue
# prep
bam_summaries[alignment_filename] = {}
length_summaries[alignment_filename] = {}
depth_summaries[alignment_filename] = {}
# data we're gathering
read_summaries = list()
chromosomes = set()
# get read data we care about
samfile = None
read_count = 0
try:
if not args.silent: print("Read {}:".format(alignment_filename))
samfile = pysam.AlignmentFile(alignment_filename, 'rb' if alignment_filename.endswith("bam") else 'r')
for read in samfile.fetch():
read_count += 1
summary = get_read_summary(read)
read_summaries.append(summary)
chromosomes.add(read.reference_name)
finally:
if samfile is not None: samfile.close()
bad_read_count = len(list(filter(lambda x: x[R_LENGTH] is None, read_summaries)))
if bad_read_count > 0 and not args.silent:
print("\tGot {}/{} ({}%) bad reads in {}. Filtering out."
.format(bad_read_count, len(read_summaries), int(100.0 * bad_read_count / len(read_summaries)),
alignment_filename), file=sys.stderr)
read_summaries = list(filter(lambda x: x[R_LENGTH] is not None, read_summaries))
# filter if appropriate
did_filter = False
if args.filter_secondary:
if not args.silent: print("\tFiltering secondary reads")
read_summaries = list(filter(lambda x: not x[R_SECONDARY], read_summaries))
did_filter = True
if args.filter_supplementary:
if not args.silent: print("\tFiltering supplementary reads")
read_summaries = list(filter(lambda x: not x[R_SUPPLEMENTARY], read_summaries))
did_filter = True
if args.min_alignment_threshold is not None:
if not args.silent: print("\tFiltering reads below map quality {}".format(args.min_alignment_threshold))
read_summaries = list(filter(lambda x: x[R_MAPPING_QUALITY] >= args.min_alignment_threshold, read_summaries))
did_filter = True
if args.read_length_min is not None:
if not args.silent: print("\tFiltering reads below length {}".format(args.read_length_min))
read_summaries = list(filter(lambda x: x[R_LENGTH] >= args.read_length_min, read_summaries))
did_filter = True
if args.read_length_max is not None:
if not args.silent: print("\tFiltering reads above length {}".format(args.read_length_max))
read_summaries = list(filter(lambda x: x[R_LENGTH] <= args.read_length_max, read_summaries))
did_filter = True
if did_filter:
filtered_read_count = len(read_summaries)
if not args.silent:
print("\tFiltering removed {}/{} reads ({}% remaining) "
.format((read_count - filtered_read_count), read_count, 100 * filtered_read_count / read_count))
# summarize
for chromosome in chromosomes:
#prep
chromosome_reads = list()
chrom_read_count = 0
chrom_sec_count = 0
chrom_sup_count = 0
# analyze
for read in read_summaries:
if read[R_CHROMOSOME] == chromosome:
chromosome_reads.append(read)
chrom_read_count += 1
if read[R_SECONDARY]: chrom_sec_count += 1
if read[R_SUPPLEMENTARY]: chrom_sup_count += 1
# filtered out all reads
if len(chromosome_reads) == 0: continue
# summarize
bam_summaries[alignment_filename][chromosome] = {
B_READ_COUNT: chrom_read_count,
B_SECONDARY_COUNT: chrom_sec_count,
B_SUPPLEMENTARY_COUNT: chrom_sup_count,
B_MEDIAN_QUAL: np.median(list(map(lambda x: x[R_MAPPING_QUALITY], chromosome_reads))),
B_FILTERED_READ_COUNT:len(chromosome_reads),
B_CHROMOSOME: chromosome
}
length_summaries[alignment_filename][chromosome] = get_read_length_summary(chromosome_reads)
depth_summaries[alignment_filename][chromosome] = get_read_depth_summary(chromosome_reads,
spacing=args.depth_spacing,
included_range=args.depth_range)
# whole file summaries
bam_summaries[alignment_filename][GENOME_KEY] = {
B_READ_COUNT: read_count,
B_SECONDARY_COUNT: len(list(filter(lambda x: x[R_SECONDARY], read_summaries))),
B_SUPPLEMENTARY_COUNT: len(list(filter(lambda x: x[R_SUPPLEMENTARY], read_summaries))),
B_MEDIAN_QUAL: np.median(list(map(lambda x: x[R_MAPPING_QUALITY], read_summaries))),
B_FILTERED_READ_COUNT: len(read_summaries),
B_CHROMOSOME: GENOME_KEY
}
length_summaries[alignment_filename][GENOME_KEY] = get_read_length_summary(read_summaries)
depth_summaries[alignment_filename][GENOME_KEY] = get_genome_depth_summary(depth_summaries[alignment_filename])
# print
if args.generic_stats:
if not args.silent: print_generic_read_stats(bam_summaries[alignment_filename],
verbose=args.verbose, genome_only=args.genome_only)
if args.read_length:
if not args.silent: print_read_length_summary(length_summaries[alignment_filename],
verbose=args.verbose, genome_only=args.genome_only)
if args.read_depth:
if not args.silent: print_read_depth_summary(depth_summaries[alignment_filename],
verbose=args.verbose, genome_only=args.genome_only)
# save
all_read_summaries.extend(read_summaries)
# do whole run analysis
if args.read_length and not args.silent and len(in_alignments) > 1:
print_read_length_summary({'ALL_FILES':get_read_length_summary(all_read_summaries)}, verbose=args.verbose)
# tsv
if args.read_length_tsv is not None:
write_read_length_tsv(all_read_summaries, args.read_length_tsv, args.read_length_bucket_size)
return bam_summaries, length_summaries, depth_summaries
if __name__ == "__main__":
main()
| mit | 792,703,284,669,462,400 | 41.395122 | 136 | 0.586699 | false |
Ophiuchus1312/enigma2-master | lib/python/Plugins/SystemPlugins/HdmiCEC/plugin.py | 1 | 5811 | from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.config import config, configfile, getConfigListEntry
from Components.Sources.StaticText import StaticText
from Components.SystemInfo import SystemInfo
from Tools.Directories import fileExists
from os import path
if path.exists("/dev/hdmi_cec") or path.exists("/dev/misc/hdmi_cec0"):
import Components.HdmiCec
class HdmiCECSetupScreen(Screen, ConfigListScreen):
skin = """
<screen position="c-300,c-250" size="600,500" title="HDMI CEC setup">
<widget name="config" position="25,25" size="550,350" />
<widget source="current_address" render="Label" position="25,375" size="550,30" zPosition="10" font="Regular;21" halign="left" valign="center" />
<widget source="fixed_address" render="Label" position="25,405" size="550,30" zPosition="10" font="Regular;21" halign="left" valign="center" />
<ePixmap pixmap="buttons/red.png" position="20,e-45" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="160,e-45" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="300,e-45" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/blue.png" position="440,e-45" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="20,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="160,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="300,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="440,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
</screen>"""
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("HDMI CEC Setup"))
from Components.ActionMap import ActionMap
from Components.Button import Button
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self["key_yellow"] = StaticText(_("Set fixed"))
self["key_blue"] = StaticText(_("Clear fixed"))
self["current_address"] = StaticText()
self["fixed_address"] = StaticText()
self["actions"] = ActionMap(["SetupActions", "ColorActions", "MenuActions"],
{
"ok": self.keyGo,
"save": self.keyGo,
"cancel": self.keyCancel,
"green": self.keyGo,
"red": self.keyCancel,
"yellow": self.setFixedAddress,
"blue": self.clearFixedAddress,
"menu": self.closeRecursive,
}, -2)
self.onChangedEntry = [ ]
self.list = []
ConfigListScreen.__init__(self, self.list, session = self.session, on_change = self.changedEntry)
self.createSetup()
def createSetup(self):
self.list = []
self.list.append(getConfigListEntry(_("Enabled"), config.hdmicec.enabled))
if config.hdmicec.enabled.getValue():
self.list.append(getConfigListEntry(_("Put TV in standby"), config.hdmicec.control_tv_standby))
self.list.append(getConfigListEntry(_("Wakeup TV from standby"), config.hdmicec.control_tv_wakeup))
self.list.append(getConfigListEntry(_("Regard deep standby as standby"), config.hdmicec.handle_deepstandby_events))
self.list.append(getConfigListEntry(_("Switch TV to correct input"), config.hdmicec.report_active_source))
self.list.append(getConfigListEntry(_("Use TV remote control"), config.hdmicec.report_active_menu))
self.list.append(getConfigListEntry(_("Handle standby from TV"), config.hdmicec.handle_tv_standby))
self.list.append(getConfigListEntry(_("Handle wakeup from TV"), config.hdmicec.handle_tv_wakeup))
self.list.append(getConfigListEntry(_("Wakeup signal from TV"), config.hdmicec.tv_wakeup_detection))
self.list.append(getConfigListEntry(_("Forward volume keys"), config.hdmicec.volume_forwarding))
self.list.append(getConfigListEntry(_("Put your STB_BOX in standby"), config.hdmicec.control_receiver_standby))
self.list.append(getConfigListEntry(_("Wakeup your STB_BOX from standby"), config.hdmicec.control_receiver_wakeup))
self.list.append(getConfigListEntry(_("Minimum send interval"), config.hdmicec.minimum_send_interval))
if fileExists("/proc/stb/hdmi/preemphasis"):
self.list.append(getConfigListEntry(_("Use HDMI-preemphasis"), config.hdmicec.preemphasis))
self["config"].list = self.list
self["config"].l.setList(self.list)
self.updateAddress()
# for summary:
def changedEntry(self):
if self["config"].getCurrent()[0] == _("Enabled"):
self.createSetup()
for x in self.onChangedEntry:
x()
def keyLeft(self):
ConfigListScreen.keyLeft(self)
def keyRight(self):
ConfigListScreen.keyRight(self)
def keyGo(self):
for x in self["config"].list:
x[1].save()
configfile.save()
self.close()
def keyCancel(self):
for x in self["config"].list:
x[1].cancel()
self.close()
def setFixedAddress(self):
Components.HdmiCec.hdmi_cec.setFixedPhysicalAddress(Components.HdmiCec.hdmi_cec.getPhysicalAddress())
self.updateAddress()
def clearFixedAddress(self):
Components.HdmiCec.hdmi_cec.setFixedPhysicalAddress("0.0.0.0")
self.updateAddress()
def updateAddress(self):
self["current_address"].setText(_("Current CEC address") + ": " + Components.HdmiCec.hdmi_cec.getPhysicalAddress())
if config.hdmicec.fixed_physical_address.getValue() == "0.0.0.0":
fixedaddresslabel = ""
else:
fixedaddresslabel = _("Using fixed address") + ": " + config.hdmicec.fixed_physical_address.getValue()
self["fixed_address"].setText(fixedaddresslabel)
def Plugins(**kwargs):
return []
| gpl-2.0 | 7,013,287,884,594,974,000 | 46.631148 | 187 | 0.722767 | false |
teemulehtinen/a-plus | external_services/tests.py | 1 | 5522 | from datetime import timedelta
from django.test import TestCase
from django.utils import timezone
from course.models import Course, CourseInstance
from userprofile.models import User
from .cache import CachedCourseMenu
from .models import LinkService, LTIService, MenuItem
from .templatetags import external_services as tags
class ExternalServicesTest(TestCase):
def setUp(self):
self.user = User(username="testUser")
self.user.set_password("testPassword")
self.user.save()
self.assistant = User(username="testUser2")
self.assistant.set_password("testPassword")
self.assistant.save()
self.link_service = LinkService.objects.create(
url="http://www.external-service.com",
menu_label="External Service"
)
self.disabled_link_service = LinkService.objects.create(
url="http://www.disabled-external-service.com",
menu_label="Disabled External Service",
enabled=False
)
self.lti_service = LTIService.objects.create(
url="http://www.lti-service.com",
menu_label="LTI Service",
menu_icon_class="star",
consumer_key="123456789",
consumer_secret="987654321"
)
self.course = Course.objects.create(
name="test course",
code="123456",
url="Course-Url"
)
self.today = timezone.now()
self.tomorrow = self.today + timedelta(days=1)
self.course_instance = CourseInstance.objects.create(
instance_name="Fall 2011",
starting_time=self.today,
ending_time=self.tomorrow,
course=self.course,
url="T-00.1000_2011"
)
self.course_instance.enroll_student(self.user)
self.course_instance.assistants.add(self.assistant.userprofile)
self.menu_item1 = MenuItem.objects.create(
service=self.link_service,
course_instance=self.course_instance,
access=MenuItem.ACCESS.STUDENT,
menu_label="Overriden Label",
menu_icon_class="star"
)
self.menu_item2 = MenuItem.objects.create(
service=self.link_service,
course_instance=self.course_instance,
access=MenuItem.ACCESS.STUDENT,
enabled=False
)
self.menu_item3 = MenuItem.objects.create(
service=self.disabled_link_service,
course_instance=self.course_instance,
access=MenuItem.ACCESS.STUDENT
)
self.menu_item4 = MenuItem.objects.create(
service=self.lti_service,
course_instance=self.course_instance,
access=MenuItem.ACCESS.STUDENT
)
self.menu_item5 = MenuItem.objects.create(
service=self.lti_service,
course_instance=self.course_instance,
access=MenuItem.ACCESS.ASSISTANT
)
def test_menuitem_label(self):
self.assertEqual("Overriden Label", self.menu_item1.label)
self.assertEqual("External Service", self.menu_item2.label)
self.assertEqual("Disabled External Service", self.menu_item3.label)
self.assertEqual("LTI Service", self.menu_item4.label)
self.assertEqual("LTI Service", self.menu_item5.label)
def test_menuitem_icon_class(self):
self.assertEqual("star", self.menu_item1.icon_class)
self.assertEqual("globe", self.menu_item2.icon_class)
self.assertEqual("globe", self.menu_item3.icon_class)
self.assertEqual("star", self.menu_item4.icon_class)
self.assertEqual("star", self.menu_item5.icon_class)
def test_menuitem_url(self):
self.assertEqual("http://www.external-service.com", self.menu_item1.url)
self.assertEqual("http://www.external-service.com", self.menu_item2.url)
self.assertEqual("http://www.disabled-external-service.com", self.menu_item3.url)
self.assertEqual("/Course-Url/T-00.1000_2011/lti-login/4/", self.menu_item4.url)
self.assertEqual("/Course-Url/T-00.1000_2011/lti-login/5/", self.menu_item5.url)
def test_view(self):
url = self.menu_item4.url
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
self.client.login(username="testUser", password="testPassword")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTrue("oauth_signature" in str(response.content))
url = self.menu_item5.url
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
self.client.login(username="testUser2", password="testPassword")
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.menu_item5.access = MenuItem.ACCESS.TEACHER
self.menu_item5.save()
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
self.course.teachers.add(self.assistant.userprofile)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_cached(self):
menu = CachedCourseMenu(self.course_instance)
self.assertEqual(len(menu.student_link_groups()), 1)
self.assertEqual(len(menu.student_link_groups()[0]['items']), 4)
self.assertEqual(len(menu.staff_link_groups()), 1)
self.assertEqual(len(menu.staff_link_groups()[0]['items']), 1)
| gpl-3.0 | 3,099,727,907,288,486,000 | 37.082759 | 89 | 0.638718 | false |
partofthething/home-assistant | homeassistant/components/gogogate2/sensor.py | 1 | 1787 | """Support for Gogogate2 garage Doors."""
from typing import Callable, List, Optional
from gogogate2_api.common import get_configured_doors
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import DEVICE_CLASS_BATTERY
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from .common import GoGoGate2Entity, get_data_update_coordinator
SENSOR_ID_WIRED = "WIRE"
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], Optional[bool]], None],
) -> None:
"""Set up the config entry."""
data_update_coordinator = get_data_update_coordinator(hass, config_entry)
async_add_entities(
[
DoorSensor(config_entry, data_update_coordinator, door)
for door in get_configured_doors(data_update_coordinator.data)
if door.sensorid and door.sensorid != SENSOR_ID_WIRED
]
)
class DoorSensor(GoGoGate2Entity):
"""Sensor entity for goggate2."""
@property
def name(self):
"""Return the name of the door."""
return f"{self._get_door().name} battery"
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return DEVICE_CLASS_BATTERY
@property
def state(self):
"""Return the state of the entity."""
door = self._get_door()
return door.voltage # This is a percentage, not an absolute voltage
@property
def device_state_attributes(self):
"""Return the state attributes."""
door = self._get_door()
if door.sensorid is not None:
return {"door_id": door.door_id, "sensor_id": door.sensorid}
return None
| mit | 4,837,301,209,623,546,000 | 29.810345 | 77 | 0.670957 | false |
jorol/StoryMapJS | api.py | 1 | 26872 | from __future__ import division
from flask import Flask, request, session, redirect, url_for, \
render_template, jsonify, abort
from collections import defaultdict
import math
import os
import sys
import importlib
import traceback
import time
import datetime
import re
import json
from functools import wraps
import urllib
from urlparse import urlparse
# Import settings module
if __name__ == "__main__":
if not os.environ.get('FLASK_SETTINGS_MODULE', ''):
os.environ['FLASK_SETTINGS_MODULE'] = 'core.settings.loc'
settings_module = os.environ.get('FLASK_SETTINGS_MODULE')
try:
importlib.import_module(settings_module)
except ImportError, e:
raise ImportError("Could not import settings '%s' (Is it on sys.path?): %s" % (settings_module, e))
import hashlib
import requests
import slugify
import bson
from oauth2client.client import OAuth2WebServerFlow
from storymap import storage, google
from storymap.connection import _user
app = Flask(__name__)
app.config.from_envvar('FLASK_CONFIG_MODULE')
settings = sys.modules[settings_module]
_GOOGLE_OAUTH_SCOPES = [
'https://www.googleapis.com/auth/drive.readonly',
'https://www.googleapis.com/auth/userinfo.profile'
];
@app.context_processor
def inject_urls():
"""
Inject urls into the templates.
Template variable will always have a trailing slash.
"""
static_url = settings.STATIC_URL or app.static_url_path
if not static_url.endswith('/'):
static_url += '/'
storage_url = settings.AWS_STORAGE_BUCKET_URL
if not storage_url.endswith('/'):
storage_url += '/'
storage_url += settings.AWS_STORAGE_BUCKET_KEY
if not storage_url.endswith('/'):
storage_url += '/'
cdn_url = settings.CDN_URL
if not cdn_url.endswith('/'):
cdn_url += '/'
return dict(
STATIC_URL=static_url, static_url=static_url,
STORAGE_URL=storage_url, storage_url=storage_url,
CDN_URL=cdn_url, cdn_url=cdn_url)
class APIEncoder(json.JSONEncoder):
def default(self, obj):
"""Format obj as json."""
if isinstance(obj, datetime.datetime):
return obj.isoformat()
if isinstance(obj, bson.objectid.ObjectId):
return str(obj)
return json.JSONEncoder.default(self, obj)
def _str2bool(s):
"""Convert string to boolean."""
return s.lower() in ("true", "t", "yes", "y", "1")
def _request_wants_json():
"""Determine response type."""
best = request.accept_mimetypes \
.best_match(['application/json', 'text/html'])
return best == 'application/json' and \
request.accept_mimetypes[best] > \
request.accept_mimetypes['text/html']
def _jsonify(*args, **kwargs):
"""Convert to JSON"""
return app.response_class(json.dumps(dict(*args, **kwargs), cls=APIEncoder),
mimetype='application/json')
def _format_err(err_type, err_msg):
return "%s: %s" % (err_type, err_msg)
def _get_uid(user_string):
"""Generate a unique identifer for user string"""
return hashlib.md5(user_string).hexdigest()
def _utc_now():
return datetime.datetime.utcnow().isoformat()+'Z'
#
# request/session
#
def _request_get(*keys):
"""Get request data and return values"""
if request.method == 'POST':
obj = request.form
else:
obj = request.args
values = []
for k in keys:
v = obj.get(k)
values.append(v)
if len(values) > 1:
return values
return values[0]
def _request_get_required(*keys):
"""Verify existence of request data and return values"""
if request.method == 'POST':
obj = request.form
else:
obj = request.args
values = []
for k in keys:
v = obj.get(k)
if not v:
raise Exception('Expected "%s" parameter' % k)
values.append(v)
if len(values) > 1:
return values
return values[0]
def _session_get(*keys):
"""Verify existence of session data and return value"""
values = []
for k in keys:
        v = session.get(k)
        if not v:
            raise Exception('Expected "%s" in session' % k)
values.append(v)
if len(values) > 1:
return values
return values[0]
def _session_pop(*keys):
"""Remove list of keys from session"""
for k in keys:
if k in session:
session.pop(k)
#
# auth
# https://developers.google.com/drive/web/quickstart/quickstart-python
#
@app.route("/google/auth/start/", methods=['GET', 'POST'])
def google_auth_start():
"""Initiate google authorization"""
flow = OAuth2WebServerFlow(
settings.GOOGLE_CLIENT_ID,
settings.GOOGLE_CLIENT_SECRET,
_GOOGLE_OAUTH_SCOPES,
redirect_uri='https://'+request.host+url_for('google_auth_verify')
)
authorize_url = flow.step1_get_authorize_url()
return redirect(authorize_url)
@app.route("/google/auth/verify/", methods=['GET', 'POST'])
def google_auth_verify():
"""Finalize google authorization"""
try:
if 'error' in request.args:
raise Exception(_format_err(
'Error getting authorization', request.args.get('error')))
code = _request_get_required('code')
flow = OAuth2WebServerFlow(
settings.GOOGLE_CLIENT_ID,
settings.GOOGLE_CLIENT_SECRET,
_GOOGLE_OAUTH_SCOPES,
redirect_uri='https://'+request.host+url_for('google_auth_verify')
)
credentials = flow.step2_exchange(code)
# ^ this is an oauth2client.client.OAuth2Credentials object
# Get user info
userinfo = google.get_userinfo(
google.get_userinfo_service(credentials))
if not userinfo:
raise Exception('Could not get Google user info')
info = {
'id': userinfo.get('id'),
'name': userinfo.get('name'),
'credentials': credentials.to_json()
}
if not info['id']:
raise Exception('Could not get Google user ID')
# Upsert user record
uid = _get_uid('google:'+info['id'])
user = _user.find_one({'uid': uid})
if user:
user['google'] = info
else:
user = {
'uid': uid,
'migrated': 0,
'storymaps': {},
'google': info
}
user['uname'] = info['name']
_user.save(user)
# Update session
session['uid'] = uid
return redirect(url_for('select'))
except Exception, e:
traceback.print_exc()
return jsonify({'error': str(e)})
#
# Misc
#
def _user_get():
"""Enforce authenticated user"""
uid = session.get('uid')
if not uid:
return redirect(url_for('select'))
user = _user.find_one({'uid': uid})
if not user:
session.pop('uid')
return redirect(url_for('select'))
return user
def require_user(f):
"""
Decorator to enforce authenticated user
Adds user to request and kwargs
"""
@wraps(f)
def decorated_function(*args, **kwargs):
user = _user_get()
request.user = user
kwargs['user'] = user
return f(*args, **kwargs)
return decorated_function
def require_user_id(template=None):
"""
Decorator to enfore storymap access for authenticated user
Adds user to request and kwargs, adds id to kwargs
"""
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
user = _user_get()
id = _request_get_required('id')
if id not in user['storymaps']:
                error = 'You do not have permission to access this StoryMap'
if template:
del user['_id'] # for serialization
return render_template('edit.html', user=user, error=error)
else:
return jsonify({'error': error})
request.user = user
kwargs['user'] = user
kwargs['id'] = id
return f(*args, **kwargs)
return decorated_function
return decorator
def _make_storymap_id(user, title):
"""Get unique storymap id from slugified title"""
id_set = set(user['storymaps'].keys())
# Add keys from S3 (in case of db issues)
user_key_prefix = storage.key_prefix(user['uid'])
regex = re.compile(r'^%s([^/]+).*' % user_key_prefix)
name_list, more = storage.list_key_names(user_key_prefix, 999, '')
for name in name_list:
m = regex.match(name)
if m:
id_set.add(m.group(1))
id_base = slugify.slugify(title)
id = id_base
n = 0
while id in id_set:
n += 1
id = '%s-%d' % (id_base, n)
return id
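# For illustration: if the user already has a storymap with id "my-map", a new
# one titled "My Map" slugifies to "my-map" and gets disambiguated to "my-map-1".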
def _parse_url(url):
"""Parse url into (scheme, netloc, path, filename)"""
r = urlparse(url)
parts = r.path.split('/')
return {
'scheme': r.scheme or 'https', # embeds go on S3, which should always be https
'netloc': r.netloc,
'path': r.path
}
def _fix_url_for_opengraph(url):
parts = _parse_url(url)
parts['path'] = urllib.quote(parts['path'])
return '%(scheme)s://%(netloc)s%(path)s' % parts
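# For illustration: _fix_url_for_opengraph("http://example.com/my map/x.html")
# returns "http://example.com/my%20map/x.html" -- only the path is quoted, so
# the Open Graph tags written by _write_embed below get clean absolute URLs.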
def _write_embed(embed_key_name, json_key_name, meta):
"""Write embed page"""
image_url = meta.get('image_url', settings.STATIC_URL+'img/logos/logo_storymap.png')
# NOTE: facebook needs the protocol on embed_url and image_url for og tag
content = render_template('_embed.html',
embed_url=_fix_url_for_opengraph(settings.AWS_STORAGE_BUCKET_URL+embed_key_name),
json_url=urllib.quote(settings.AWS_STORAGE_BUCKET_URL+json_key_name),
title=meta.get('title', ''),
description=meta.get('description', ''),
image_url=_fix_url_for_opengraph(image_url)
)
storage.save_from_data(embed_key_name, 'text/html', content)
def _write_embed_draft(key_prefix, meta):
"""Write embed page for draft storymap """
_write_embed(key_prefix+'draft.html', key_prefix+'draft.json', meta)
def _write_embed_published(key_prefix, meta):
"""Write embed for published storymap"""
_write_embed(key_prefix+'index.html', key_prefix+'published.json', meta)
#
# API views
# (called from the select page)
#
@app.route('/storymap/update/meta/', methods=['GET', 'POST'])
@require_user_id()
def storymap_update_meta(user, id):
"""Update storymap meta value"""
try:
key, value = _request_get_required('key', 'value')
user['storymaps'][id][key] = value
_user.save(user)
key_prefix = storage.key_prefix(user['uid'], id)
if key in ['title', 'description', 'image_url']:
_write_embed_draft(key_prefix, user['storymaps'][id])
if user['storymaps'][id].get('published_on'):
_write_embed_published(key_prefix, user['storymaps'][id])
return jsonify(user['storymaps'][id])
except Exception, e:
traceback.print_exc()
return jsonify({'error': str(e)})
@app.route('/storymap/copy/', methods=['GET', 'POST'])
@require_user_id()
def storymap_copy(user, id):
"""
Copy storymap
@id = storymap to copy
@title = name of new copy
"""
try:
title = _request_get_required('title')
dst_id = _make_storymap_id(user, title)
src_key_prefix = storage.key_prefix(user['uid'], id)
dst_key_prefix = storage.key_prefix(user['uid'], dst_id)
src_re = re.compile(r'%s' % src_key_prefix)
src_key_list, more = storage.list_keys(src_key_prefix, 999, '')
for src_key in src_key_list:
file_name = src_key.name.split(src_key_prefix)[-1]
dst_key_name = "%s%s" % (dst_key_prefix, file_name)
if file_name.endswith('.json'):
json_string = src_key.get_contents_as_string()
storage.save_json(dst_key_name,
src_re.sub(dst_key_prefix, json_string))
else:
storage.copy_key(src_key.name, dst_key_name)
# Update meta
user['storymaps'][dst_id] = {
'id': dst_id,
'title': title,
'draft_on': user['storymaps'][id]['draft_on'],
'published_on': user['storymaps'][id]['published_on']
}
_user.save(user)
# Write new embed pages
_write_embed_draft(dst_key_prefix, user['storymaps'][dst_id])
if user['storymaps'][dst_id].get('published_on'):
_write_embed_published(dst_key_prefix, user['storymaps'][dst_id])
return jsonify(user['storymaps'][dst_id])
except Exception, e:
traceback.print_exc()
return jsonify({'error': str(e)})
@app.route('/storymap/delete/')
@require_user_id()
def storymap_delete(user, id):
"""Delete storymap"""
try:
key_name = storage.key_name(user['uid'], id)
key_list, marker = storage.list_keys(key_name, 50)
for key in key_list:
storage.delete(key.name);
del user['storymaps'][id]
_user.save(user)
return jsonify({})
except Exception, e:
traceback.print_exc()
return jsonify({'error': str(e)})
@app.route('/storymap/create/', methods=['POST'])
@require_user
def storymap_create(user):
"""Create a storymap"""
try:
title, data = _request_get_required('title', 'd')
id = _make_storymap_id(user, title)
key_prefix = storage.key_prefix(user['uid'], id)
content = json.loads(data)
storage.save_json(key_prefix+'draft.json', content)
user['storymaps'][id] = {
'id': id,
'title': title,
'draft_on': _utc_now(),
'published_on': ''
}
_user.save(user)
_write_embed_draft(key_prefix, user['storymaps'][id])
return jsonify({'id': id})
except Exception, e:
traceback.print_exc()
return jsonify({'error': str(e)})
@app.route('/storymap/migrate/done/', methods=['GET'])
@require_user
def storymap_migrate_done(user):
"""Flag user as migrated"""
try:
user['migrated'] = 1
_user.save(user)
return jsonify({})
except Exception, e:
traceback.print_exc()
return jsonify({'error': str(e)})
@app.route('/storymap/migrate/list/', methods=['GET', 'POST'])
@require_user
def storymap_migrate_list(user):
"""Get list of storymaps that still need to be migrated"""
try:
credentials = google.get_credentials(user['google']['credentials'])
drive_service = google.get_drive_service(credentials)
existing = [d['title'] for (k, d) in user['storymaps'].items()]
temp_list = google.drive_get_migrate_list(drive_service)
migrate_list = [r for r in temp_list if r['title'] not in existing]
return jsonify({'migrate_list': migrate_list})
except Exception, e:
traceback.print_exc()
return jsonify({'error': str(e)})
@app.route('/storymap/migrate/', methods=['POST'])
@require_user
def storymap_migrate(user):
"""
Migrate a storymap
@title = storymap title
@url = storymap base url
@draft_on = ...
@published_on = ...
@file_list = json encoded list of file names
"""
try:
title, src_url, draft_on, file_list_json = _request_get_required(
'title', 'url', 'draft_on', 'file_list')
published_on = _request_get('published_on')
file_list = json.loads(file_list_json)
dst_id = _make_storymap_id(user, title)
dst_key_prefix = storage.key_prefix(user['uid'], dst_id)
dst_url = settings.AWS_STORAGE_BUCKET_URL+dst_key_prefix
dst_img_url = dst_url+'_images/'
re_img = re.compile(r'.*\.(png|gif|jpg|jpeg)$', re.I)
re_src = re.compile(r'%s' % src_url)
for file_name in file_list:
file_url = "%s%s" % (src_url, file_name)
if file_name.endswith('.json'):
key_name = storage.key_name(user['uid'], dst_id, file_name)
r = requests.get(file_url)
storage.save_json(key_name, re_src.sub(dst_img_url, r.text))
elif re_img.match(file_name):
key_name = storage.key_name(user['uid'], dst_id, '_images', file_name)
storage.save_from_url(key_name, file_url)
else:
continue # skip
user['storymaps'][dst_id] = {
'id': dst_id,
'title': title,
'draft_on': draft_on,
'published_on': published_on
}
_user.save(user)
_write_embed_draft(dst_key_prefix, user['storymaps'][dst_id])
if published_on:
_write_embed_published(dst_key_prefix, user['storymaps'][dst_id])
return jsonify(user['storymaps'][dst_id])
except Exception, e:
traceback.print_exc()
return jsonify({'error': str(e)})
#
# API views
# (called from the edit page)
#
@app.route('/storymap/')
@require_user_id()
def storymap_get(user, id):
"""Get storymap"""
try:
key_name = storage.key_name(user['uid'], id, 'draft.json')
data = storage.load_json(key_name)
return jsonify({'meta': user['storymaps'][id], 'data': data})
except storage.StorageException, e:
traceback.print_exc()
return jsonify({'error': str(e), 'error_detail': e.detail})
except Exception, e:
traceback.print_exc()
return jsonify({'error': str(e)})
@app.route('/storymap/save/', methods=['POST'])
@require_user_id()
def storymap_save(user, id):
"""Save draft storymap"""
try:
data = _request_get_required('d')
key_name = storage.key_name(user['uid'], id, 'draft.json')
content = json.loads(data)
storage.save_json(key_name, content)
user['storymaps'][id]['draft_on'] = _utc_now()
_user.save(user)
return jsonify({'meta': user['storymaps'][id]})
except storage.StorageException, e:
traceback.print_exc()
return jsonify({'error': str(e), 'error_detail': e.detail})
except Exception, e:
traceback.print_exc()
return jsonify({'error': str(e)})
@app.route('/storymap/publish/', methods=['POST'])
@require_user_id()
def storymap_publish(user, id):
"""Save published storymap"""
try:
data = _request_get_required('d')
key_prefix = storage.key_prefix(user['uid'], id)
content = json.loads(data)
storage.save_json(key_prefix+'published.json', content)
user['storymaps'][id]['published_on'] = _utc_now()
_user.save(user)
_write_embed_published(key_prefix, user['storymaps'][id])
return jsonify({'meta': user['storymaps'][id]})
except storage.StorageException, e:
traceback.print_exc()
return jsonify({'error': str(e), 'error_detail': e.detail})
except Exception, e:
traceback.print_exc()
return jsonify({'error': str(e)})
@app.route('/storymap/image/list/', methods=['GET', 'POST'])
@require_user_id()
def storymap_image_list(user, id):
"""List storymap images """
try:
key_prefix = storage.key_prefix(user['uid'], id, '_images')
key_list, more = storage.list_key_names(key_prefix, 999, '')
image_list = [n.split('/')[-1] for n in key_list]
return jsonify({'image_list': image_list})
except storage.StorageException, e:
traceback.print_exc()
return jsonify({'error': str(e), 'error_detail': e.detail})
except Exception, e:
traceback.print_exc()
return jsonify({'error': str(e)})
@app.route('/storymap/image/save/', methods=['POST'])
@require_user_id()
def storymap_image_save(user, id):
"""
Save storymap image
@id = storymap id
@name = file name
@content = data:URL representing the file's data as base64 encoded string
"""
try:
name, content = _request_get_required('name', 'content')
m = re.match('data:(.+);base64,(.+)', content)
if m:
content_type = m.group(1)
content = m.group(2).decode('base64')
else:
raise Exception('Expected content as data-url')
key_name = storage.key_name(user['uid'], id, '_images', name)
storage.save_from_data(key_name, content_type, content)
return jsonify({'url': settings.AWS_STORAGE_BUCKET_URL+key_name})
except storage.StorageException, e:
traceback.print_exc()
return jsonify({'error': str(e), 'error_detail': e.detail})
except Exception, e:
traceback.print_exc()
return jsonify({'error': str(e)})
#
# Views
#
@app.route("/")
def index():
return render_template('index.html')
@app.route("/gigapixel/")
def gigapixel():
return render_template('gigapixel.html')
@app.route("/advanced/")
def advanced():
return render_template('advanced.html')
@app.route("/examples/<name>/")
def examples(name):
return render_template('examples/%s.html' % name)
@app.route("/logout/")
def logout():
_session_pop('uid')
return redirect('https://www.google.com/accounts/Logout')
@app.route("/userinfo/")
def userinfo():
import pprint
uid = session.get('uid')
user = None
migrate_data = None
if uid:
user = _user.find_one({'uid': uid})
if user:
if not user['migrated']:
migrate_data = google.drive_get_migration_diagnostics(user)
del user['_id']
try:
del user['google']['credentials']
except KeyError: pass
user = pprint.pformat(user, indent=4)
migrate_data = pprint.pformat(migrate_data, indent=4)
return render_template('userinfo.html',
uid=uid, user=user, migrate_data=migrate_data)
@app.route("/select.html/", methods=['GET', 'POST'])
@app.route("/edit.html/", methods=['GET', 'POST'])
def legacy_redirect():
"""Legacy redirect"""
return redirect(url_for('select'))
@app.route("/select/", methods=['GET', 'POST'])
def select():
try:
uid = session.get('uid')
if not uid:
return render_template('select.html')
user = _user.find_one({'uid': uid})
if not user:
_session_pop('uid')
return render_template('select.html')
del user['_id']
return render_template('select.html', user=user)
except Exception, e:
traceback.print_exc()
return render_template('select.html', error=str(e))
@app.route("/edit/", methods=['GET', 'POST'])
@require_user_id('edit.html')
def edit(user, id):
try:
del user['_id'] # for serialization
return render_template('edit.html',
user=user, meta=user['storymaps'][id])
except Exception, e:
traceback.print_exc()
return render_template('edit.html', error=str(e))
@app.route('/admin/')
@require_user
def admin(user):
if not user['uid'] in settings.ADMINS:
abort(401)
return render_template('/admin/index.html')
@app.route('/admin/users/')
@require_user
def admin_users(user):
if not user['uid'] in settings.ADMINS:
abort(401)
args = request.args.copy()
page = int(args.pop('page', 1))
rpp = int(request.args.get('rpp', 100))
skip = (page - 1) * rpp
files = defaultdict(list)
users = []
query = {}
if args.get('uname'):
if args.get('unamesearch') == 'is':
query.update({ 'uname': args['uname'] })
else:
query.update({ 'uname':{'$regex': args['uname'], '$options': 'i'}})
if args.get('uid'):
query.update({ 'uid': args['uid'] })
migrated = args.get('migrated')
if migrated == 'migrated':
query.update({ 'migrated': 1 })
elif migrated == 'unmigrated':
query.update({ 'migrated': 0 })
for k in storage.all_keys():
uid = k.split('/')[1]
files[uid].append(k)
pages = 0
if query:
for u in _user.find(query, skip=skip, limit=rpp):
u.update({ 'files': files[u['uid']] })
users.append(u)
pages = int(math.ceil(_user.find(query).count() / rpp))
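        # e.g. 250 matching users with rpp=100 gives ceil(2.5) = 3 pages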
return render_template('admin/users.html', **{
'users': users,
'page': page,
'pages': pages,
'args': args,
'querystring': urllib.urlencode(args.items()),
'storage_root': settings.AWS_STORAGE_BUCKET_URL
})
@app.route('/admin/unmatched-files')
@require_user
def admin_unmatched_files(user):
if not user['uid'] in settings.ADMINS:
abort(401)
files = defaultdict(list)
users = []
for k in storage.all_keys():
uid = k.split('/')[1]
files[uid].append(k)
for u in _user.find():
try:
del files[u['uid']]
except KeyError:
pass
return _jsonify(files)
@app.route("/qunit/", methods=['GET'])
def qunit():
return render_template('qunit.html')
#
# FOR DEVELOPMENT
# SERVE URLS FROM DIRECTORIES
#
from flask import send_from_directory
build_dir = os.path.join(settings.PROJECT_ROOT, 'build')
compiled_dir = os.path.join(settings.PROJECT_ROOT, 'compiled')
templates_dir = os.path.join(settings.PROJECT_ROOT, 'compiled/templates')
@app.route('/build/embed/')
def catch_build_embed():
return send_from_directory(build_dir, 'embed/index.html')
@app.route('/build/<path:path>')
def catch_build(path):
return send_from_directory(build_dir, path)
@app.route('/compiled/<path:path>')
def catch_compiled(path):
return send_from_directory(compiled_dir, path)
@app.route('/editor/templates/<path:path>')
def catch_compiled_templates(path):
return send_from_directory(templates_dir, path)
# redirect old documentation URLs
@app.route('/<path:path>')
def redirect_old_urls(path):
if path.endswith('.html'):
return redirect(url_for(path.split('.')[0]))
abort(404)
if __name__ == '__main__':
import getopt
# Add current directory to sys.path
site_dir = os.path.dirname(os.path.abspath(__file__))
if site_dir not in sys.path:
sys.path.append(site_dir)
ssl_context = None
port = 5000
try:
opts, args = getopt.getopt(sys.argv[1:], "sp:", ["port="])
for opt, arg in opts:
if opt == '-s':
from OpenSSL import SSL
ssl_context = SSL.Context(SSL.SSLv23_METHOD)
ssl_context.use_privatekey_file(os.path.join(site_dir, 'website', 'website.key'))
ssl_context.use_certificate_file(os.path.join(site_dir, 'website', 'website.crt'))
elif opt in ('-p', '--port'):
port = int(arg)
else:
print 'Usage: app.py [-s]'
sys.exit(1)
except getopt.GetoptError:
print 'Usage: app.py [-s] [-p port]'
sys.exit(1)
app.run(host='0.0.0.0', port=port, debug=True, ssl_context=ssl_context)
| mpl-2.0 | -3,265,961,351,194,360,000 | 28.89099 | 103 | 0.587563 | false |
midokura/python-midonetclient | src/midonetclient/neutron/port.py | 1 | 3343 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2014 Midokura Europe SARL, All Rights Reserved.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ryu Ishimoto <[email protected]>, Midokura
import logging
from midonetclient import util
from midonetclient import vendor_media_type as mt
from midonetclient.neutron import bridge, router
LOG = logging.getLogger(__name__)
class PortUrlProviderMixin(bridge.BridgeUrlProviderMixin,
router.RouterUrlProviderMixin):
"""Port URL provider mixin
This mixin provides URLs for ports.
"""
def port_url(self, port_id):
return self.template_url("port_template", port_id)
def ports_url(self):
return self.resource_url("ports")
def bridge_ports_url(self, br_id):
return self.bridge_url(br_id) + "/ports"
def router_ports_url(self, rtr_id):
return self.router_url(rtr_id) + "/ports"
def link_url(self, port_id):
return self.port_url(port_id) + "/link"
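# Illustrative only: port_url("1234") fills the API's "port_template" URI
# template with that id, while bridge_ports_url(br_id) simply appends "/ports"
# to the bridge resource URL; absolute URLs depend on the MidoNet API root.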
class PortClientMixin(PortUrlProviderMixin):
"""Port mixin
Mixin that defines all the Neutron port operations in MidoNet API.
"""
@util.convert_case
def create_port(self, port):
LOG.info("create_port %r", port)
# convert_case converted to camel
if port["type"].lower() == "router":
url = self.router_ports_url(port["deviceId"])
else:
url = self.bridge_ports_url(port["deviceId"])
return self.client.post(url, mt.APPLICATION_PORT_JSON, body=port)
def delete_port(self, port_id):
LOG.info("delete_port %r", port_id)
self.client.delete(self.port_url(port_id))
@util.convert_case
def get_port(self, port_id, fields=None):
LOG.info("get_port %r", port_id)
return self.client.get(self.port_url(port_id),
mt.APPLICATION_PORT_JSON)
@util.convert_case
def get_ports(self, filters=None, fields=None, sorts=None, limit=None,
marker=None, page_reverse=False):
LOG.info("get_ports")
return self.client.get(self.ports_url(),
mt.APPLICATION_PORT_COLLECTION_JSON)
@util.convert_case
def update_port(self, port):
LOG.info("update_port %r", port)
return self.client.put(self.port_url(port["id"]),
mt.APPLICATION_PORT_JSON, port)
@util.convert_case
def link_port(self, link):
LOG.info("link_port %r", link)
# convert_case converted to camel
return self.client.post(self.link_url(link["portId"]),
mt.APPLICATION_PORT_LINK_JSON, body=link)
def unlink_port(self, port_id):
LOG.info("unlink_port %r", port_id)
self.client.delete(self.link_url(port_id))
| apache-2.0 | -2,826,083,030,494,583,300 | 32.43 | 75 | 0.642537 | false |
Fxrh/tispa-wm | libqtile/dgroups.py | 1 | 6283 | import itertools
import gobject
import libqtile.hook
from libqtile.config import Key
from libqtile.command import lazy
def simple_key_binder(mod, keynames=None):
"""
Bind keys to mod+group position or to the keys specified as
second argument.
"""
def func(dgroup):
# unbind all
for key in dgroup.keys[:]:
dgroup.qtile.unmapKey(key)
dgroup.keys.remove(key)
if keynames:
keys = keynames
else:
# keys 1 to 9 and 0
keys = map(str, range(1, 10) + [0])
# bind all keys
for keyname, group in zip(keys, dgroup.qtile.groups):
name = group.name
key = Key([mod], keyname, lazy.group[name].toscreen())
key_s = Key([mod, "shift"], keyname, lazy.window.togroup(name))
key_c = Key([mod, "control"], keyname,
lazy.group.switch_groups(name))
dgroup.keys.append(key)
dgroup.keys.append(key_s)
dgroup.keys.append(key_c)
dgroup.qtile.mapKey(key)
dgroup.qtile.mapKey(key_s)
dgroup.qtile.mapKey(key_c)
return func
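# Typical usage in a qtile config (illustrative, not part of this module):
#   dgroups_key_binder = simple_key_binder("mod4")
# binds mod4+1..9,0 to view each group, mod4+shift+<key> to move the focused
# window there, and mod4+control+<key> to swap the current group with it.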
class Rule(object):
""" A Rule contains a Match object, and a specification about what to do
when that object is matched. """
def __init__(self, match, group=None, float=False, intrusive=False):
"""
:param match: ``Match`` object associated with this ``Rule``
:param float: auto float this window?
:param intrusive: override the group's exclusive setting?
"""
self.match = match
self.group = group
self.float = float
self.intrusive = intrusive
def matches(self, w):
return self.match.compare(w)
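# Hypothetical example (Match is assumed to come from libqtile.config):
#   Rule(Match(wm_class=["Gimp"]), group="gfx", float=True)
# would send GIMP windows to the "gfx" group and float them.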
class DGroups(object):
''' Dynamic Groups '''
def __init__(self, qtile, dgroups, key_binder=None, delay=1):
self.qtile = qtile
self.groups = dgroups
self.groupMap = {}
for group in self.groups:
self.groupMap[group.name] = group
self.rules = list(itertools.chain.from_iterable([g.rules for g in dgroups]))
for group in dgroups:
rules = [Rule(m, group=group.name) for m in group.matches]
self.rules.extend(rules)
self.keys = []
self.key_binder = key_binder
self._setup_hooks()
self._setup_groups()
self.delay = delay
self.timeout = {}
def _setup_groups(self):
for group in self.groups:
if group.init:
self.qtile.addGroup(group.name)
if group.spawn and not self.qtile.no_spawn:
self.qtile.cmd_spawn(group.spawn)
def _setup_hooks(self):
libqtile.hook.subscribe.client_new(self._add)
libqtile.hook.subscribe.client_killed(self._del)
if self.key_binder:
libqtile.hook.subscribe.setgroup(
lambda: self.key_binder(self))
libqtile.hook.subscribe.changegroup(
lambda: self.key_binder(self))
def shuffle_groups(self, lst, match):
masters = []
for client in lst:
if match.compare(client):
masters.append(client)
for master in masters:
lst.remove(master)
lst.insert(0, master)
def _add(self, client):
if client in self.timeout:
self.qtile.log.info('Remove dgroup source')
gobject.source_remove(self.timeout[client])
del(self.timeout[client])
# ignore static windows
if client.defunct:
return
group_set = False
intrusive = False
for rule in self.rules:
# Matching Rules
if rule.matches(client):
if rule.group:
group_added = self.qtile.addGroup(rule.group)
client.togroup(rule.group)
group_set = True
group_obj = self.qtile.groupMap[rule.group]
group = self.groupMap.get(rule.group)
if group:
if group_added:
layout = group.layout
ratio = group.ratio
if layout:
group_obj.layout = layout
if ratio:
group_obj.ratio = ratio
master = group.master
if master:
group_obj.layout.shuffle(
lambda lst: self.shuffle_groups(
lst, master))
if rule.float:
client.enablefloating()
if rule.intrusive:
intrusive = group.intrusive
# If app doesn't have a group
if not group_set:
current_group = self.qtile.currentGroup.name
if current_group in self.groups and\
self.groupMap[current_group].exclusive and\
not intrusive:
wm_class = client.window.get_wm_class()
if wm_class:
if len(wm_class) > 1:
wm_class = wm_class[1]
else:
wm_class = wm_class[0]
group_name = wm_class
else:
group_name = client.name
if not group_name:
group_name = "Unnamed"
self.qtile.addGroup(group_name)
client.togroup(group_name)
def _del(self, client):
group = client.group
def delete_client():
# Delete group if empty and dont persist
if group and \
self.groupMap[group.name] in self.groups and \
not self.groupMap[group.name].persist and \
len(group.windows) <= 0:
self.qtile.delGroup(group.name)
# wait the delay until really delete the group
self.qtile.log.info('Add dgroup timer')
self.timeout[client] = gobject.timeout_add_seconds(self.delay,
delete_client)
| gpl-3.0 | -4,683,357,861,695,413,000 | 31.554404 | 84 | 0.512017 | false |
chromium/chromium | tools/json_to_struct/element_generator_test.py | 5 | 8623 | #!/usr/bin/env python3
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from element_generator import GenerateFieldContent
from element_generator import GenerateElements
import unittest
class ElementGeneratorTest(unittest.TestCase):
def testGenerateIntFieldContent(self):
lines = [];
GenerateFieldContent('', {'type': 'int', 'default': 5}, None, lines, ' ',
{})
self.assertEquals([' 5,'], lines)
lines = [];
GenerateFieldContent('', {'type': 'int', 'default': 5}, 12, lines, ' ', {})
self.assertEquals([' 12,'], lines)
lines = [];
GenerateFieldContent('', {'type': 'int'}, -3, lines, ' ', {})
self.assertEquals([' -3,'], lines)
def testGenerateStringFieldContent(self):
lines = [];
GenerateFieldContent('', {'type': 'string', 'default': 'foo_bar'}, None,
lines, ' ', {})
self.assertEquals([' "foo_bar",'], lines)
lines = [];
GenerateFieldContent('', {'type': 'string', 'default': 'foo'}, 'bar\n',
lines, ' ', {})
self.assertEquals([' "bar\\n",'], lines)
lines = [];
GenerateFieldContent('', {'type': 'string'}, None, lines, ' ', {})
self.assertEquals([' NULL,'], lines)
lines = [];
GenerateFieldContent('', {'type': 'string'}, 'foo', lines, ' ', {})
self.assertEquals([' "foo",'], lines)
def testGenerateString16FieldContent(self):
lines = [];
GenerateFieldContent('', {'type': 'string16',
'default': u'f\u00d8\u00d81a'},
None, lines, ' ', {})
self.assertEquals([' L"f\\x00d8" L"\\x00d8" L"1a",'], lines)
lines = [];
GenerateFieldContent('', {'type': 'string16', 'default': 'foo'},
u'b\uc3a5r', lines, ' ', {})
self.assertEquals([' L"b\\xc3a5" L"r",'], lines)
lines = [];
GenerateFieldContent('', {'type': 'string16'}, None, lines, ' ', {})
self.assertEquals([' NULL,'], lines)
lines = [];
GenerateFieldContent('', {'type': 'string16'}, u'foo\\u1234', lines, ' ',
{})
self.assertEquals([' L"foo\\\\u1234",'], lines)
def testGenerateEnumFieldContent(self):
lines = [];
GenerateFieldContent('', {'type': 'enum', 'default': 'RED'}, None, lines,
' ', {})
self.assertEquals([' RED,'], lines)
lines = [];
GenerateFieldContent('', {'type': 'enum', 'default': 'RED'}, 'BLACK', lines,
' ', {})
self.assertEquals([' BLACK,'], lines)
lines = [];
GenerateFieldContent('', {'type': 'enum'}, 'BLUE', lines, ' ', {})
self.assertEquals([' BLUE,'], lines)
def testGenerateClassFieldContent(self):
lines = []
GenerateFieldContent('', {
'type': 'class',
'default': 'absl::nullopt'
}, None, lines, ' ', {})
self.assertEquals([' absl::nullopt,'], lines)
lines = []
GenerateFieldContent('', {
'type': 'class',
'default': 'absl::nullopt'
}, 'true', lines, ' ', {})
self.assertEquals([' true,'], lines)
lines = []
GenerateFieldContent('', {'type': 'class'}, 'false', lines, ' ', {})
self.assertEquals([' false,'], lines)
def testGenerateArrayFieldContent(self):
lines = ['STRUCT BEGINS'];
GenerateFieldContent('test', {'type': 'array', 'contents': {'type': 'int'}},
None, lines, ' ', {})
self.assertEquals(['STRUCT BEGINS', ' NULL,', ' 0,'], lines)
lines = ['STRUCT BEGINS'];
GenerateFieldContent('test', {'field': 'my_array', 'type': 'array',
'contents': {'type': 'int'}},
[3, 4], lines, ' ', {})
self.assertEquals('const int array_test_my_array[] = {\n' +
' 3,\n' +
' 4,\n' +
'};\n' +
'STRUCT BEGINS\n' +
' array_test_my_array,\n' +
' 2,', '\n'.join(lines))
lines = ['STRUCT BEGINS'];
GenerateFieldContent('test', {'field': 'my_array', 'type': 'array',
'contents': {'type': 'int'}},
[3, 4], lines, ' ', {'array_test_my_array': 1})
self.assertEquals('const int array_test_my_array_1[] = {\n' +
' 3,\n' +
' 4,\n' +
'};\n' +
'STRUCT BEGINS\n' +
' array_test_my_array_1,\n' +
' 2,', '\n'.join(lines))
def testGenerateElements(self):
schema = [
{'field': 'f0', 'type': 'int', 'default': 1000, 'optional': True},
{'field': 'f1', 'type': 'string'},
{'field': 'f2', 'type': 'enum', 'ctype': 'QuasiBool', 'default': 'MAYBE',
'optional': True},
{'field': 'f3', 'type': 'array', 'contents': {'type': 'string16'},
'optional': True},
{
'field': 'f4',
'type': 'struct',
'type_name': 'InnerType',
'fields': [
{'field': 'g0', 'type': 'string'}
],
'optional': True
},
{
'field': 'f5',
'type': 'array',
'contents': {
'type': 'struct',
'type_name': 'InnerType',
'fields': [
{'field': 'a0', 'type': 'string'},
{'field': 'a1', 'type': 'string'}
]
},
'optional': True
}
]
description = {
'int_variables': {'a': -5, 'b': 5},
'elements': {
'elem0': {'f0': 5, 'f1': 'foo', 'f2': 'SURE'},
'elem1': {'f2': 'NOWAY', 'f0': -2, 'f1': 'bar'},
'elem2': {'f1': 'foo_bar', 'f3': [u'bar', u'foo']},
'elem3': {'f1': 'foo', 'f4': {'g0': 'test'}},
'elem4': {'f1': 'foo', 'f5': [{'a0': 'test0', 'a1': 'test1'}]},
}
}
# Build the expected result stream based on the unpredicatble order the
# dictionary element are listed in.
int_variable_expected = {
'a': 'const int a = -5;\n',
'b': 'const int b = 5;\n',
}
elements_expected = {
'elem0': 'const MyType elem0 = {\n'
' 5,\n'
' "foo",\n'
' SURE,\n'
' NULL,\n'
' 0,\n'
' {0},\n'
' NULL,\n'
' 0,\n'
'};\n',
'elem1': 'const MyType elem1 = {\n'
' -2,\n'
' "bar",\n'
' NOWAY,\n'
' NULL,\n'
' 0,\n'
' {0},\n'
' NULL,\n'
' 0,\n'
'};\n',
'elem2': 'const wchar_t* const array_elem2_f3[] = {\n'
' L"bar",\n'
' L"foo",\n'
'};\n'
'const MyType elem2 = {\n'
' 1000,\n'
' "foo_bar",\n'
' MAYBE,\n'
' array_elem2_f3,\n'
' 2,\n'
' {0},\n'
' NULL,\n'
' 0,\n'
'};\n',
'elem3': 'const MyType elem3 = {\n'
' 1000,\n'
' "foo",\n'
' MAYBE,\n'
' NULL,\n'
' 0,\n'
' {\n'
' "test",\n'
' },\n'
' NULL,\n'
' 0,\n'
'};\n',
'elem4': 'const InnerType array_elem4_f5[] = {\n'
' {\n'
' "test0",\n'
' "test1",\n'
' },\n'
'};\n'
'const MyType elem4 = {\n'
' 1000,\n'
' "foo",\n'
' MAYBE,\n'
' NULL,\n'
' 0,\n'
' {0},\n'
' array_elem4_f5,\n'
' 1,\n'
'};\n'
}
expected = ''
for key, value in description['int_variables'].items():
expected += int_variable_expected[key]
expected += '\n'
elements = []
for key, value in description['elements'].items():
elements.append(elements_expected[key])
expected += '\n'.join(elements)
result = GenerateElements('MyType', schema, description)
self.assertEquals(expected, result)
def testGenerateElementsMissingMandatoryField(self):
schema = [
{'field': 'f0', 'type': 'int'},
{'field': 'f1', 'type': 'string'},
]
description = {
'int_variables': {'a': -5, 'b': 5},
'elements': {
'elem0': {'f0': 5},
}
}
self.assertRaises(RuntimeError,
lambda: GenerateElements('MyType', schema, description))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -692,229,356,921,500,400 | 32.815686 | 80 | 0.43987 | false |
liw/daos | src/tests/ftest/harness/advanced.py | 1 | 2765 | #!/usr/bin/python
"""
(C) Copyright 2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
from random import choice
from re import findall
from apricot import TestWithServers
from general_utils import run_pcmd
class HarnessAdvancedTest(TestWithServers):
"""Advanced harness test cases.
:avocado: recursive
"""
def test_core_files(self):
"""Test to verify core file creation.
This test will send a signal 6 to a random daos_engine process so
that it will create a core file, allowing the core file collection code
in launch.py to be tested.
This test can be run in any CI stage: vm, small, medium, large
:avocado: tags=all
:avocado: tags=harness,harness_advanced_test,test_core_files
"""
        # Choose a server and find the pid of its daos_engine process
host = choice(self.server_managers[0].hosts)
self.log.info("Obtaining pid of the daos_engine process on %s", host)
pid = None
result = run_pcmd([host], "pgrep --list-full daos_engine", 20)
index = 0
while not pid and index < len(result):
output = "\n".join(result[index]["stdout"])
match = findall(r"(\d+)\s+[A-Za-z0-9/]+", output)
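            # e.g. a line like "12345 /usr/bin/daos_engine ..." would yield
            # "12345" as the first match (illustrative output only)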
if match:
pid = match[0]
index += 1
if pid is None:
self.fail(
"Error obtaining pid of the daos_engine process on "
"{}".format(host))
self.log.info("Found pid %s", pid)
# Send a signal 6 to its daos_engine process
self.log.info("Sending a signal 6 to %s", pid)
result = run_pcmd([host], "sudo kill -6 {}".format(pid))
if len(result) > 1 or result[0]["exit_status"] != 0:
self.fail("Error sending a signal 6 to {} on {}".format(pid, host))
# Display the journalctl log for the process that was sent the signal
self.server_managers[0].manager.dump_logs([host])
# Simplify resolving the host name to rank by marking all ranks as
# expected to be either running or errored (sent a signal 6)
self.server_managers[0].update_expected_states(
None, ["Joined", "Errored"])
def test_core_files_hw(self):
"""Test to verify core file creation.
This test will send a signal 6 to a random daos_engine process so
that it will create a core file, allowing the core file collection code
in launch.py to be tested.
This test can be run in any CI stage: vm, small, medium, large
:avocado: tags=all
:avocado: tags=hw,small,medium,ib2,large
:avocado: tags=harness,harness_advanced_test,test_core_files
"""
self.test_core_files()
| apache-2.0 | 8,386,738,577,204,103,000 | 34.909091 | 79 | 0.614105 | false |
jboissard/mathExperiments | fractals/timetables.py | 1 | 2204 | """
Generate timetable
https://www.youtube.com/watch?v=qhbuKbxJsk8
"""
import svgwrite
import numpy as np
(h,w) = (700, 1500)
svg = svgwrite.Drawing(filename = "timetable.svg", size = (str(w)+"px", str(h)+"px"))
def coordFromAngle(alpha):
x = cr*np.cos(alpha) + cx
y = cr*np.sin(alpha) + cy
return (x,y)
def addLine(c1, c2):
svg.add(svg.line(c1, c2, stroke=svgwrite.rgb(10, 10, 16, '%')))
def addCircle(c, r):
svg.add(svg.circle(
center = c, r=r,
stroke_width = "1",
stroke = "black",
fill = "white"
))
# calculate (x,y) coordinates for the given angles around the circle
# param pts: list of angles in radians
def calculateCoordinate(pts):
n = len(pts)
coords = np.empty([n,2])
for (idx,angle) in enumerate(pts):
coords[idx] = coordFromAngle(angle)
return coords
def nrOnSegment(x,s):
return x >= 0 and x <= s
def pointIsInFrame(c):
return nrOnSegment(c[0], w) and nrOnSegment(c[1], h)
# calculates extended coordinates (lines come out of circle)
# arg c1, c2 two points on circle
def extendedCoordinates(c1, c2):
# slope
m = (c2[1]-c1[1])/(c2[0]-c1[0])
q = c2[1] - m*c2[0]
def y(x):
return m*x + q
def x(y):
return (y - q)/m
    # calculates intersections with external boundary
d1 = (0, y(0))
d2 = (w, y(w))
d3 = (x(0), 0)
d4 = (x(h), h)
l = np.array([d1,d2,d3,d4])
    # keep only the two intersection points that fall inside the frame
r = np.empty([2,2])
j = 0
for pt in l:
if pointIsInFrame(pt):
r[j] = pt
j += 1
return r
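# Illustrative example of the mapping used below: with n = 10 points and
# factor m = 2, point 7 is joined to point (7*2) % 10 = 4; doing this for
# every point produces the cardioid-like "times table" pattern.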
# main circle coordinates
(cx,cy,cr) = (w/2,h/2,280)
# add surrounding circle
addCircle((cx,cy), cr)
# number of points around circle
n = 1200 #1200
# multiplicative factor
# # interesting numbers (n,m)
# 100: 501, 999, 996, 59, 69, 499
# # 1200: 501
m = 59#999#501
angles = np.linspace(0, 2*np.pi, n+1)
#print angles
# get coords for all points
coords = calculateCoordinate(angles)
for (idx,angle) in enumerate(angles):
idx2 = idx*m%n
#addLine(coords[idx], coords[idx2])
c = extendedCoordinates(coords[idx], coords[idx2])
addLine(c[0], c[1])
# draw small circles on circle - interesting for small `n`
#for (idx,angle) in enumerate(angles):
# addCircle(coords[idx], 3)
svg.save() | apache-2.0 | 6,018,870,589,145,324,000 | 17.529412 | 85 | 0.624773 | false |
centrumholdings/buildbot | buildbot/steps/transfer.py | 1 | 16507 | # -*- test-case-name: buildbot.test.test_transfer -*-
import os.path, tarfile, tempfile
from twisted.internet import reactor
from twisted.spread import pb
from twisted.python import log
from buildbot.process.buildstep import RemoteCommand, BuildStep
from buildbot.process.buildstep import SUCCESS, FAILURE, SKIPPED
from buildbot.interfaces import BuildSlaveTooOldError
class _FileWriter(pb.Referenceable):
"""
Helper class that acts as a file-object with write access
"""
def __init__(self, destfile, maxsize, mode):
# Create missing directories.
destfile = os.path.abspath(destfile)
dirname = os.path.dirname(destfile)
if not os.path.exists(dirname):
os.makedirs(dirname)
self.destfile = destfile
self.fp = open(destfile, "wb")
if mode is not None:
os.chmod(destfile, mode)
self.remaining = maxsize
def remote_write(self, data):
"""
Called from remote slave to write L{data} to L{fp} within boundaries
of L{maxsize}
@type data: C{string}
@param data: String of data to write
"""
if self.remaining is not None:
if len(data) > self.remaining:
data = data[:self.remaining]
self.fp.write(data)
self.remaining = self.remaining - len(data)
else:
self.fp.write(data)
def remote_close(self):
"""
        Called by remote slave to state that no more data will be transferred
"""
self.fp.close()
self.fp = None
def __del__(self):
# unclean shutdown, the file is probably truncated, so delete it
# altogether rather than deliver a corrupted file
fp = getattr(self, "fp", None)
if fp:
fp.close()
os.unlink(self.destfile)
def _extractall(self, path=".", members=None):
"""Fallback extractall method for TarFile, in case it doesn't have its own."""
import copy
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0700
self.extract(tarinfo, path)
# Reverse sort directories.
directories.sort(lambda a, b: cmp(a.name, b.name))
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except tarfile.ExtractError, e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
class _DirectoryWriter(_FileWriter):
"""
A DirectoryWriter is implemented as a FileWriter, with an added post-processing
step to unpack the archive, once the transfer has completed.
"""
def __init__(self, destroot, maxsize, compress, mode):
self.destroot = destroot
self.fd, self.tarname = tempfile.mkstemp()
self.compress = compress
_FileWriter.__init__(self, self.tarname, maxsize, mode)
def remote_unpack(self):
"""
        Called by remote slave to state that no more data will be transferred
"""
if self.fp:
self.fp.close()
self.fp = None
fileobj = os.fdopen(self.fd, 'rb')
if self.compress == 'bz2':
mode='r|bz2'
elif self.compress == 'gz':
mode='r|gz'
else:
mode = 'r'
if not hasattr(tarfile.TarFile, 'extractall'):
tarfile.TarFile.extractall = _extractall
archive = tarfile.open(name=self.tarname, mode=mode, fileobj=fileobj)
archive.extractall(path=self.destroot)
archive.close()
fileobj.close()
os.remove(self.tarname)
class StatusRemoteCommand(RemoteCommand):
def __init__(self, remote_command, args):
RemoteCommand.__init__(self, remote_command, args)
self.rc = None
self.stderr = ''
def remoteUpdate(self, update):
#log.msg('StatusRemoteCommand: update=%r' % update)
if 'rc' in update:
self.rc = update['rc']
if 'stderr' in update:
self.stderr = self.stderr + update['stderr'] + '\n'
class _TransferBuildStep(BuildStep):
"""
Base class for FileUpload and FileDownload to factor out common
functionality.
"""
DEFAULT_WORKDIR = "build" # is this redundant?
def setDefaultWorkdir(self, workdir):
if self.workdir is None:
self.workdir = workdir
def _getWorkdir(self):
properties = self.build.getProperties()
if self.workdir is None:
workdir = self.DEFAULT_WORKDIR
else:
workdir = self.workdir
return properties.render(workdir)
def finished(self, result):
# Subclasses may choose to skip a transfer. In those cases, self.cmd
# will be None, and we should just let BuildStep.finished() handle
# the rest
if result == SKIPPED:
return BuildStep.finished(self, SKIPPED)
if self.cmd.stderr != '':
self.addCompleteLog('stderr', self.cmd.stderr)
if self.cmd.rc is None or self.cmd.rc == 0:
return BuildStep.finished(self, SUCCESS)
return BuildStep.finished(self, FAILURE)
class FileUpload(_TransferBuildStep):
"""
Build step to transfer a file from the slave to the master.
arguments:
- ['slavesrc'] filename of source file at slave, relative to workdir
- ['masterdest'] filename of destination file at master
- ['workdir'] string with slave working directory relative to builder
base dir, default 'build'
- ['maxsize'] maximum size of the file, default None (=unlimited)
    - ['blocksize'] maximum size of each block being transferred
- ['mode'] file access mode for the resulting master-side file.
The default (=None) is to leave it up to the umask of
the buildmaster process.
"""
name = 'upload'
def __init__(self, slavesrc, masterdest,
workdir=None, maxsize=None, blocksize=16*1024, mode=None,
**buildstep_kwargs):
BuildStep.__init__(self, **buildstep_kwargs)
self.addFactoryArguments(slavesrc=slavesrc,
masterdest=masterdest,
workdir=workdir,
maxsize=maxsize,
blocksize=blocksize,
mode=mode,
)
self.slavesrc = slavesrc
self.masterdest = masterdest
self.workdir = workdir
self.maxsize = maxsize
self.blocksize = blocksize
assert isinstance(mode, (int, type(None)))
self.mode = mode
def start(self):
version = self.slaveVersion("uploadFile")
properties = self.build.getProperties()
if not version:
m = "slave is too old, does not know about uploadFile"
raise BuildSlaveTooOldError(m)
source = properties.render(self.slavesrc)
masterdest = properties.render(self.masterdest)
# we rely upon the fact that the buildmaster runs chdir'ed into its
# basedir to make sure that relative paths in masterdest are expanded
# properly. TODO: maybe pass the master's basedir all the way down
# into the BuildStep so we can do this better.
masterdest = os.path.expanduser(masterdest)
log.msg("FileUpload started, from slave %r to master %r"
% (source, masterdest))
self.step_status.setText(['uploading', os.path.basename(source)])
# we use maxsize to limit the amount of data on both sides
fileWriter = _FileWriter(masterdest, self.maxsize, self.mode)
# default arguments
args = {
'slavesrc': source,
'workdir': self._getWorkdir(),
'writer': fileWriter,
'maxsize': self.maxsize,
'blocksize': self.blocksize,
}
self.cmd = StatusRemoteCommand('uploadFile', args)
d = self.runCommand(self.cmd)
d.addCallback(self.finished).addErrback(self.failed)
class DirectoryUpload(BuildStep):
"""
Build step to transfer a directory from the slave to the master.
arguments:
- ['slavesrc'] name of source directory at slave, relative to workdir
- ['masterdest'] name of destination directory at master
- ['workdir'] string with slave working directory relative to builder
base dir, default 'build'
- ['maxsize'] maximum size of the compressed tarfile containing the
whole directory
    - ['blocksize'] maximum size of each block being transferred
- ['compress'] compression type to use: one of [None, 'gz', 'bz2']
"""
name = 'upload'
def __init__(self, slavesrc, masterdest,
workdir="build", maxsize=None, blocksize=16*1024,
compress=None, **buildstep_kwargs):
BuildStep.__init__(self, **buildstep_kwargs)
self.addFactoryArguments(slavesrc=slavesrc,
masterdest=masterdest,
workdir=workdir,
maxsize=maxsize,
blocksize=blocksize,
compress=compress,
)
self.slavesrc = slavesrc
self.masterdest = masterdest
self.workdir = workdir
self.maxsize = maxsize
self.blocksize = blocksize
assert compress in (None, 'gz', 'bz2')
self.compress = compress
def start(self):
version = self.slaveVersion("uploadDirectory")
properties = self.build.getProperties()
if not version:
m = "slave is too old, does not know about uploadDirectory"
raise BuildSlaveTooOldError(m)
source = properties.render(self.slavesrc)
masterdest = properties.render(self.masterdest)
# we rely upon the fact that the buildmaster runs chdir'ed into its
# basedir to make sure that relative paths in masterdest are expanded
# properly. TODO: maybe pass the master's basedir all the way down
# into the BuildStep so we can do this better.
masterdest = os.path.expanduser(masterdest)
log.msg("DirectoryUpload started, from slave %r to master %r"
% (source, masterdest))
self.step_status.setText(['uploading', os.path.basename(source)])
# we use maxsize to limit the amount of data on both sides
dirWriter = _DirectoryWriter(masterdest, self.maxsize, self.compress, 0600)
# default arguments
args = {
'slavesrc': source,
'workdir': self.workdir,
'writer': dirWriter,
'maxsize': self.maxsize,
'blocksize': self.blocksize,
'compress': self.compress
}
self.cmd = StatusRemoteCommand('uploadDirectory', args)
d = self.runCommand(self.cmd)
d.addCallback(self.finished).addErrback(self.failed)
def finished(self, result):
# Subclasses may choose to skip a transfer. In those cases, self.cmd
# will be None, and we should just let BuildStep.finished() handle
# the rest
if result == SKIPPED:
return BuildStep.finished(self, SKIPPED)
if self.cmd.stderr != '':
self.addCompleteLog('stderr', self.cmd.stderr)
if self.cmd.rc is None or self.cmd.rc == 0:
return BuildStep.finished(self, SUCCESS)
return BuildStep.finished(self, FAILURE)
class _FileReader(pb.Referenceable):
"""
Helper class that acts as a file-object with read access
"""
def __init__(self, fp):
self.fp = fp
def remote_read(self, maxlength):
"""
Called from remote slave to read at most L{maxlength} bytes of data
@type maxlength: C{integer}
@param maxlength: Maximum number of data bytes that can be returned
@return: Data read from L{fp}
@rtype: C{string} of bytes read from file
"""
if self.fp is None:
return ''
data = self.fp.read(maxlength)
return data
def remote_close(self):
"""
        Called by remote slave to state that no more data will be transferred
"""
if self.fp is not None:
self.fp.close()
self.fp = None
class FileDownload(_TransferBuildStep):
"""
Download the first 'maxsize' bytes of a file, from the buildmaster to the
buildslave. Set the mode of the file
Arguments::
['mastersrc'] filename of source file at master
['slavedest'] filename of destination file at slave
['workdir'] string with slave working directory relative to builder
base dir, default 'build'
['maxsize'] maximum size of the file, default None (=unlimited)
     ['blocksize'] maximum size of each block being transferred
['mode'] use this to set the access permissions of the resulting
buildslave-side file. This is traditionally an octal
integer, like 0644 to be world-readable (but not
world-writable), or 0600 to only be readable by
the buildslave account, or 0755 to be world-executable.
The default (=None) is to leave it up to the umask of
the buildslave process.
"""
name = 'download'
def __init__(self, mastersrc, slavedest,
workdir=None, maxsize=None, blocksize=16*1024, mode=None,
**buildstep_kwargs):
BuildStep.__init__(self, **buildstep_kwargs)
self.addFactoryArguments(mastersrc=mastersrc,
slavedest=slavedest,
workdir=workdir,
maxsize=maxsize,
blocksize=blocksize,
mode=mode,
)
self.mastersrc = mastersrc
self.slavedest = slavedest
self.workdir = workdir
self.maxsize = maxsize
self.blocksize = blocksize
assert isinstance(mode, (int, type(None)))
self.mode = mode
def start(self):
properties = self.build.getProperties()
version = self.slaveVersion("downloadFile")
if not version:
m = "slave is too old, does not know about downloadFile"
raise BuildSlaveTooOldError(m)
# we are currently in the buildmaster's basedir, so any non-absolute
# paths will be interpreted relative to that
source = os.path.expanduser(properties.render(self.mastersrc))
slavedest = properties.render(self.slavedest)
log.msg("FileDownload started, from master %r to slave %r" %
(source, slavedest))
self.step_status.setText(['downloading', "to",
os.path.basename(slavedest)])
# setup structures for reading the file
try:
fp = open(source, 'rb')
except IOError:
# if file does not exist, bail out with an error
self.addCompleteLog('stderr',
'File %r not available at master' % source)
# TODO: once BuildStep.start() gets rewritten to use
# maybeDeferred, just re-raise the exception here.
reactor.callLater(0, BuildStep.finished, self, FAILURE)
return
fileReader = _FileReader(fp)
# default arguments
args = {
'slavedest': slavedest,
'maxsize': self.maxsize,
'reader': fileReader,
'blocksize': self.blocksize,
'workdir': self._getWorkdir(),
'mode': self.mode,
}
self.cmd = StatusRemoteCommand('downloadFile', args)
d = self.runCommand(self.cmd)
d.addCallback(self.finished).addErrback(self.failed)
| gpl-2.0 | -1,128,012,311,006,084,400 | 34.121277 | 83 | 0.585691 | false |
mbalasso/mynumpy | numpy/polynomial/tests/test_legendre.py | 1 | 16656 | """Tests for legendre module.
"""
from __future__ import division
import numpy as np
import numpy.polynomial.legendre as leg
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
L0 = np.array([ 1])
L1 = np.array([ 0, 1])
L2 = np.array([-1, 0, 3])/2
L3 = np.array([ 0, -3, 0, 5])/2
L4 = np.array([ 3, 0, -30, 0, 35])/8
L5 = np.array([ 0, 15, 0, -70, 0, 63])/8
L6 = np.array([-5, 0, 105, 0,-315, 0, 231])/16
L7 = np.array([ 0,-35, 0, 315, 0, -693, 0, 429])/16
L8 = np.array([35, 0,-1260, 0,6930, 0,-12012, 0,6435])/128
L9 = np.array([ 0,315, 0,-4620, 0,18018, 0,-25740, 0,12155])/128
Llist = [L0, L1, L2, L3, L4, L5, L6, L7, L8, L9]
def trim(x) :
return leg.legtrim(x, tol=1e-6)
class TestConstants(TestCase) :
def test_legdomain(self) :
assert_equal(leg.legdomain, [-1, 1])
def test_legzero(self) :
assert_equal(leg.legzero, [0])
def test_legone(self) :
assert_equal(leg.legone, [1])
def test_legx(self) :
assert_equal(leg.legx, [0, 1])
class TestArithmetic(TestCase) :
x = np.linspace(-1, 1, 100)
def test_legadd(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
tgt = np.zeros(max(i,j) + 1)
tgt[i] += 1
tgt[j] += 1
res = leg.legadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_legsub(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
tgt = np.zeros(max(i,j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = leg.legsub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_legmulx(self):
assert_equal(leg.legmulx([0]), [0])
assert_equal(leg.legmulx([1]), [0,1])
for i in range(1, 5):
tmp = 2*i + 1
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [i/tmp, 0, (i + 1)/tmp]
assert_equal(leg.legmulx(ser), tgt)
def test_legmul(self) :
# check values of result
for i in range(5) :
pol1 = [0]*i + [1]
val1 = leg.legval(self.x, pol1)
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
pol2 = [0]*j + [1]
val2 = leg.legval(self.x, pol2)
pol3 = leg.legmul(pol1, pol2)
val3 = leg.legval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1*val2, err_msg=msg)
def test_legdiv(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = leg.legadd(ci, cj)
quo, rem = leg.legdiv(tgt, ci)
res = leg.legadd(leg.legmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(TestCase) :
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2., 2., 2.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_legval(self) :
#check empty input
assert_equal(leg.legval([], [1]).size, 0)
#check normal input)
x = np.linspace(-1,1)
y = [polyval(x, c) for c in Llist]
for i in range(10) :
msg = "At i=%d" % i
ser = np.zeros
tgt = y[i]
res = leg.legval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3) :
dims = [2]*i
x = np.zeros(dims)
assert_equal(leg.legval(x, [1]).shape, dims)
assert_equal(leg.legval(x, [1,0]).shape, dims)
assert_equal(leg.legval(x, [1,0,0]).shape, dims)
def test_legval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = leg.legval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2,3))
res = leg.legval2d(z, z, self.c2d)
assert_(res.shape == (2,3))
def test_legval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = leg.legval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2,3))
res = leg.legval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_leggrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = leg.leggrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2,3))
res = leg.leggrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_leggrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = leg.leggrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2,3))
res = leg.leggrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(TestCase) :
def test_legint(self) :
# check exceptions
assert_raises(ValueError, leg.legint, [0], .5)
assert_raises(ValueError, leg.legint, [0], -1)
assert_raises(ValueError, leg.legint, [0], 1, [0,0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = leg.legint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5) :
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
legpol = leg.poly2leg(pol)
legint = leg.legint(legpol, m=1, k=[i])
res = leg.leg2poly(legint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5) :
scl = i + 1
pol = [0]*i + [1]
legpol = leg.poly2leg(pol)
legint = leg.legint(legpol, m=1, k=[i], lbnd=-1)
assert_almost_equal(leg.legval(-1, legint), i)
# check single integration with integration constant and scaling
for i in range(5) :
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
legpol = leg.poly2leg(pol)
legint = leg.legint(legpol, m=1, k=[i], scl=2)
res = leg.leg2poly(legint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = leg.legint(tgt, m=1)
res = leg.legint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = leg.legint(tgt, m=1, k=[k])
res = leg.legint(pol, m=j, k=range(j))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = leg.legint(tgt, m=1, k=[k], lbnd=-1)
res = leg.legint(pol, m=j, k=range(j), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = leg.legint(tgt, m=1, k=[k], scl=2)
res = leg.legint(pol, m=j, k=range(j), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_legint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([leg.legint(c) for c in c2d.T]).T
res = leg.legint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([leg.legint(c) for c in c2d])
res = leg.legint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([leg.legint(c, k=3) for c in c2d])
res = leg.legint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(TestCase) :
def test_legder(self) :
# check exceptions
assert_raises(ValueError, leg.legder, [0], .5)
assert_raises(ValueError, leg.legder, [0], -1)
# check that zeroth deriviative does nothing
for i in range(5) :
tgt = [0]*i + [1]
res = leg.legder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5) :
for j in range(2,5) :
tgt = [0]*i + [1]
res = leg.legder(leg.legint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5) :
for j in range(2,5) :
tgt = [0]*i + [1]
res = leg.legder(leg.legint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_legder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([leg.legder(c) for c in c2d.T]).T
res = leg.legder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([leg.legder(c) for c in c2d])
res = leg.legder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(TestCase):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_legvander(self) :
# check for 1d x
x = np.arange(3)
v = leg.legvander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4) :
coef = [0]*i + [1]
assert_almost_equal(v[..., i], leg.legval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = leg.legvander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4) :
coef = [0]*i + [1]
assert_almost_equal(v[..., i], leg.legval(x, coef))
def test_legvander2d(self) :
# also tests polyval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = leg.legvander2d(x1, x2, [1, 2])
tgt = leg.legval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = leg.legvander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_legvander3d(self) :
# also tests polyval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = leg.legvander3d(x1, x2, x3, [1, 2, 3])
tgt = leg.legval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = leg.legvander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestFitting(TestCase):
def test_legfit(self) :
def f(x) :
return x*(x - 1)*(x - 2)
# Test exceptions
assert_raises(ValueError, leg.legfit, [1], [1], -1)
assert_raises(TypeError, leg.legfit, [[1]], [1], 0)
assert_raises(TypeError, leg.legfit, [], [1], 0)
assert_raises(TypeError, leg.legfit, [1], [[[1]]], 0)
assert_raises(TypeError, leg.legfit, [1, 2], [1], 0)
assert_raises(TypeError, leg.legfit, [1], [1, 2], 0)
assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, leg.legfit, [1], [1], 0, w=[1,1])
# Test fit
x = np.linspace(0,2)
y = f(x)
#
coef3 = leg.legfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(leg.legval(x, coef3), y)
#
coef4 = leg.legfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(leg.legval(x, coef4), y)
#
coef2d = leg.legfit(x, np.array([y,y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3,coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
y[0::2] = 0
wcoef3 = leg.legfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = leg.legfit(x, np.array([yw,yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)
# test scaling with complex values x points whose square
# is zero when summed.
x = [1, 1j, -1, -1j]
assert_almost_equal(leg.legfit(x, x, 1), [0, 1])
class TestCompanion(TestCase):
def test_raises(self):
assert_raises(ValueError, leg.legcompanion, [])
assert_raises(ValueError, leg.legcompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0]*i + [1]
assert_(leg.legcompanion(coef).shape == (i, i))
def test_linear_root(self):
assert_(leg.legcompanion([1, 2])[0, 0] == -.5)
class TestGauss(TestCase):
def test_100(self):
x, w = leg.leggauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = leg.legvander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1/np.sqrt(vv.diagonal())
vv = vd[:,None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = 2.0
assert_almost_equal(w.sum(), tgt)
class TestMisc(TestCase) :
def test_legfromroots(self) :
res = leg.legfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1,5) :
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
pol = leg.legfromroots(roots)
res = leg.legval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(leg.leg2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_legroots(self) :
assert_almost_equal(leg.legroots([1]), [])
assert_almost_equal(leg.legroots([1, 2]), [-.5])
for i in range(2,5) :
tgt = np.linspace(-1, 1, i)
res = leg.legroots(leg.legfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_legtrim(self) :
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, leg.legtrim, coef, -1)
# Test results
assert_equal(leg.legtrim(coef), coef[:-1])
assert_equal(leg.legtrim(coef, 1), coef[:-3])
assert_equal(leg.legtrim(coef, 2), [0])
def test_legline(self) :
assert_equal(leg.legline(3,4), [3, 4])
def test_leg2poly(self) :
for i in range(10) :
assert_almost_equal(leg.leg2poly([0]*i + [1]), Llist[i])
def test_poly2leg(self) :
for i in range(10) :
assert_almost_equal(leg.poly2leg(Llist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(-1, 1, 11)
tgt = 1.
res = leg.legweight(x)
assert_almost_equal(res, tgt)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | 6,226,740,885,202,528,000 | 31.15444 | 75 | 0.495617 | false |
psi29a/django-authopenid | django_authopenid/urls.py | 1 | 3016 | # -*- coding: utf-8 -*-
# Copyright 2007, 2008,2009 by Benoît Chesneau <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns, url
from django.views.generic import TemplateView
# views
from django.contrib.auth import views as auth_views
from django_authopenid import views as oid_views
from registration import views as reg_views
urlpatterns = patterns('',
# django registration activate
url(r'^activate/complete/$',
TemplateView.as_view(template_name='registration/activation_complete.html'),
name='registration_activation_complete'),
url(r'^activate/(?P<activation_key>\w+)/$',
reg_views.activate,
{'backend': 'registration.backends.default.DefaultBackend'},
name='registration_activate'),
# user profile
url(r'^password/reset/$', auth_views.password_reset, name='auth_password_reset'),
url(r'^password/reset/confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$',
auth_views.password_reset_confirm,
name='auth_password_reset_confirm'),
url(r'^password/reset/complete/$',
auth_views.password_reset_complete,
name='auth_password_reset_complete'),
url(r'^password/reset/done/$',
auth_views.password_reset_done,
name='auth_password_reset_done'),
url(r'^password/$',oid_views.password_change, name='auth_password_change'),
# manage account registration
url(r'^associate/complete/$', oid_views.complete_associate, name='user_complete_associate'),
url(r'^associate/$', oid_views.associate, name='user_associate'),
url(r'^dissociate/$', oid_views.dissociate, name='user_dissociate'),
url(r'^register/$', oid_views.register, name='user_register'),
url(r'^signout/$', oid_views.signout, name='user_signout'),
url(r'^signin/complete/$', oid_views.complete_signin, name='user_complete_signin'),
url(r'^signin/$', oid_views.signin, name='user_signin'),
url(r'^signup/$',
reg_views.register,
{'backend': 'registration.backends.default.DefaultBackend'},
name='registration_register'),
url(r'^signup/complete/$',
TemplateView.as_view(template_name='registration/registration_complete.html'),
name='registration_complete'),
url(r'^signup/closed/$',
TemplateView.as_view(template_name='registration/registration_closed.html'),
name='registration_disallowed'),
# yadis uri
url(r'^yadis.xrdf$', oid_views.xrdf, name='oid_xrdf'),
)
| apache-2.0 | 6,625,465,566,086,668,000 | 42.071429 | 96 | 0.688557 | false |
xupingmao/xnote | core/xauth.py | 1 | 13386 | # encoding=utf-8
import os
import hashlib
import copy
import web
import xconfig
import xutils
import xmanager
import warnings
import time
from xutils import ConfigParser, textutil, dbutil, fsutil
from xutils import Storage
from xutils.functions import listremove
dbutil.register_table("user", "用户信息表")
dbutil.register_table("session", "用户会话信息")
dbutil.register_table("user_session_rel", "用户会话关系")
# 用户配置
_users = None
NAME_LENGTH_MIN = 4
INVALID_NAMES = fsutil.load_set_config("./config/user/invalid_names.list")
MAX_SESSION_SIZE = 20
SESSION_EXPIRE = 24 * 3600 * 7
PRINT_DEBUG_LOG = False
def log_debug(fmt, *args):
if PRINT_DEBUG_LOG:
print("[xauth]", fmt.format(*args))
def is_valid_username(name):
"""有效的用户名为字母+数字"""
if name in INVALID_NAMES:
return False
if len(name) < NAME_LENGTH_MIN:
return False
return name.isalnum()
def _create_temp_user(temp_users, user_name):
temp_users[user_name] = Storage(name = user_name,
password = "123456",
salt = "",
mtime = "",
token = gen_new_token())
def _get_users(force_reload = False):
"""获取用户,内部接口"""
global _users
# 有并发风险
if _users is not None and not force_reload:
return _users
temp_users = {}
# 初始化默认的用户
_create_temp_user(temp_users, "admin")
_create_temp_user(temp_users, "test")
user_list = dbutil.prefix_list("user")
for user in user_list:
if user.name is None:
xutils.trace("UserList", "invalid user %s" % user)
continue
if isinstance(user.config, dict):
user.config = Storage(**user.config)
else:
user.config = Storage()
name = user.name.lower()
temp_users[name] = user
_users = temp_users
return _users
def get_users():
"""获取所有用户,返回一个深度拷贝版本"""
return copy.deepcopy(_get_users())
def list_user_names():
users = _get_users()
return list(users.keys())
def refresh_users():
xutils.trace("ReLoadUsers", "reload users")
return _get_users(force_reload = True)
def get_user(name):
warnings.warn("get_user已经过时,请使用 get_user_by_name", DeprecationWarning)
return find_by_name(name)
def get_user_by_name(user_name):
return find_by_name(user_name)
def create_uuid():
import uuid
return uuid.uuid4().hex
def get_valid_session_by_id(sid):
session_info = dbutil.get("session:%s" % sid)
if session_info is None:
return None
if session_info.user_name is None:
dbutil.delete("session:%s" % sid)
return None
if time.time() > session_info.expire_time:
dbutil.delete("session:%s" % sid)
return None
return session_info
def list_user_session_id(user_name):
session_id_list = dbutil.get("user_session_rel:%s" % user_name)
if session_id_list is None:
return []
expire_id_set = set()
for sid in session_id_list:
session_info = get_valid_session_by_id(sid)
if session_info is None:
expire_id_set.add(sid)
for sid in expire_id_set:
listremove(session_id_list, sid)
return session_id_list
def list_user_session_detail(user_name):
session_id_list = list_user_session_id(user_name)
session_detail_list = []
for sid in session_id_list:
detail = get_valid_session_by_id(sid)
if detail != None:
session_detail_list.append(detail)
return session_detail_list
def create_user_session(user_name, expires = SESSION_EXPIRE, login_ip = None):
user_detail = get_user_by_name(user_name)
if user_detail is None:
raise Exception("user not found: %s" % user_name)
session_id = create_uuid()
session_id_list = list_user_session_id(user_name)
if len(session_id_list) > MAX_SESSION_SIZE:
# TODO 踢出最早的登录
raise Exception("user login too many devices: %s" % user_name)
# 保存用户和会话关系
session_id_list.append(session_id)
dbutil.put("user_session_rel:%s" % user_name, session_id_list)
# 保存会话信息
session_info = Storage(user_name = user_name,
sid = session_id,
token = user_detail.token,
login_time = time.time(),
login_ip = login_ip,
expire_time = time.time() + expires)
dbutil.put("session:%s" % session_id, session_info)
print("session_info:", session_info)
return session_id
def delete_user_session_by_id(sid):
# 登录的时候会自动清理无效的sid关系
dbutil.delete("session:%s" % sid)
def find_by_name(name):
if name is None:
return None
users = _get_users()
name = name.lower()
user_info = users.get(name)
if user_info != None:
return Storage(**user_info)
return None
def get_user_config_dict(name):
user = get_user(name)
if user != None:
if user.config is None:
user.config = Storage()
return user.config
return None
def get_user_config(user_name, config_key):
config_dict = get_user_config_dict(user_name)
if config_dict is None:
return None
return config_dict.get(config_key)
def update_user_config_dict(name, config_dict):
user = get_user(name)
if user is None:
return
config = get_user_config_dict(name)
config.update(**config_dict)
user.config = config
update_user(name, user)
def select_first(filter_func):
users = _get_users()
for item in users.values():
if filter_func(item):
return item
def get_user_from_token():
token = xutils.get_argument("token")
if token != None and token != "":
return select_first(lambda x: x.token == token)
def get_user_password(name):
users = _get_users()
return users[name]["password"]
def get_user_password_md5(user_name, use_salt = True):
user = get_user(user_name)
password = user.password
salt = user.salt
if use_salt:
return encode_password(password, salt)
else:
return encode_password(password, None)
def get_session_id_from_cookie():
cookies = web.cookies()
return cookies.get("sid")
def get_user_from_cookie():
session_id = get_session_id_from_cookie()
session_info = get_valid_session_by_id(session_id)
if session_info is None:
return None
log_debug("get_user_from_cookie: sid={}, session_info={}", session_id, session_info)
return get_user_by_name(session_info.user_name)
def get_current_user():
if xconfig.IS_TEST:
return get_user("test")
user = get_user_from_token()
if user != None:
return user
if not hasattr(web.ctx, "env"):
# 尚未完成初始化
return None
return get_user_from_cookie()
def current_user():
return get_current_user()
def get_current_name():
"""获取当前用户名"""
user = get_current_user()
if user is None:
return None
return user.get("name")
def current_name():
return get_current_name()
def get_current_role():
"""获取当前用户的角色"""
user = get_current_user()
if user is None:
return None
name = user.get("name")
if name == "admin":
return "admin"
else:
return "user"
def current_role():
return get_current_role()
def get_md5_hex(pswd):
pswd_md5 = hashlib.md5()
pswd_md5.update(pswd.encode("utf-8"))
return pswd_md5.hexdigest()
def encode_password(password, salt):
# 加上日期防止cookie泄露意义不大
# 考虑使用session失效检测或者定时提醒更新密码
# password = password + xutils.format_date()
if password is None:
password = ""
pswd_md5 = hashlib.md5()
pswd_md5.update(password.encode("utf-8"))
if salt != None:
pswd_md5.update(salt.encode("utf-8"))
return pswd_md5.hexdigest()
def write_cookie_old(name):
web.setcookie("xuser", name, expires= 24*3600*30)
pswd_md5 = get_user_password_md5(name)
web.setcookie("xpass", pswd_md5, expires=24*3600*30)
def write_cookie(user_name):
session_id = create_user_session(user_name)
web.setcookie("sid", session_id)
def get_user_cookie(name):
session_list = list_user_session_detail(name)
if len(session_list) == 0:
sid = create_user_session(name, login_ip = "system")
else:
sid = session_list[0].sid
return "sid=%s" % sid
def gen_new_token():
import uuid
return uuid.uuid4().hex
def create_user(name, password):
if name == "" or name == None:
return dict(code = "PARAM_ERROR", message = "name为空")
if password == "" or password == None:
return dict(code = "PARAM_ERROR", message = "password为空")
if not is_valid_username(name):
return dict(code = "INVALID_NAME", message="非法的用户名")
name = name.lower()
found = find_by_name(name)
if found is not None:
return dict(code = "fail", message = "用户已存在")
else:
user = Storage(name=name,
password=password,
token=gen_new_token(),
ctime=xutils.format_time(),
salt=textutil.random_string(6),
mtime=xutils.format_time())
dbutil.put("user:%s" % name, user)
xutils.trace("UserAdd", name)
refresh_users()
return dict(code = "success", message = "create success")
def update_user(name, user):
if name == "" or name == None:
return
name = name.lower()
mem_user = find_by_name(name)
if mem_user is None:
raise Exception("user not found")
password_new = user.get("password")
password_old = mem_user.get("password")
mem_user.update(user)
mem_user.mtime = xutils.format_time()
if password_new != None and password_old != password_new:
# 修改密码
mem_user.salt = textutil.random_string(6)
mem_user.token = gen_new_token()
dbutil.put("user:%s" % name, mem_user)
xutils.trace("UserUpdate", mem_user)
refresh_users()
# 刷新完成之后再发送消息
xmanager.fire("user.update", dict(user_name = name))
def delete_user(name):
if name == "admin":
return
name = name.lower()
dbutil.delete("user:%s" % name)
refresh_users()
def has_login_by_cookie_old(name = None):
cookies = web.cookies()
name_in_cookie = cookies.get("xuser")
pswd_in_cookie = cookies.get("xpass")
# TODO 不同地方调用结果不一致
# print(name, name_in_cookie)
if name is not None and name_in_cookie != name:
return False
name = name_in_cookie
if name == "" or name is None:
return False
user = get_user_by_name(name)
if user is None:
return False
password_md5 = encode_password(user["password"], user["salt"])
return password_md5 == pswd_in_cookie
def has_login_by_cookie(name = None):
cookies = web.cookies()
session_id = cookies.get("sid")
session_info = get_valid_session_by_id(session_id)
if session_info is None:
return False
name_in_cookie = session_info.user_name
log_debug("has_login_by_cookie: name={}, name_in_cookie={}", name, name_in_cookie)
if name is not None and name_in_cookie != name:
return False
name = name_in_cookie
if name == "" or name is None:
return False
user = get_user_by_name(name)
if user is None:
return False
return user.token == session_info.token
def has_login(name=None):
"""验证是否登陆
如果``name``指定,则只能该用户名通过验证
"""
if xconfig.IS_TEST:
return True
# 优先使用token
user = get_user_from_token()
if user != None:
if name is None:
return True
return user.get("name") == name
return has_login_by_cookie(name)
def is_admin():
return xconfig.IS_TEST or has_login("admin")
def check_login(user_name=None):
if has_login(user_name):
return
if has_login():
xutils.log("unauthorized visit, user:%s, url:%s" % (user_name, web.ctx.path))
raise web.seeother("/unauthorized")
# 跳转到登陆URL
redirect_to_login()
def redirect_to_login():
path = web.ctx.fullpath
raise web.seeother("/login?target=" + xutils.encode_uri_component(path))
def login_required(user_name=None):
"""管理员验证装饰器"""
def deco(func):
def handle(*args, **kw):
check_login(user_name)
ret = func(*args, **kw)
return ret
return handle
return deco
def get_user_data_dir(user_name, mkdirs = False):
fpath = os.path.join(xconfig.DATA_DIR, "files", user_name)
if mkdirs:
fsutil.makedirs(fpath)
return fpath
def login_user_by_name(user_name, login_ip = None):
session_id = create_user_session(user_name, login_ip = login_ip)
web.setcookie("sid", session_id)
# 更新最近的登录时间
update_user(user_name, dict(login_time=xutils.format_datetime()))
def logout_current_user():
sid = get_session_id_from_cookie()
delete_user_session_by_id(sid)
xutils.register_func("user.get_config_dict", get_user_config_dict)
xutils.register_func("user.get_config", get_user_config)
| gpl-3.0 | -575,988,175,758,931,840 | 25.05668 | 88 | 0.621271 | false |
dosaboy/basejmpr | basejmpr/domain/utils.py | 1 | 7510 | # Author: Edward Hope-Morley ([email protected])
# Description: QEMU Base Image Management Utility
# Copyright (C) 2017 Edward Hope-Morley
#
# License:
#
# This file is part of basejmpr.
#
# basejmpr is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# basejmpr is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with basejmpr. If not, see <http://www.gnu.org/licenses/>.
import os
import re
import shutil
import subprocess
import uuid
import tempfile
from jinja2 import Environment, PackageLoader
def render_templates(ctxt, dom_path, dom_templates, local_path,
local_templates):
env = Environment()
env.loader = PackageLoader('basejmpr.domain', 'templates')
# Expect to fail if exists
os.makedirs(dom_path)
for t in dom_templates:
rt = env.get_template(t).render(**ctxt)
with open(os.path.join(dom_path, t), 'w') as fd:
fd.write(rt)
for t in local_templates:
rt = env.get_template(t).render(**ctxt)
with open(os.path.join(local_path, t), 'w') as fd:
fd.write(rt)
def domain_exists(name):
out = subprocess.check_output(['virsh', 'list', '--all'])
key = re.compile(r' %s ' % name)
result = re.search(key, out)
return result is not None and result.group(0).strip() == name
def create_domains(root, base_root, revision, series, num_domains,
base_revisions, domain_name_prefix, root_disk_size,
ssh_lp_user, domain_memory, domain_vcpus, domain_boot_order,
networks, domain_disks, domain_apt_proxy,
domain_init_script, domain_user_data, domain_meta_data,
domain_net_config, domain_disk_bus,
force=False, skip_seed=False, skip_backingfile=False,
skip_cleanup=False, snap_dict=None):
rev = None
if revision:
rev = revision
else:
revs = sorted([int(idx) for idx in base_revisions.keys()],
reverse=True)
for r in revs:
r = str(r)
_series = base_revisions[r]['targets'][0].partition('-')[0]
if series and series == _series:
rev = r
break
if not rev:
raise Exception("No revision found for series '{}'".format(series))
backingfile = os.path.join(base_root, rev,
base_revisions[rev]['files'][0])
if not num_domains:
num_domains = 1
name = domain_name_prefix or str(uuid.uuid4())
for n in xrange(num_domains):
if num_domains > 1:
dom_name = '{}{}'.format(name, n)
else:
dom_name = name
dom_path = os.path.join(root, dom_name)
imgpath = os.path.join(dom_path, '{}.img'.format(dom_name))
seedpath = os.path.join(dom_path, '{}-seed.img'.format(dom_name))
print "INFO: creating domain '{}'".format(dom_name)
if os.path.isdir(dom_path):
if not force:
print("WARNING: domain path '{}' already exists - skipping "
"create".format(dom_path))
continue
else:
print("INFO: domain path '{}' already exists - "
"overwriting".format(dom_path))
shutil.rmtree(dom_path)
elif domain_exists(dom_name) and not force:
print("WARNING: domain '{}' already exists - skipping "
"create".format(dom_name))
continue
ctxt = {'name': dom_name,
'ssh_user': ssh_lp_user,
'backingfile': backingfile,
'img_path': imgpath,
'seed_path': seedpath,
'mem': domain_memory,
'vcpus': domain_vcpus,
'root_size': root_disk_size,
'boot_order': domain_boot_order,
'classic_snaps': snap_dict.get('classic'),
'stable_snaps': snap_dict.get('stable'),
'networks': networks.split(','),
'apt_proxy': domain_apt_proxy}
if skip_backingfile:
del ctxt['backingfile']
if skip_seed:
del ctxt['seed_path']
ctxt['primary_disk'] = {'bus': domain_disk_bus}
if domain_disks:
disks = []
for i in xrange(domain_disks):
disks.append({'name': 'disk%s' % (i), 'size': '100G',
'bus': domain_disk_bus})
ctxt['disks'] = disks
local_templates = ['snap_install.sh']
dom_templates = ['create-new.sh']
if not skip_seed:
if not domain_user_data:
dom_templates += ['user-data']
if domain_meta_data:
ctxt['network_config'] = 'meta-data'
elif domain_net_config:
ctxt['network_config'] = '--network-config network-config'
else:
ctxt['network_config'] = 'meta-data'
dom_templates += ['meta-data']
tmpdir = tempfile.mkdtemp()
try:
render_templates(ctxt, dom_path, dom_templates, tmpdir,
local_templates)
if not skip_seed:
for input, tgt in {domain_user_data: 'user-data',
domain_meta_data: 'meta-data'}.iteritems():
if input:
tgt = os.path.join(dom_path, tgt)
shutil.copy(input, tgt)
write_multipart = False
cmd = ['write-mime-multipart',
'--output={}/user-data.tmp'.format(tmpdir),
'{}/user-data'.format(dom_path)]
if any(snap_dict.values()):
write_multipart = True
cmd.append('{}/snap_install.sh:text/x-shellscript'
.format(tmpdir))
if domain_init_script:
write_multipart = True
cmd.append('{}:text/x-shellscript'
.format(domain_init_script))
if write_multipart:
subprocess.check_output(cmd)
shutil.copy(os.path.join(tmpdir, 'user-data.tmp'),
os.path.join(dom_path, 'user-data'))
if domain_net_config:
shutil.copy(domain_net_config,
os.path.join(dom_path, 'network-config'))
except:
if not skip_cleanup:
shutil.rmtree(tmpdir)
raise
os.chmod(os.path.join(dom_path, 'create-new.sh'), 0o0755)
try:
os.chdir(dom_path)
with open('/dev/null') as fd:
subprocess.check_call(['./create-new.sh'], stdout=fd,
stderr=fd)
except:
print("\nERROR: domain '{}' create unsuccessful: deleting "
"{}".format(dom_name, dom_path))
if not skip_cleanup:
shutil.rmtree(dom_path)
raise
| gpl-3.0 | -7,168,975,571,664,807,000 | 35.280193 | 79 | 0.529427 | false |
go-hep/hep | hbook/rootcnv/testdata/make-root-h1.py | 1 | 1188 | # Copyright ©2017 The go-hep Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from array import array as carray
import ROOT
f = ROOT.TFile.Open("gauss-h1.root","RECREATE")
histos = []
for t in [
(ROOT.TH1D, "h1d", (10,-4,4)),
(ROOT.TH1F, "h1f", (10,-4,4)),
(ROOT.TH1F, "h1d-var", (10, carray("d", [
-4.0, -3.2, -2.4, -1.6, -0.8, 0,
+0.8, +1.6, +2.4, +3.2, +4.0
]))),
(ROOT.TH1F, "h1f-var", (10, carray("f", [
-4.0, -3.2, -2.4, -1.6, -0.8, 0,
+0.8, +1.6, +2.4, +3.2, +4.0
])))
]:
cls, name, args = t
if len(args) == 3:
h = cls(name, name, args[0], args[1], args[2])
elif len(args) == 2:
h = cls(name, name, args[0], args[1])
else:
raise ValueError("invalid number of arguments %d" % len(args))
h.StatOverflows(True)
h.Sumw2()
with open("gauss-1d-data.dat") as ff:
for l in ff.readlines():
x, w = l.split()
h.Fill(float(x),float(w))
pass
pass
histos.append(h)
pass
f.Write()
f.Close()
| bsd-3-clause | 9,136,496,270,084,065,000 | 28.675 | 70 | 0.493682 | false |
open-power-sdk/migration-advisor | ma/checkers/api_ipp_checker.py | 1 | 1388 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 IBM Corporation
Licensed under the Apache License, Version 2.0 (the “License”);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an “AS IS” BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contributors:
* Daniel Kreling <[email protected]>
"""
import ma.checkers.checker_file_utils as utils
from ma.checkers.checker import Checker
class ApiIppChecker(Checker):
""" Checker for Integrated Performance Primitives (IPP) API """
def __init__(self):
self.problem_type = "Integrated Performance Primitives (IPP) API"
self.problem_msg = "x86 API not supported in Power"
self.api_ipp_includes = ["ipp.h"]
self.hint = "ipp.*"
def get_pattern_hint(self):
return self.hint
def get_problem_msg(self):
return self.problem_msg
def get_problem_type(self):
return self.problem_type
def check_include(self, include_name):
if include_name in self.api_ipp_includes:
return True
| apache-2.0 | -7,480,850,964,663,516,000 | 29 | 73 | 0.7 | false |
mjirik/lisa | lisa/virtual_resection.py | 1 | 29738 | #! /usr/bin/python
# -*- coding: utf-8 -*-
# import functions from another directory
import os.path
import sys
path_to_script = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path_to_script, "../extern/sed3/"))
# from ..extern.sed3 import sed3
# import featurevector
from loguru import logger
# logger = logging.getLogger()
import numpy as np
import scipy.ndimage
# import vtk
import argparse
# @TODO remove logger debug message from the header
logger.debug("before morphology import")
from skimage import morphology
# from PyQt4 import QtCore, QtGui
# from PyQt4.QtGui import *
# from PyQt4.QtCore import Qt
# from PyQt4.QtGui import QApplication
# from PyQt4.QtGui import QApplication, QMainWindow, QWidget,\
# QGridLayout, QLabel, QPushButton, QFrame, QFileDialog,\
# QFont, QInputDialog, QComboBox, QRadioButton, QButtonGroup
# ----------------- my scripts --------
from . import misc
import sed3
# import show3
from . import qmisc
from . import data_manipulation
import imma.image_manipulation as ima
def resection(data, name=None, method='PV',
interactivity=True, seeds=None, **kwargs):
"""
Main resection function.
    :param data: dictionary with data3d, segmentation and slab keys.
    :param method: "PV", "planar" or "PV_new"; any other value falls back to the
        3D-visualization based resection
:param interactivity: True or False, use seeds if interactivity is False
:param seeds: used as initial interactivity state
:param kwargs: other parameters for resection algorithm
:return:
"""
    if method == 'PV':
        return resection_old(data, interactivity=interactivity, seeds=seeds)
    elif method == 'planar':
        return resection_planar(data, interactivity=interactivity, seeds=seeds)
    elif method == "PV_new":
return resection_portal_vein_new(data, interactivity=interactivity, seeds=seeds, organ_label=data["slab"]["liver"], vein_label=data["slab"]["porta"])
# return resection_portal_vein_new(data, interactivity=interactivity, seeds=seeds, **kwargs)
else:
return resection_with_3d_visualization(data, **kwargs)
def Rez_podle_roviny(plane, data, voxel):
    """Cut the binary volume ``data`` with a plane.

    ``plane`` is expected to expose ``GetNormal()`` and ``GetOrigin()`` (e.g. a VTK
    plane defined in millimetres). It is converted to voxel coordinates using
    ``voxel`` and every voxel is classified by the plane equation
    a*x + b*y + c*z + d = 0. Returns the part of the object on the positive side
    of the plane and the percentage of the object that was removed.
    """
a = plane.GetNormal()[0] * voxel[0]
b = plane.GetNormal()[1] * voxel[1]
c = plane.GetNormal()[2] * voxel[2]
xx = plane.GetOrigin()[0] / voxel[0]
yy = plane.GetOrigin()[1] / voxel[1]
zz = plane.GetOrigin()[2] / voxel[2]
d = -(a * xx) - (b * yy) - (c * zz)
mensi = 0
vetsi = 0
mensi_objekt = 0
vetsi_objekt = 0
print('x: ', a, ' y: ', b, ' z: ', c)
    print('Computing the cut...')
prava_strana = np.ones((data.shape[0], data.shape[1], data.shape[2]))
leva_strana = np.ones((data.shape[0], data.shape[1], data.shape[2]))
dimension = data.shape
for x in range(dimension[0]):
for y in range(dimension[1]):
for z in range(dimension[2]):
rovnice = a * x + b * y + c * z + d
if((rovnice) <= 0):
mensi = mensi + 1
if(data[x][y][z] == 1):
mensi_objekt = mensi_objekt + 1
leva_strana[x][y][z] = 0
else:
vetsi = vetsi + 1
if(data[x][y][z] == 1):
vetsi_objekt = vetsi_objekt + 1
prava_strana[x][y][z] = 0
leva_strana = leva_strana * data
objekt = mensi_objekt + vetsi_objekt
odstraneni_procenta = ((100 * mensi_objekt) / objekt)
print(leva_strana)
return leva_strana, odstraneni_procenta
# ----------------------------------------------------------
def cut_editor_old(data, label=None):
logger.debug("editor input label: " + str(label))
if label is None:
contour=data['segmentation']
else:
if type(label) == str:
label = data['slab'][label]
contour=(data['segmentation'] == label).astype(np.int8)
pyed = sed3.sed3qt(data['data3d'], contour=contour)
pyed.exec_()
return pyed.seeds
def split_vessel(datap, seeds, vessel_volume_threshold=0.95, dilatation_iterations=1, input_label="porta",
output_label1 = 1, output_label2 = 2, input_seeds_cut_label=1,
input_seeds_separate_label=3,
input_seeds_label2=None,
method="reach volume",
):
"""
:param datap: data plus format with data3d, segmentation, slab ...
    :param seeds: 3d ndarray of the same size as data3d, label 1 marks the place where the
        vessel should be cut. Label 2 points to the vessel part which gets output label 1
        after the segmentation.
    :param vessel_volume_threshold: this parameter defines the iteration stop rule if the
        method "reach volume" is selected
    :param dilatation_iterations:
    :param input_label: which vessel should be split
    :param output_label1: output label for the vessel part marked with the right button (if it is used)
    :param output_label2: output label for the unmarked vessel part
    :param method: "separate labels" or "reach volume". The first method needs 3 input seeds and is more stable.
    :param input_seeds_separate_label: after the segmentation the object containing this label in seeds
        is labeled with output_label1
    :param input_seeds_label2: this parameter is used if the method is "separate labels". After the
        segmentation the object containing this label in seeds is labeled with output_label2.
:return:
"""
split_obj0 = (seeds == input_seeds_cut_label).astype(np.int8)
split_obj = split_obj0.copy()
# numeric_label = imma.get_nlabel(datap["slab"], input_label)
if method == "separate labels":
input_label = np.max(datap["segmentation"][seeds == input_seeds_label2])
vessels = ima.select_labels(datap["segmentation"], input_label, slab=datap["slab"])
# if type(input_label) is str:
# numeric_label = datap['slab'][input_label]
# else:
# numeric_label = input_label
# vessels = datap['segmentation'] == numeric_label
vesselstmp = vessels
sumall = np.sum(vessels == 1)
# split_obj = scipy.ndimage.binary_dilation(split_obj, iterations = 5 )
# vesselstmp = vessels * (1 - split_obj)
lab, n_obj = scipy.ndimage.label(vesselstmp)
logger.debug("number of objects " + str(n_obj))
# while n_obj < 2 :
    #  until at least 80 per cent of the whole object has been chipped off
not_complete = True
while not_complete:
if method == "reach volume":
not_complete = np.sum(lab == qmisc.max_area_index(lab, n_obj)) > (vessel_volume_threshold * sumall)
elif method == "separate labels":
# misc.
# imma.get_nlabel(datap["slab"], )
# imma.select_labels(seeds,input_seeds_separate_label)
seglab1 = np.max(lab[seeds == input_seeds_separate_label])
seglab2 = np.max(lab[seeds == input_seeds_label2])
if (seglab1 > 0) and (seglab2 > 0) and (seglab1 != seglab2):
not_complete = False
else:
IOError("Unknown method " + str(method))
split_obj = scipy.ndimage.binary_dilation(split_obj, iterations=dilatation_iterations)
vesselstmp = vessels * (1 - split_obj)
lab, n_obj = scipy.ndimage.label(vesselstmp)
if method == "reach volume":
        # all the objects into which it fell apart
# pyed = sed3.sed3(lab)
# pyed.show()
obj1 = get_biggest_object(lab)
        # remove the biggest one
lab[obj1 == 1] = 0
obj2 = get_biggest_object(lab)
pixel = 0
pixels = obj1[seeds == input_seeds_separate_label]
if len(pixels) > 0:
pixel = pixels[0]
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
# import ipdb; ipdb.set_trace() # BREAKPOINT
if pixel > 0:
ol1 = output_label1
ol2 = output_label2
else:
ol2 = output_label1
ol1 = output_label2
# first selected pixel with right button
lab = ol1 * obj1 + ol2 * obj2
elif method == "separate labels":
lab = (lab == seglab1) * output_label1 + (lab == seglab2) * output_label2
cut_by_user = split_obj0
return lab, cut_by_user
def Resekce_podle_bodu(data, seeds):
lab, cut = split_vessel(data, seeds)
segm, dist1, dist2 = split_organ_by_two_vessels(data, lab)
data = virtual_resection_visualization(data, segm, dist1, dist2, cut)
return data
def cut_editor(data, inputfile):
    # @TODO fix the viewer module, see issue #69
import viewer3
# global normal,coordinates
viewer = viewer3.Viewer(inputfile, 'View')
    # display the liver from code
viewer.prohlizej(data, 'View', 'liver')
# mesh = viewer.generate_mesh(segmentation,voxelsize_mm,degrad)
# viewer.View(mesh,False)
# viewer.buttons(window,grid)
# print(viewer.normal)
# print(viewer.coordinates)
'''
    The function returns a 3D array similar to data['segmentation'];
    data['slab'] describes what each value means
    labels = []
    segmentation = segmentation[::degrad,::degrad,::degrad]
    print("Generating data...")
segmentation = segmentation[:,::-1,:]
mesh_data = seg2fem.gen_mesh_from_voxels_mc(segmentation,
voxelsize_mm*degrad)
print("Done")
if True:
mesh_data.coors = seg2fem.smooth_mesh(mesh_data)
vtk_file = "mesh_geom.vtk"
mesh_data.write(vtk_file)
app = QApplication(sys.argv)
#view = viewer3.QVTKViewer(vtk_file,'Cut')
'''
# normal = viewer3.normal_and_coordinates().set_normal()
# coordinates = viewer3.normal_and_coordinates().set_coordinates()
# return normal,coordinates
pass
def change(data, name):
# data['segmentation'][vessels == 2] = data['slab']['porta']
segmentation = data['segmentation']
cut_editor(segmentation == data['slab'][name])
def velikosti(a):
    """Return the voxel counts of labels 1, 2 and 3 in ``a``."""
# a_index = [0, 0, 0]
# for x in range(0, len(a)):
# for y in range(0, len(a[0])):
# for z in range(0, len(a[0][0])):
# if a[x][y][z] == 1:
# a_index[0] += 1
# elif a[x][y][z] == 2:
# a_index[1] += 1
# elif a[x][y][z] == 3:
# a_index[2] += 1
mx = np.max(a)
a_index = []
for i in range(1, 4): # for i in range(1, mx + 1):
sm = np.sum(a == i)
a_index.append(sm)
return a_index
def nejnizsi(a, b, c):
    """Return 1, 2 or 3 according to which of ``a``, ``b``, ``c`` is the smallest."""
if a > b:
if b > c:
return 3
else:
return 2
elif b > c:
if c > a:
return 1
else:
return 3
elif c > a:
if a > b:
return 2
else:
return 1
else:
print("chyba")
def resection_portal_vein_new(data, interactivity=False, seeds=None, organ_label=1, vein_label=2):
"""
    New function for virtual resection based on the portal vein.
    :param data:
    :param interactivity:
    :param seeds:
    :param organ_label:
    :param vein_label:
:return:
"""
# ed = sed3.sed3(a)
# ed.show()
# from PyQt4 import QtGui
# from PyQt4.QtGui import QApplication, QMainWindow, QVBoxLayout, QHBoxLayout, QLabel, QPushButton, QFrame, \
# QFont, QPixmap, QFileDialog
#
# window = QtGui.QWidget()
# mainLayout = QVBoxLayout()
# window.setLayout(mainLayout)
# mainLayout.addWidget(sed3.sed3qtWidget(data['data3d'], contour=data['segmentation']))
    # keep the original data
segmentation = data["segmentation"]
data3d = data["data3d"]
    # data with the segmentations only
segm = ((data["segmentation"] == organ_label) * organ_label +
(data["segmentation"] == vein_label) * vein_label)
# ed = sed3.sed3(segm)
# ed.show()
    # crop the segmentation
crinfo = qmisc.crinfo_from_specific_data(segm, [0])
data["segmentation"] = qmisc.crop(segm, crinfo)
data["data3d"] = qmisc.crop(data3d, crinfo)
if seeds is not None:
seeds = qmisc.crop(seeds, crinfo)
    # @TODO replace this with something more meaningful
if interactivity:
print("Select cut")
# seeds = cut_editor_old(data)
seeds = cut_editor_old(data)
elif seeds is None:
logger.error('seeds is None and interactivity is False')
return None
lab, cut = split_vessel(data, seeds)
segm, dist1, dist2 = split_organ_by_two_vessels(data, lab)
    # the liver is split into 3 pieces
    a = morphology.label(segm, background=0)
    ### the condition does not work (original author's note)
    if 3 in a:  # does a third component appear in the segmentation?
        print("it works :) :) :P")
a_index = velikosti(segm)
print(a_index)
i = nejnizsi(a_index[0], a_index[1], a_index[2])
segm = ((a == i) * (segm == 1).astype('int8') +
(a != i)*(segm == 2).astype('int8') +
(segm != 0).astype('int8'))
# TODO split this function from visualization
data = virtual_resection_visualization(data, segm, dist1,
dist2, cut,
interactivity=interactivity)
    # restore the original data and merge them with the modified part
data["data3d"] = data3d
# orig_shape = (len(segmentation), len(segmentation[0]), len(segmentation[1]))
data["segmentation"] = qmisc.uncrop(data["segmentation"], crinfo, orig_shape=segmentation.shape)
#segmentation = segmentation == vein
data["segmentation"] = (data["segmentation"] +
(segmentation != organ_label) * segmentation) - (segmentation == vein_label) * vein_label
return data
def resection_old(data, interactivity=True, seeds=None):
if interactivity:
print("Select cut")
seeds = cut_editor_old(data)
elif seeds is None:
logger.error('seeds is None and interactivity is False')
return None
logger.debug("unique(seeds) " + str(np.unique(seeds)))
# seeds[56][60][78] = 1
lab, cut = split_vessel(data, seeds)
segm, dist1, dist2 = split_organ_by_two_vessels(data, lab)
# TODO split this function from visualization
data = virtual_resection_visualization(data, segm, dist1,
dist2, cut,
interactivity=interactivity)
return data
def resection_planar(data, interactivity, seeds=None):
"""
Based on input seeds the cutting plane is constructed
:param data:
:param interactivity:
:param seeds:
:return:
"""
if seeds is None:
if interactivity:
print("Select cut")
seeds = cut_editor_old(data)
else:
logger.error('seeds is None and interactivity is False')
return None
segm, dist1, dist2 = split_organ_by_plane(data, seeds)
cut = dist1**2 < 2
# TODO split this function from visualization
data = virtual_resection_visualization(data, segm, dist1,
dist2, cut,
interactivity=interactivity)
return data
def split_organ_by_plane(data, seeds):
"""
    Based on the seed points, fit a plane and split the nonzero segmentation with it.
:param data:
:param seeds:
:return:
"""
from . import geometry3d
from . import data_manipulation
l1 = 1
l2 = 2
point, vector = geometry3d.plane_fit(seeds.nonzero())
dist1 = data_manipulation.split_with_plane(point, vector, data['data3d'].shape)
dist2 = dist1 * -1
segm = (((data['segmentation'] != 0) * (dist1 < dist2)).astype('int8') +
(data['segmentation'] != 0).astype('int8'))
return segm, dist1, dist2
def split_tissue_on_labeled_tree(labeled_branches,
trunk_label, branch_labels,
tissue_segmentation, neighbors_list=None,
ignore_labels=None,
ignore_trunk=True,
on_missed_branch="split",
):
"""
    Based on a pre-labeled vessel tree, split the surrounding tissue into parts.
    The connected sub-trees are computed and used internally.
    :param labeled_branches: ndarray with the labeled volumetric vessel tree.
    :param trunk_label: int
    :param branch_labels: list of ints
    :param tissue_segmentation: bool ndarray. Organ is True, the rest is False.
    :param neighbors_list: optional precomputed neighbors list (see imma.measure.neighbors_list)
    :param ignore_trunk: True or False
    :param ignore_labels: list of labels which will be ignored
    :param on_missed_branch: str, one of "split", "orig" or "exception". A missed label is a label
        directly connected to the trunk but with no branch label inside.
        "split" will ignore the missed label,
        "orig" will keep the original area label,
        "exception" will raise an exception.
:return:
"""
# bl = lisa.virtual_resection.branch_labels(oseg, "porta")
import imma.measure
import imma.image_manipulation
import imma.image_manipulation as ima
if ignore_labels is None:
ignore_labels = []
ignore_labels = list(ignore_labels)
if ignore_trunk:
ignore_labels.append(trunk_label)
if neighbors_list is None:
exclude = [0]
exclude.extend(ignore_labels)
neighbors_list = imma.measure.neighbors_list(
labeled_branches,
None,
# [seglabel1, seglabel2, seglabel3],
exclude=exclude)
#exclude=[imma.image_manipulation.get_nlabels(slab, ["liver"]), 0])
# ex
# print(neighbors_list)
# find whole branche
# segmentations = [None] * len(branch_labels)
segmentation = np.zeros_like(labeled_branches, dtype=int)
new_branches = []
connected = [None] * len(branch_labels)
for i, branch_label in enumerate(branch_labels):
import copy
ignore_other_branches = copy.copy(branch_labels)
ignore_other_branches.pop(i)
ignore_labels_i = [0]
ignore_labels_i.extend(ignore_other_branches)
ignore_labels_i.extend(ignore_labels)
connected_i = imma.measure.get_connected_labels(
neighbors_list, branch_label, ignore_labels_i)
# segmentations[i] = ima.select_labels(labeled_branches, connected_i).astype(np.int8)
select = ima.select_labels(labeled_branches, connected_i).astype(np.int8)
select = select > 0
if np.max(segmentation[select]) > 0:
logger.debug("Missing branch connected to branch and other branch or trunk.")
union = (segmentation * select) > 0
segmentation[select] = i + 1
if on_missed_branch == "split":
segmentation[union] = 0
elif on_missed_branch == "orig":
new_branche_label = len(branch_labels) + len(new_branches) + 1
logger.debug("new branch label {}".format(new_branche_label))
segmentation[union] = new_branche_label
new_branches.append(new_branche_label)
elif on_missed_branch == "exception":
raise ValueError("Missing one vessel")
else:
raise ValueError("Unknown 'on_missed_label' parameter.")
else:
segmentation[select] = i + 1
# error
# else:
# segmentation[select] = i + 1
connected[i] = connected_i
seg = segmentation
# if np.max(np.sum(segmentations, 0)) > 1:
# raise ValueError("Missing one vessel")
#
# for i, branch_label in enumerate(branch_labels):
# segmentations[i] = segmentations[i] * (i + 1)
# seg = np.sum(segmentations, 0)
# ignore_labels1 = [0, trunk_label, branch_label2]
# ignore_labels1.extend(ignore_labels)
# ignore_labels2 = [0, trunk_label, branch_label]
# ignore_labels2.extend(ignore_labels)
# connected2 = imma.measure.get_connected_labels(
# neighbors_list, branch_label, ignore_labels1)
# connected3 = imma.measure.get_connected_labels(
# neighbors_list, branch_label2, ignore_labels2)
#
# # seg = ima.select_labels(segmentation, organ_label, slab).astype(np.int8)
# seg1 = ima.select_labels(labeled_branches, connected2).astype(np.int8)
# seg2 = ima.select_labels(labeled_branches, connected3).astype(np.int8)
# seg = seg1 + seg2 * 2
# if np.max(seg) > 2:
# ValueError("Missing one vessel")
dseg = ima.distance_segmentation(seg)
logger.debug("output unique labels {}".format(np.unique(dseg)))
# organseg = ima.select_labels(segmentation, organ_label, slab).astype(np.int8)
dseg[~tissue_segmentation.astype(np.bool)] = 0
return dseg, connected
def split_organ_by_two_vessels(datap,
seeds, organ_label=1,
seed_label1=1, seed_label2=2,
weight1=1, weight2=1):
"""
    Input of the function is an ndarray with 2 labeled vessels and the data.
    Output is the organ segmented by the two vessels using a minimum-distance criterion.
    :param datap: dictionary with 3d data, segmentation, and other information
           "data3d": 3d-ndarray with intensity data
           "voxelsize_mm",
           "segmentation": 3d ndarray with image segmentation
           "slab": segmentation labels
    :param seeds: ndarray with the same size as data3d
        1: first part of the portal vein (or the label given by seed_label1)
        2: second part of the portal vein (or the label given by seed_label2)
:param weight1: distance weight from seed_label1
:param weight2: distance weight from seed_label2
"""
weight1 = 1 if weight1 is None else weight1
slab = datap["slab"]
segmentation = datap["segmentation"]
if type(seed_label1) != list:
seed_label1 = [seed_label1]
if type(seed_label2) != list:
seed_label2 = [seed_label2]
    # the distance transform is measured from the seed voxels (the zeros of the input mask)
dist1 = scipy.ndimage.distance_transform_edt(
1 - ima.select_labels(seeds, seed_label1, slab),
# seeds != seed_label1,
sampling=datap['voxelsize_mm']
)
dist2 = scipy.ndimage.distance_transform_edt(
1 - ima.select_labels(seeds, seed_label2, slab),
# seeds != seed_label2,
sampling=datap['voxelsize_mm']
)
# import skfmm
# dist1 = skfmm.distance(
# labeled != l1,
# dx=datap['voxelsize_mm']
# )
# dist2 = skfmm.distance(
# labeled != l2,
# dx=datap['voxelsize_mm']
# )
# print 'skfmm'
# from PyQt4.QtCore import pyqtRemoveInputHook; pyqtRemoveInputHook()
# import ipdb; ipdb.set_trace()
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
# import ipdb; ipdb.set_trace() # BREAKPOINT
# segm = (dist1 < dist2) * (data['segmentation'] != data['slab']['none'])
target_organ_segmentation = ima.select_labels(segmentation, organ_label, slab)
segm = ((target_organ_segmentation * ((dist1 / weight1) > (dist2 / weight2))).astype('int8') +
target_organ_segmentation.astype('int8'))
return segm, dist1, dist2
def virtual_resection_visualization(data, segm, dist1, dist2, cut,
interactivity=True):
v1, v2 = liver_spit_volume_mm3(segm, data['voxelsize_mm'])
if interactivity:
print("Liver volume: %.4g l" % ((v1 + v2) * 1e-6))
print("volume1: %.4g l (%.3g %%)" % (
(v1) * 1e-6, 100 * v1 / (v1 + v2)))
print("volume2: %.4g l (%.3g %%)" % (
(v2) * 1e-6, 100 * v2 / (v1 + v2)))
# pyed = sed3.sed3(segm)
# pyed.show()
# import pdb; pdb.set_trace()
linie = (((data['segmentation'] != 0) *
(np.abs(dist1 - dist2) < 1))).astype(np.int8)
linie_vis = 2 * linie
linie_vis[cut == 1] = 1
linie_vis = linie_vis.astype(np.int8)
if interactivity:
pyed = sed3.sed3qt(
data['data3d'],
seeds=linie_vis,
contour=(data['segmentation'] != 0))
# pyed.show()
pyed.exec_()
# import pdb; pdb.set_trace()
# show3.show3(data['segmentation'])
slab = {
'liver': 1,
'porta': 2,
'resected_liver': 3,
'resected_porta': 4}
slab.update(data['slab'])
data['slab'] = slab
data['slab']['resected_liver'] = 3
data['slab']['resected_porta'] = 4
mask_resected_liver = (
(segm == 1) & (data['segmentation'] == data['slab']['liver']))
mask_resected_porta = (
(segm == 1) & (data['segmentation'] == data['slab']['porta']))
data['segmentation'][mask_resected_liver] = \
data['slab']['resected_liver']
data['segmentation'][mask_resected_porta] = \
data['slab']['resected_porta']
logger.debug('resection_old() end')
return data
def resection_with_3d_visualization(data, name):
# data['segmentation'][vessels == 2] = data['slab']['porta']
# segmentation = data['segmentation']
# print(data['slab'])
change(data, name)
# print data["slab"]
# change(segmentation == data['slab']['porta'])
# lab = cut_editor(segmentation == data['slab']['porta'])
def get_biggest_object(data):
return qmisc.get_one_biggest_object(data)
def liver_spit_volume_mm3(segm, voxelsize_mm):
"""
segm: 0 - nothing, 1 - remaining tissue, 2 - resected tissue
"""
voxelsize_mm3 = np.prod(voxelsize_mm)
v1 = np.sum(segm == 1) * voxelsize_mm3
v2 = np.sum(segm == 2) * voxelsize_mm3
return v1, v2
def View(name):
data = misc.obj_from_file("out", filetype='pickle')
resection(data, name)
def label_volumetric_vessel_tree(oseg, vessel_label=None, write_to_oseg=True, new_label_str_format="{}{:03d}"):
"""
Split vessel by branches and put it in segmentation and slab.
:param oseg: OrganSegmentation object with segmentation, voxelsize_mm and slab
:param vessel_label: int or string label with vessel. Everything above zero is used if vessel_label is set None.
:param write_to_oseg: Store output into oseg.segmentation if True. The slab is also updated.
:param new_label_str_format: format of new slab
:return:
"""
logger.debug("vessel_label {}".format(vessel_label))
logger.debug("python version {} {}".format(sys.version_info, sys.executable))
import skelet3d
if vessel_label is None:
vessel_volume = oseg.segmentation > 0
else:
vessel_volume = oseg.select_label(vessel_label)
# print(np.unique(vessel_volume))
skel = skelet3d.skelet3d(vessel_volume)
skan = skelet3d.SkeletonAnalyser(skel, volume_data=vessel_volume)
skan.skeleton_analysis()
bl = skan.get_branch_label()
un = np.unique(bl)
logger.debug("skelet3d branch label min: {}, max: {}, dtype: {}".format(np.min(bl), np.max(bl), bl.dtype))
if write_to_oseg:
if 127 < np.max(bl) and ((oseg.segmentation.dtype == np.int8) or (oseg.segmentation.dtype == np.uint8)):
oseg.segmentation = oseg.segmentation.astype(np.int16)
for lb in un:
if lb != 0:
new_slabel = new_label_str_format.format(vessel_label, lb)
new_nlabel = oseg.nlabels(new_slabel)
oseg.segmentation[bl == lb] = new_nlabel
# ima.distance_segmentation(oseg.select_label(vessel_label))
return bl
if __name__ == "__main__":
# # logger = logging.getLogger()
# logger = logging.getLogger()
logger.setLevel(logging.WARNING)
ch = logging.StreamHandler()
logger.addHandler(ch)
# SectorDisplay2__()
# logger.debug('input params')
# input parser
parser = argparse.ArgumentParser(description='Segment vessels from liver')
parser.add_argument('-pkl', '--picklefile',
help='input file from organ_segmentation')
parser.add_argument('-oe', '--use_old_editor', action='store_true',
help='use an old editor for vessel cut')
parser.add_argument('-o', '--outputfile', default=None,
help='output file')
parser.add_argument('-oo', '--defaultoutputfile', action='store_true',
help='"vessels.pickle" as output file')
parser.add_argument('-d', '--debug', action='store_true',
help='Debug mode')
args = parser.parse_args()
if (args.picklefile or args.vtkfile) is None:
raise IOError('No input data!')
data = misc.obj_from_file(args.picklefile, filetype='pickle')
ds = data['segmentation'] == data['slab']['liver']
pozice = np.where(ds == 1)
a = pozice[0][0]
b = pozice[1][0]
c = pozice[2][0]
ds = False
# print "vs ", data['voxelsize_mm']
# print "vs ", data['voxelsize_mm']
if args.debug:
logger.setLevel(logging.DEBUG)
# seg = np.zeros([100,100,100])
# seg [50:80, 50:80, 60:75] = 1
# seg[58:60, 56:72, 66:68]=2
# dat = np.random.rand(100,100,100)
# dat [50:80, 50:80, 60:75] = dat [50:80, 50:80, 60:75] + 1
# dat [58:60, 56:72, 66:68] = dat [58:60, 56:72, 66:68] + 1
# slab = {'liver':1, 'porta':2, 'portaa':3, 'portab':4}
# data = {'segmentation':seg, 'data3d':dat, 'slab':slab}
name = 'porta'
# cut_editor(data,args.inputfile)
if args.use_old_editor:
resection(data, name, method=args.use_old_editor)
else:
cut_editor(data, args.picklefile)
# print normal
# print coordinates
defaultoutputfile = "05-resection.pkl"
if args.defaultoutputfile:
args.outputfile = defaultoutputfile
if args.outputfile is None:
savestring = raw_input('Save output data? (y/n): ')
if savestring in ['Y', 'y']:
misc.obj_to_file(data, defaultoutputfile, filetype='pickle')
else:
misc.obj_to_file(data, args.outputfile, filetype='pickle')
| bsd-3-clause | -7,583,558,748,580,240,000 | 34.361905 | 157 | 0.601737 | false |
metabrainz/listenbrainz-server | listenbrainz/db/user.py | 1 | 19931 | import logging
from typing import List
import sqlalchemy
import uuid
import ujson
from datetime import datetime
from listenbrainz import db
from listenbrainz.db.exceptions import DatabaseException
from data.model.similar_user_model import SimilarUsers
from typing import Tuple, List
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def create(musicbrainz_row_id: int, musicbrainz_id: str, email: str = None) -> int:
"""Create a new user.
Args:
musicbrainz_row_id (int): the MusicBrainz row ID of the user
musicbrainz_id (str): MusicBrainz username of a user.
email (str): email of the user
Returns:
ID of newly created user.
"""
with db.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
INSERT INTO "user" (musicbrainz_id, musicbrainz_row_id, auth_token, email)
VALUES (:mb_id, :mb_row_id, :token, :email)
RETURNING id
"""), {
"mb_id": musicbrainz_id,
"token": str(uuid.uuid4()),
"mb_row_id": musicbrainz_row_id,
"email": email,
})
return result.fetchone()["id"]
def update_token(id):
"""Update a user's token to a new UUID
Args:
id (int) - the row id of the user to update
"""
with db.engine.connect() as connection:
try:
connection.execute(sqlalchemy.text("""
UPDATE "user"
SET auth_token = :token
WHERE id = :id
"""), {
"token": str(uuid.uuid4()),
"id": id
})
except DatabaseException as e:
logger.error(e)
raise
USER_GET_COLUMNS = ['id', 'created', 'musicbrainz_id', 'auth_token',
'last_login', 'latest_import', 'gdpr_agreed', 'musicbrainz_row_id', 'login_id']
def get(id: int, *, fetch_email: bool = False):
"""Get user with a specified ID.
Args:
id: ID of a user.
fetch_email: whether to return email in response
Returns:
Dictionary with the following structure:
{
"id": <listenbrainz user id>,
"created": <account creation time>,
"musicbrainz_id": <MusicBrainz username>,
"auth_token": <authentication token>,
"last_login": <date that this user last logged in>,
"latest_import": <date that this user last performed a data import>
"gdpr_agreed": <boolean, if the user has agreed to terms and conditions>,
"musicbrainz_row_id": <musicbrainz row id associated with this user>,
"login_id": <token used for login sessions>
}
"""
columns = USER_GET_COLUMNS + ['email'] if fetch_email else USER_GET_COLUMNS
with db.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT {columns}
FROM "user"
WHERE id = :id
""".format(columns=','.join(columns))), {"id": id})
row = result.fetchone()
return dict(row) if row else None
def get_by_login_id(login_id):
"""Get user with a specified login ID.
Args:
id (UUID): login ID of a user.
Returns:
Dictionary with the following structure:
{
"id": <listenbrainz user id>,
"created": <account creation time>,
"musicbrainz_id": <MusicBrainz username>,
"auth_token": <authentication token>,
"last_login": <date that this user last logged in>,
"latest_import": <date that this user last performed a data import>
"gdpr_agreed": <boolean, if the user has agreed to terms and conditions>,
"musicbrainz_row_id": <musicbrainz row id associated with this user>,
"login_id": <token used for login sessions>
}
"""
with db.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT {columns}
FROM "user"
WHERE login_id = :user_login_id
""".format(columns=','.join(USER_GET_COLUMNS))), {"user_login_id": login_id})
row = result.fetchone()
return dict(row) if row else None
def get_many_users_by_mb_id(musicbrainz_ids: List[str]):
"""Load a list of users given their musicbrainz login name
Args:
musicbrainz_ids: A list of musicbrainz usernames
Returns:
A dictionary where keys are the username, and values are dictionaries of user information
following the same format as `get_by_mb_id`.
If a provided username doesn't exist, it won't be returned.
"""
with db.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT {columns}
FROM "user"
WHERE LOWER(musicbrainz_id) in :mb_ids
""".format(columns=','.join(USER_GET_COLUMNS))), {"mb_ids": tuple([mbname.lower() for mbname in musicbrainz_ids])})
return {row['musicbrainz_id'].lower(): dict(row) for row in result.fetchall()}
def get_by_mb_id(musicbrainz_id, *, fetch_email: bool = False):
"""Get user with a specified MusicBrainz ID.
Args:
musicbrainz_id (str): MusicBrainz username of a user.
fetch_email: whether to return email in response
Returns:
Dictionary with the following structure:
{
"id": <listenbrainz user id>,
"created": <account creation time>,
"musicbrainz_id": <MusicBrainz username>,
"auth_token": <authentication token>,
"last_login": <date that this user last logged in>,
"latest_import": <date that this user last performed a data import>
"gdpr_agreed": <boolean, if the user has agreed to terms and conditions>,
"musicbrainz_row_id": <musicbrainz row id associated with this user>,
"login_id": <token used for login sessions>
}
"""
columns = USER_GET_COLUMNS + ['email'] if fetch_email else USER_GET_COLUMNS
with db.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT {columns}
FROM "user"
WHERE LOWER(musicbrainz_id) = LOWER(:mb_id)
""".format(columns=','.join(columns))), {"mb_id": musicbrainz_id})
row = result.fetchone()
return dict(row) if row else None
def get_by_token(token: str, *, fetch_email: bool = False):
"""Get user with a specified authentication token.
Args:
token: Authentication token associated with user's account.
fetch_email: whether to return email in response
Returns:
Dictionary with the following structure:
{
"id": <user id>,
"created": <account creation time>,
"musicbrainz_id": <MusicBrainz username>,
}
"""
columns = USER_GET_COLUMNS + ['email'] if fetch_email else USER_GET_COLUMNS
with db.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT {columns}
FROM "user"
WHERE auth_token = :auth_token
""".format(columns=','.join(columns))), {"auth_token": token})
row = result.fetchone()
return dict(row) if row else None
def get_user_count():
""" Get total number of users in database.
Returns:
int: user count
"""
with db.engine.connect() as connection:
try:
result = connection.execute(sqlalchemy.text("""
SELECT count(*) AS user_count
FROM "user"
"""))
row = result.fetchone()
return row['user_count']
except DatabaseException as e:
logger.error(e)
raise
def get_or_create(musicbrainz_row_id: int, musicbrainz_id: str) -> dict:
"""Get user with a specified MusicBrainz ID, or create if there's no account.
Args:
musicbrainz_row_id (int): the MusicBrainz row ID of the user
musicbrainz_id (str): MusicBrainz username of a user.
Returns:
Dictionary with the following structure:
{
"id": <user id>,
"created": <account creation time>,
"musicbrainz_id": <MusicBrainz username>,
"auth_token": <authentication token>,
}
"""
user = get_by_mb_row_id(musicbrainz_row_id, musicbrainz_id=musicbrainz_id)
if not user:
create(musicbrainz_row_id, musicbrainz_id)
user = get_by_mb_row_id(musicbrainz_row_id)
return user
def update_last_login(musicbrainz_id):
""" Update the value of last_login field for user with specified MusicBrainz ID
Args:
musicbrainz_id (str): MusicBrainz username of a user
"""
with db.engine.connect() as connection:
try:
connection.execute(sqlalchemy.text("""
UPDATE "user"
SET last_login = NOW()
WHERE musicbrainz_id = :musicbrainz_id
"""), {
"musicbrainz_id": musicbrainz_id,
})
except sqlalchemy.exc.ProgrammingError as err:
logger.error(err)
raise DatabaseException(
"Couldn't update last_login: %s" % str(err))
def update_latest_import(musicbrainz_id, ts):
""" Update the value of latest_import field for user with specified MusicBrainz ID
Args:
musicbrainz_id (str): MusicBrainz username of user
ts (int): Timestamp value with which to update the database
"""
with db.engine.connect() as connection:
try:
connection.execute(sqlalchemy.text("""
UPDATE "user"
SET latest_import = to_timestamp(:ts)
WHERE musicbrainz_id = :musicbrainz_id
"""), {
'ts': ts,
'musicbrainz_id': musicbrainz_id
})
except sqlalchemy.exc.ProgrammingError as e:
logger.error(e)
raise DatabaseException
def increase_latest_import(musicbrainz_id, ts):
"""Increases the latest_import field for user with specified MusicBrainz ID"""
user = get_by_mb_id(musicbrainz_id)
if ts > int(user['latest_import'].strftime('%s')):
update_latest_import(musicbrainz_id, ts)
def reset_latest_import(musicbrainz_id):
"""Resets the latest_import field for user with specified MusicBrainz ID to 0"""
update_latest_import(musicbrainz_id, 0)
def get_all_users(created_before=None, columns=None):
""" Returns a list of all users in the database
Args:
columns: a list of columns to be returned for each user
created_before (datetime): only return users who were created before this timestamp, defaults to now
Returns: if columns is None, A list of dicts of the following format for each user
{
'id': int
'musicbrainz_id': string
'created': datetime.datetime
'auth_token': uuid
'last_login': datetime.datetime
'latest_import': datetime.datetime
}
otherwise, a list of dicts for each user with only the columns passed as argument
"""
if columns is None:
columns = USER_GET_COLUMNS
if created_before is None:
created_before = datetime.now()
with db.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT {columns}
FROM "user"
WHERE created <= :created
ORDER BY id
""".format(columns=', '.join(columns))), {
"created": created_before,
})
return [dict(row) for row in result]
def delete(id):
""" Delete the user with specified row ID from the database.
Note: this deletes all statistics and api_compat sessions and tokens
associated with the user also.
Args:
id (int): the row ID of the listenbrainz user
"""
with db.engine.connect() as connection:
try:
connection.execute(sqlalchemy.text("""
DELETE FROM "user"
WHERE id = :id
"""), {
'id': id,
})
except sqlalchemy.exc.ProgrammingError as err:
logger.error(err)
raise DatabaseException("Couldn't delete user: %s" % str(err))
def agree_to_gdpr(musicbrainz_id):
""" Update the gdpr_agreed column for user with specified MusicBrainz ID with current time.
Args:
musicbrainz_id (str): the MusicBrainz ID of the user
"""
with db.engine.connect() as connection:
try:
connection.execute(sqlalchemy.text("""
UPDATE "user"
SET gdpr_agreed = NOW()
WHERE LOWER(musicbrainz_id) = LOWER(:mb_id)
"""), {
'mb_id': musicbrainz_id,
})
except sqlalchemy.exc.ProgrammingError as err:
logger.error(err)
raise DatabaseException(
"Couldn't update gdpr agreement for user: %s" % str(err))
def update_musicbrainz_row_id(musicbrainz_id, musicbrainz_row_id):
""" Update the musicbrainz_row_id column for user with specified MusicBrainz username.
Args:
musicbrainz_id (str): the MusicBrainz ID (username) of the user
musicbrainz_row_id (int): the MusicBrainz row ID of the user
"""
with db.engine.connect() as connection:
try:
connection.execute(sqlalchemy.text("""
UPDATE "user"
SET musicbrainz_row_id = :musicbrainz_row_id
WHERE LOWER(musicbrainz_id) = LOWER(:mb_id)
"""), {
'musicbrainz_row_id': musicbrainz_row_id,
'mb_id': musicbrainz_id,
})
except sqlalchemy.exc.ProgrammingError as err:
logger.error(err)
raise DatabaseException(
"Couldn't update musicbrainz row id for user: %s" % str(err))
def get_by_mb_row_id(musicbrainz_row_id, musicbrainz_id=None):
""" Get user with specified MusicBrainz row id.
Note: this function also optionally takes a MusicBrainz username to fall back on
if no user with specified MusicBrainz row ID is found.
Args:
musicbrainz_row_id (int): the MusicBrainz row ID of the user
musicbrainz_id (str): the MusicBrainz username of the user
Returns: a dict representing the user if found, else None.
"""
filter_str = ''
filter_data = {}
if musicbrainz_id:
filter_str = 'OR LOWER(musicbrainz_id) = LOWER(:musicbrainz_id) AND musicbrainz_row_id IS NULL'
filter_data['musicbrainz_id'] = musicbrainz_id
filter_data['musicbrainz_row_id'] = musicbrainz_row_id
with db.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT {columns}
FROM "user"
WHERE musicbrainz_row_id = :musicbrainz_row_id
{optional_filter}
""".format(columns=','.join(USER_GET_COLUMNS), optional_filter=filter_str)), filter_data)
if result.rowcount:
return result.fetchone()
return None
def validate_usernames(musicbrainz_ids):
""" Check existence of users in the database and return those users which exist in order.
Args:
musicbrainz_ids ([str]): a list of usernames
Returns: list of users who exist in the database
"""
with db.engine.connect() as connection:
r = connection.execute(sqlalchemy.text("""
SELECT t.musicbrainz_id as musicbrainz_id, id
FROM "user" u
RIGHT JOIN unnest(:musicbrainz_ids ::text[]) WITH ORDINALITY t(musicbrainz_id, ord)
ON LOWER(u.musicbrainz_id) = t.musicbrainz_id
ORDER BY t.ord
"""), {
'musicbrainz_ids': [musicbrainz_id.lower() for musicbrainz_id in musicbrainz_ids],
})
return [dict(row) for row in r.fetchall() if row['id'] is not None]
def get_users_in_order(user_ids):
with db.engine.connect() as connection:
r = connection.execute(sqlalchemy.text("""
SELECT t.user_id as id, musicbrainz_id
FROM "user" u
RIGHT JOIN unnest(:user_ids ::int[]) WITH ORDINALITY t(user_id, ord)
ON u.id = t.user_id
ORDER BY t.ord
"""), {
'user_ids': user_ids,
})
return [dict(row) for row in r.fetchall() if row['musicbrainz_id'] is not None]
def get_similar_users(user_id: int) -> SimilarUsers:
""" Given a user_id, fetch the similar users for that given user.
Returns a dict { "user_x" : .453, "user_y": .123 } """
with db.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT user_id, similar_users
FROM recommendation.similar_user
WHERE user_id = :user_id
"""), {
'user_id': user_id,
})
row = result.fetchone()
users = {}
for user in row[1]:
users[user] = row[1][user][0]
return SimilarUsers(user_id=row[0], similar_users=users) if row else None
def get_users_by_id(user_ids: List[int]):
""" Given a list of user ids, fetch one ore more users at the same time.
Returns a dict mapping user_ids to user_names. """
with db.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT id, musicbrainz_id
FROM "user"
WHERE id IN :user_ids
"""), {
'user_ids': tuple(user_ids)
})
row_id_username_map = {}
for row in result.fetchall():
row_id_username_map[row['id']] = row['musicbrainz_id']
return row_id_username_map
def is_user_reported(reporter_id: int, reported_id: int):
""" Check whether the user identified by reporter_id has reported the
user identified by reported_id"""
with db.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""
SELECT *
FROM reported_users
WHERE reporter_user_id = :reporter_id
AND reported_user_id = :reported_id
"""), {
"reporter_id": reporter_id,
"reported_id": reported_id
})
return True if result.fetchone() else False
def report_user(reporter_id: int, reported_id: int, reason: str = None):
""" Create a report from user with reporter_id against user with
reported_id"""
with db.engine.connect() as connection:
connection.execute(sqlalchemy.text("""
INSERT INTO reported_users (reporter_user_id, reported_user_id, reason)
VALUES (:reporter_id, :reported_id, :reason)
ON CONFLICT DO NOTHING
"""), {
"reporter_id": reporter_id,
"reported_id": reported_id,
"reason": reason,
})
def update_user_email(musicbrainz_id, email):
""" Update the email field for user with specified MusicBrainz ID
Args:
musicbrainz_id (str): MusicBrainz username of a user
email (str): email of a user
"""
with db.engine.connect() as connection:
try:
connection.execute(sqlalchemy.text("""
UPDATE "user"
SET email = :email
WHERE musicbrainz_id = :musicbrainz_id
"""), {
"musicbrainz_id": musicbrainz_id,
"email": email
})
except sqlalchemy.exc.ProgrammingError as err:
logger.error(err)
raise DatabaseException(
"Couldn't update user's email: %s" % str(err))
| gpl-2.0 | -7,587,009,513,787,705,000 | 33.905429 | 123 | 0.58261 | false |
ahill818/MetPy | metpy/plots/tests/test_ctables.py | 1 | 4389 | # Copyright (c) 2008-2015 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Tests for the `ctables` module."""
import os.path
import tempfile
try:
buffer_args = dict(bufsize=0)
from StringIO import StringIO
except ImportError:
buffer_args = dict(buffering=1)
from io import StringIO
import numpy as np
import pytest
from metpy.plots.ctables import ColortableRegistry, convert_gempak_table
@pytest.fixture()
def registry():
"""Set up a registry for use by the tests."""
return ColortableRegistry()
def test_package_resource(registry):
"""Test registry scanning package resource."""
registry.scan_resource('metpy.plots', 'nexrad_tables')
assert 'cc_table' in registry
def test_scan_dir(registry):
"""Test registry scanning a directory and ignoring files it can't handle ."""
try:
kwargs = dict(mode='w', dir='.', suffix='.tbl', delete=False, **buffer_args)
with tempfile.NamedTemporaryFile(**kwargs) as fobj:
fobj.write('"red"\n"lime"\n"blue"\n')
fname = fobj.name
# Unrelated table file that *should not* impact the scan
with tempfile.NamedTemporaryFile(**kwargs) as fobj:
fobj.write('PADK 704540 ADAK NAS\n')
bad_file = fobj.name
# Needs to be outside with so it's closed on windows
registry.scan_dir(os.path.dirname(fname))
name = os.path.splitext(os.path.basename(fname))[0]
assert name in registry
assert registry[name] == [(1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)]
finally:
os.remove(fname)
os.remove(bad_file)
def test_read_file(registry):
"""Test reading a colortable from a file."""
fobj = StringIO('(0., 0., 1.0)\n"red"\n"#0000FF" #Blue')
registry.add_colortable(fobj, 'test_table')
assert 'test_table' in registry
assert registry['test_table'] == [(0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 0.0, 1.0)]
def test_read_bad_file(registry):
"""Test what error results when reading a malformed file."""
with pytest.raises(RuntimeError):
fobj = StringIO('PADK 704540 ADAK NAS '
'AK US 5188 -17665 4 0')
registry.add_colortable(fobj, 'sfstns')
def test_get_colortable(registry):
"""Test getting a colortable from the registry."""
true_colors = [(0.0, 0.0, 1.0), (1.0, 0.0, 0.0)]
registry['table'] = true_colors
table = registry.get_colortable('table')
assert table.N == 2
assert table.colors == true_colors
def test_get_steps(registry):
"""Test getting a colortable and norm with appropriate steps."""
registry['table'] = [(0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
norm, cmap = registry.get_with_steps('table', 5., 10.)
assert cmap(norm(np.array([6.]))).tolist() == [[0.0, 0.0, 1.0, 1.0]]
assert cmap(norm(np.array([14.9]))).tolist() == [[0.0, 0.0, 1.0, 1.0]]
assert cmap(norm(np.array([15.1]))).tolist() == [[1.0, 0.0, 0.0, 1.0]]
assert cmap(norm(np.array([26.]))).tolist() == [[0.0, 1.0, 0.0, 1.0]]
def test_get_steps_negative_start(registry):
"""Test bad start for get with steps (issue #81)."""
registry['table'] = [(0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
norm, _ = registry.get_with_steps('table', -10, 5)
assert norm.vmin == -10
assert norm.vmax == 5
def test_get_boundaries(registry):
"""Test getting a colortable with explicit boundaries."""
registry['table'] = [(0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
norm, cmap = registry.get_with_boundaries('table', [0., 8., 10., 20.])
assert cmap(norm(np.array([7.]))).tolist() == [[0.0, 0.0, 1.0, 1.0]]
assert cmap(norm(np.array([9.]))).tolist() == [[1.0, 0.0, 0.0, 1.0]]
assert cmap(norm(np.array([10.1]))).tolist() == [[0.0, 1.0, 0.0, 1.0]]
def test_gempak():
"""Test GEMPAK colortable conversion."""
infile = StringIO('''! wvcolor.tbl
0 0 0
255 255 255
''')
outfile = StringIO()
# Do the conversion
convert_gempak_table(infile, outfile)
# Reset and grab contents
outfile.seek(0)
result = outfile.read()
assert result == '(0.000000, 0.000000, 0.000000)\n(1.000000, 1.000000, 1.000000)\n'
| bsd-3-clause | 7,920,223,734,139,306,000 | 33.559055 | 88 | 0.593074 | false |
fanc999/gtk-msvc-projects | fontconfig/win32/fcpc.py | 1 | 2059 | # Simple script to generate pkg-config (.pc) file
# for gsettings-desktop-schemas
import os
import sys
import argparse
from replace import replace_multi
from pc_base import BasePCItems
def main(argv):
base_pc = BasePCItems()
fc_parser = argparse.ArgumentParser(description='Setup basic .pc file info')
fc_parser.add_argument('--libxml2',
action='store_const',
const=1, default=0,
                           help='use libxml2 instead of expat in the generated fontconfig.pc')
base_pc.setup(argv, fc_parser)
fc_args = fc_parser.parse_args()
    if getattr(fc_args, 'libxml2', None) == 1:
expat_cflags = ''
expat_lib = ''
libxml2_cflags = '-I${includedir}/libxml2'
libxml2_lib = 'libxml2.lib'
else:
expat_cflags = ''
expat_lib = '-lexpat'
libxml2_cflags = ''
libxml2_lib = ''
fc_pc_replace_items = {'@sysconfdir@': '${prefix}/bin',
'@localstatedir@': '',
'@PACKAGE@': 'fontconfig',
'@BASECONFIGDIR@': '${sysconfdir}/fonts',
'@fc_cachedir@': '',
'@PKGCONFIG_REQUIRES@': 'freetype2',
'@PKGCONFIG_REQUIRES_PRIVATELY@': '',
'@FREETYPE_CFLAGS@': '',
'@FREETYPE_LIBS@': '-lfreetype',
'@ICONV_CFLAGS@': '',
'@ICONV_LIBS@': '-liconv',
'@EXPAT_CFLAGS@': expat_cflags,
'@LIBXML2_CFLAGS@': libxml2_cflags,
'@EXPAT_LIBS@': expat_lib,
'@LIBXML2_LIBS@': libxml2_lib}
fc_pc_replace_items.update(base_pc.base_replace_items)
# Generate fontconfig.pc
replace_multi(base_pc.top_srcdir + '/fontconfig.pc.in',
base_pc.prefix + '/lib/pkgconfig/fontconfig.pc',
fc_pc_replace_items)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| lgpl-2.1 | -792,207,952,905,173,200 | 35.122807 | 80 | 0.487615 | false |
rob-metalinkage/django-gazetteer | gazetteer/rdf_io_config.py | 1 | 15296 | #
# Configures RDF_IO mappings to make gazetteer source links available as Linked Data resources
#
from uriredirect.models import *
from gazetteer.models import GazSource,GazSourceConfig,LocationTypeField,CodeFieldConfig,NameFieldConfig
from rdf_io.models import ObjectMapping, ObjectType
from gazetteer.settings import TARGET_NAMESPACE_FT
from django.conf import settings
from django.http import HttpResponse
import logging
import requests
# logging, requests and HttpResponse are used by _rdf4j_push_context() below;
# _resolveTemplate referenced there is expected to be provided by rdf_io.
logger = logging.getLogger(__name__)
SITEURL=settings.SITEURL
if SITEURL[-1:] == '/' :
SITEURL=SITEURL[:-1]
try:
TARGET_NAMESPACE_FT=settings.TARGET_NAMESPACE_FT
except:
pass
RDFSTORE=settings.RDFSTORE
RDFSERVER=settings.RDFSERVER
from rdf_io.models import Namespace, ObjectType,ObjectMapping,AttributeMapping,EmbeddedMapping
from django.contrib.contenttypes.models import ContentType
from skosxl.models import Scheme
# mappings for format codes for different technologies
RDFLIB_CODES = { 'xml': 'xml' , 'ttl' : 'turtle', 'json' : 'json-ld', 'html' : 'html' , 'rdf' : 'xml' }
GEOSERVER_CODES = { 'xml': 'gml3' , 'gml' : 'gml3', 'json' : 'json', 'kml' : 'kml' }
ELDA_CODES = { 'xml': 'xml' , 'ttl' : 'ttl', 'json' : 'json', 'html' : 'html' , 'rdf' : 'rdf' }
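# Each map is keyed by a MediaType file extension; the value is what the target
# backend expects in its format parameter: rdflib serializer names for the
# rdf_io views, GeoServer WFS ``outputFormat`` values, and ELDA/LDA ``_format``
# values. _new_rule() below appends these to each redirect target via its
# ``format_param`` argument (default ``_format``).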
def load_base_namespaces():
"""
load namespaces for the meta model
"""
_loadNamespace( uri='http://www.w3.org/1999/02/22-rdf-syntax-ns#', prefix='rdf' , defaults={ 'notes' : 'RDF' } )
_loadNamespace( uri='http://www.w3.org/2000/01/rdf-schema#', prefix='rdfs' , defaults = { 'notes' : 'RDFS' } )
_loadNamespace( uri='http://www.w3.org/2004/02/skos/core#', prefix='skos' , defaults = { 'notes' : 'SKOS' } )
_loadNamespace( uri='http://www.w3.org/2008/05/skos-xl#', prefix='skosxl' , defaults = { 'notes' : 'SKOSXL' } )
_loadNamespace( uri='http://xmlns.com/foaf/0.1/', prefix='foaf' , defaults = { 'notes' : 'FOAF' } )
_loadNamespace( uri='http://purl.org/dc/terms/', prefix='dct' , defaults = { 'notes' : 'Dublin Core Terms' } )
_loadNamespace( uri='http://www.w3.org/ns/dcat#', prefix='dcat' , defaults = { 'notes' : 'DCAT' } )
_loadNamespace( uri='http://www.w3.org/2001/XMLSchema#', prefix='xsd' , defaults = { 'notes' : 'XSD' } )
_loadNamespace( uri='http://id.sirf.net/def/schema/lid/', prefix='lid' , defaults = { 'notes' : 'LID - allows characterisation of resources such as VoiD:technicalFeatures against Linked Data API view names' } )
_loadNamespace( uri='http://rdfs.org/ns/void#', prefix='void' , defaults = { 'notes' : 'VoiD - vocabulary of interlinked datasets' } )
_loadNamespace( uri='http://www.w3.org/2003/01/geo/wgs84_pos#', prefix='geo' , defaults = { 'notes' : 'geo WGS84 positioning' } )
# TODO - should point to stable defs once published
_loadNamespace( uri='https://gazetteer.mapstory.org/def/ontology/mapstory_api/', prefix='msapi' , defaults = { 'notes' : 'Mapstory API definitions - VoiD descriptions to generate links to resources' } )
_loadNamespace( uri='https://gazetteer.mapstory.org/def/ontology/geonode_api/', prefix='gnapi' , defaults = { 'notes' :
# TODO - global master or local FT list?
'Geonode API definitions - VoiD descriptions to generate links to resources' } )
_loadNamespace( uri=TARGET_NAMESPACE_FT, prefix='gft' , defaults = { 'notes' : 'Gazetteer Feature Types'} )
# these are for generated resources and should be synced to SITEURL - unless some other hostname spoofing technique is to be used.
_loadNamespace( uri=''.join((SITEURL,'/def/gazetteer/sources/')), prefix='gazsrc' , defaults = { 'notes' : 'Gazetteer sources - uploaded layers from which locations are mapped' } )
_loadNamespace( uri=''.join((SITEURL,'/def/gazetteer/index/')), prefix='gaz' , defaults = { 'notes' : 'Master gazetteer dataset - the index of all place names' } )
_loadNamespace( uri='https://gazetteer.mapstory.org/def/ft/', prefix='gftsrc' , defaults = { 'notes' : 'source feature type codes' } )
print "loading base namespaces"
def _loadNamespace(uri,prefix,defaults):
"""Brutally load namespace killing any existing namespace with uri or prefix that matches"""
msg = ""
try:
pre = Namespace.objects.get(uri=uri)
msg = "Replacing ns with URI %s" % uri
pre.delete()
except:
pass
try:
pre = Namespace.objects.get(prefix=prefix)
msg = " , ".join(("Replacing ns with Prefix %s" % prefix,msg))
pre.delete()
except:
pass
Namespace.objects.get_or_create( uri=uri, prefix=prefix, defaults = defaults )
# TODO should log these I guess.
return msg
def _rdf4j_push_context(rdfstore, resttgt, model, obj, gr ):
#import pdb; pdb.set_trace()
headers = {'Content-Type': 'application/x-turtle;charset=UTF-8'}
for h in rdfstore.get('headers') or [] :
headers[h] = _resolveTemplate( rdfstore['headers'][h], model, obj )
result = requests.put( resttgt, headers=headers , data=gr.serialize(format="turtle"))
logger.info ( "Updating resource {} {}".format(resttgt,result.status_code) )
if result.status_code > 400 :
# print "Posting new resource"
# result = requests.post( resttgt, headers=headers , data=gr.serialize(format="turtle"))
logger.error ( "Failed to publish resource {} {}".format(resttgt,result.status_code) )
return HttpResponse ("Failed to publish resource {} {}".format(resttgt,result.status_code) , status = result.status_code )
return result
def _clean_rules(label):
try:
apirule = RewriteRule.objects.get(label=label)
AcceptMapping.objects.filter(rewrite_rule=apirule).delete()
RewriteRule.objects.get(label=label).delete()
except:
pass
def _new_rule(formats, label='', elda_tweak=False, parent=None, register=None,service_location=None,pattern=None,view_pattern=None,description='',tgt='', format_param='_format' ):
"""Sets up a basic rule using LDA defaults and an mapping of available media types"""
#_clean_rules(label)
(rule,created) = RewriteRule.objects.get_or_create(label=label , defaults = {
'description' : description,
'parent' : parent ,
'register' : register ,
'service_location' : service_location,
'service_params' : None ,
'pattern' : pattern ,
'use_lda' : True ,
'view_param' : '_view' ,
'view_pattern' : view_pattern} )
if not created:
for attr,val in ( ('label',label), ('parent',parent), ('register',register),('service_location',service_location),('pattern',pattern),('view_pattern',view_pattern),('description',description)) :
if val and hasattr(rule, attr):
setattr(rule, attr, val)
rule.save(force_update=True)
for ext in formats.keys() :
mt = MediaType.objects.get(file_extension=ext)
# avoid bug in ELDA HTML - using file extensions but ignoring the fact format overrides it
if elda_tweak and ext == 'html' :
fmt = ''
else:
fmt = ''.join(('&',format_param,'=',formats[ext]))
try:
accept = AcceptMapping.objects.get(rewrite_rule=rule,media_type=mt)
accept.redirect_to = "".join((tgt, fmt))
accept.save()
except:
AcceptMapping.objects.create(rewrite_rule=rule,media_type=mt, redirect_to="".join((tgt, fmt)) )
return rule
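# In short, _new_rule() creates or updates one uriredirect RewriteRule and, for
# every extension in ``formats``, an AcceptMapping whose redirect target is
# ``tgt`` plus ``&<format_param>=<value>``. With ``elda_tweak`` the html mapping
# gets an empty format suffix, working around ELDA preferring the file extension
# over an explicit format override.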
def load_urirules() :
"""Load uriredirect rules for Gazetteer object types.
Loads a set of URL rewriting rules for objects managed or specified as links in the Gazetteer model
these namespaces apply:
Note - we could chain to the SKOSXL module to load rules for vocabularies - at the moment these are specified in the settings via SKOSPATHS=('ft','featuretypes') and loaded by the SKOSXL module.
"""
sep = '/'
if SITEURL[-1:] == '/' :
sep = ''
try:
defaultroot = sep.join((SITEURL,"def"))
except:
defaultroot = sep.join((SITEURL[0],"def"))
(reg,created) = UriRegister.objects.get_or_create(label='gazetteer', defaults = { 'url' : '/'.join((defaultroot,'gazetteer')) , 'can_be_resolved' : True} )
indexbaserule=_new_rule(RDFLIB_CODES,label = 'Gazetteer Master Index Metadata', register=reg,service_location="".join((RDFSERVER,"/dna")), pattern='index$',description='Rules for Gazetteer master index metadata',tgt='${server}/skos/resource?uri=${uri}')
label = 'Gazetteer metadata'
viewlist = [ {'name': 'alternates', 'formats' : ELDA_CODES , 'apipath': '${server}/lid/resourcelist?baseuri=${uri}&item=None' },
{'name': 'lid', 'formats' : ELDA_CODES , 'apipath': '${server}/skos/resource?uri=${uri}' },
]
for view in viewlist:
id = ' : '.join((label,"view",view['name']))
_new_rule(view['formats'], elda_tweak=True, parent=indexbaserule, label = id, view_pattern= view['name'], description=' : '.join((label,view['name'])) , tgt=view['apipath'] )
locationbaserule=_new_rule(RDFLIB_CODES,label = 'Gazetteer Master Index', register=reg,service_location=SITEURL, pattern='index/(?P<id>\d+)$',description='Rules for Gazetteer master index items',tgt='${server}/rdf_io/to_rdf/location/id/$1?')
label = 'Gazetteer index'
viewlist = [ {'name': 'alternates', 'formats' : ELDA_CODES , 'apipath': "".join((RDFSERVER,'/dna/lid/resourcelist?baseuri=${server}/def/gazetteer/${path_base}&item=${term}')) },
{'name': 'lid', 'formats' : RDFLIB_CODES , 'apipath': ''.join((SITEURL,'/rdf_io/to_rdf/location/id/$1?')) },
]
for view in viewlist:
id = ' : '.join((label,"view",view['name']))
_new_rule(view['formats'], elda_tweak=True, parent=locationbaserule, label = id, view_pattern= view['name'], description=' : '.join((label,view['name'])) , tgt=view['apipath'] )
_new_rule(GEOSERVER_CODES, elda_tweak=False, parent=locationbaserule, label = 'Gazetteer Master Index WFS binding', view_pattern='msapi:wfs', description='WFS call using ID for Gazetteer master index items' , tgt='${server}/geoserver/geonode/ows?service=WFS&version=1.0.0&request=GetFeature&typeName=geonode:gazetteer&CQL_FILTER=id%3D$1', format_param='outputFormat' )
sourcebaserule=_new_rule(ELDA_CODES, elda_tweak=True,
register=reg, label = 'Gazetteer Sources', service_location= "".join((RDFSERVER,"/dna")),
pattern='sources/(?P<source>[^\?]+)',
description='Rules for Gazetteer sources - spatial data objects registered in Linked Data view' ,
tgt='${server}/skos/resource?uri=${uri}' )
label = 'Gazetteer source data'
viewlist = [ {'name': 'alternates', 'formats' : ELDA_CODES , 'apipath': '${server}/lid/resourcelist?baseuri=${uri}&item=None' },
{'name': 'lid', 'formats' : ELDA_CODES , 'apipath': '${server}/skos/resource?uri=${uri}' },
{'name': 'name', 'formats' : { 'json' : 'json' }, 'apipath': ''.join((SITEURL,'/gazetteer/location/find?name=$q{name}')) },
{'name': 'namestart', 'formats' : { 'json' : 'json' }, 'apipath': ''.join((SITEURL,'/gazetteer/location/find?name=$q{name}')) },
]
for view in viewlist:
id = ' : '.join((label,"view",view['name']))
_new_rule(view['formats'], elda_tweak=True, parent=sourcebaserule, label = id, view_pattern= view['name'], description=' : '.join((label,view['name'])) , tgt=view['apipath'] )
def load_rdf_mappings():
"""
load RDF mappings for Gazetteer Objects - locations and data sources
"""
# Source Dataset - including minimal attribute mappings to to allow clients to link from source to gazetteer search.
(object_type,created) = ObjectType.objects.get_or_create(uri="msapi:SourceDataset", defaults = { "label" : "MapStory Layer" })
# quote the target URI namespace as its a constant, not pulled from the model
pm = new_mapping(object_type, "GazSource", "Gazetteer Source", "source", ''.join(('"',SITEURL,'/def/gazetteer/sources/"')), True )
# specific mapping
am = AttributeMapping(scope=pm, attr="filter", predicate="msapi:sourceFilter", is_resource=False).save()
am = AttributeMapping(scope=pm, attr="config.namefieldconfig.field", predicate="msapi:attr", is_resource=False).save()
em = EmbeddedMapping(scope=pm, attr="config.codefieldconfig" , predicate="msapi:codefield", struct="""msapi:attr field ; msapi:namespace namespace""" , use_blank=True ).save()
(object_type,created) = ObjectType.objects.get_or_create(uri="msapi:Location", defaults = { "label" : "Gazetteer Location" })
# quote the target URI namespace as its a constant, not pulled from the model
pm = new_mapping(object_type, "Location", "Gazetteer entry", "id", ''.join(('"',SITEURL,'/def/gazetteer/index/"')), False )
# specific mapping
am = AttributeMapping(scope=pm, attr="locationType.term", predicate="msapi:locationTypeCode", is_resource=False).save()
am = AttributeMapping(scope=pm, attr="locationType.uri", predicate="msapi:locationType", is_resource=True).save()
am = AttributeMapping(scope=pm, attr="latitude", predicate="geo:lat", is_resource=False).save()
am = AttributeMapping(scope=pm, attr="longitude", predicate="geo:long", is_resource=False).save()
am = AttributeMapping(scope=pm, attr="locationname.name@language", predicate="skos:altLabel", is_resource=False).save()
am = AttributeMapping(scope=pm, attr="defaultName", predicate="skos:prefLabel", is_resource=False).save()
em = EmbeddedMapping(scope=pm, attr="locationname[namespace=]" , predicate="msapi:code", struct="""msapi:name name ; msapi:language language ; msapi:namespace namespace ; msapi:startDate startDate ; msapi:endDate endDate ; msapi:source nameUsed.source""" ).save()
em = EmbeddedMapping(scope=pm, attr="locationname[namespace=None]" , predicate="msapi:name", struct="""msapi:name name ; msapi:language language ; msapi:namespace namespace ; msapi:startDate startDate ; msapi:endDate endDate ; msapi:source nameUsed.source """ ).save()
em = EmbeddedMapping(scope=pm, attr="id" , predicate="rdfs:seeAlso", struct="<{$URI}?_view=alternates>" ).save()
em = EmbeddedMapping(scope=pm, attr="locationname[namespace=].nameUsed" , predicate="msapi:codesource", struct="""msapi:source source; rdfs:seeAlso <%s/gazetteer/location/{^id}/sourcewfs/{source}> ; rdfs:seeAlso <%s/def/gazetteer/sources/{source}?_view=alternates>""" % (SITEURL,SITEURL) ).save()
def new_mapping(object_type,content_type_label, title, idfield, tgt, autopush):
content_type = ContentType.objects.get(app_label="gazetteer",model=content_type_label.lower())
ObjectMapping.objects.filter(name=title).delete()
(pm,created) = ObjectMapping.objects.get_or_create(name=title, defaults =
{ "auto_push" : autopush ,
"id_attr" : idfield,
"target_uri_expr" : tgt,
"content_type" : content_type
})
if not created :
AttributeMapping.objects.filter(scope=pm).delete()
EmbeddedMapping.objects.filter(scope=pm).delete()
pm.obj_type.add(object_type)
pm.save()
return pm | cc0-1.0 | -6,787,660,445,330,739,000 | 59.702381 | 372 | 0.648209 | false |
i-namekawa/TopSideMonitor | topsidecameras.py | 1 | 11038 | import datetime, os, subprocess, sys
from time import time, sleep
import cv2
import numpy as np
cwd = os.getcwd()
try:
import motmot.cam_iface.cam_iface_ctypes as cam_iface
if not cam_iface.get_num_cameras():
print 'No IEEE1394 camera found'
cam_iface = None
except ImportError:
cam_iface = None
os.chdir(cwd)
def twocameras(camerafps, videofps, datafolder, maxTime, qscale, textOn, textcolor, textpos, parent):
# top view camera
    if cam_iface: # initialize IEEE1394 camera when available
'''
IEEE1394 camera is controlled by a patched motmot.cam_iface-0.5.4 module
on Python 2.7 and fview-flytrax-0.6.5 (cam_iface_mega.dll)
from the motmot project (http://code.astraw.com/projects/motmot/).
For this patch, egg is deleted and source files in build\lib\motmot\cam_iface
are manually copied and __init__.py added to make it a python package.
add unicode to this line in cam_iface_ctypes.py
line36: backend_path = unicode(os.environ.get('CAM_IFACE_CTYPES_PATH',None))
Then, set the enviromental variable "CAM_IFACE_CTYPES_PATH" to
r'C:\Program Files (x86)\fview-flytrax-0.6.5'.
'''
mode_num = 0
device_num = 0
num_buffers = 32
cam = cam_iface.Camera(device_num, num_buffers, mode_num)
cam.start_camera()
frame = cam.grab_next_frame_blocking()
height, width = frame.shape
else:
# -1: if there more than one, a dialog to choose will pop up
cam = cv2.VideoCapture(0)
success, frame = cam.read()
if success:
height, width, _ = frame.shape
print height, width
else:
print 'top webcam failed to initialize'
return False
# side view camera
if not cam_iface:
webcam = cv2.VideoCapture(1)
else:
# -1: choose a 2nd camera from dialog
webcam = cv2.VideoCapture(-1)
success, frame = webcam.read()
if success:
height, width, _ = frame.shape
print height, width
else:
print 'side webcam failed to initialize'
return False
webcam.set(cv2.cv.CV_CAP_PROP_FPS, camerafps)
# initilize parameters
recording = False
tracking = False
fno = 0
if parent:
parent.gauge.SetValue(0)
_x = 1280-width-15 # hardcoding x position
cv2.namedWindow('TopView')
cv2.moveWindow('TopView', x=_x, y=30)
cv2.namedWindow('SideView')
cv2.moveWindow('SideView', x=_x, y=1024-height+52)
while True:
if cam_iface:
frame1 = cam.grab_next_frame_blocking()
else:
success, frame1 = cam.read()
frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
success, frame2 = webcam.read()
if success:
if recording:
elapsed = datetime.timedelta(seconds=time() - t0)
if textOn:
cv2.putText(frame1,
'elapsed %s, frame %d'%(str(elapsed)[:-4], fno),
textpos, cv2.FONT_HERSHEY_SIMPLEX, 0.4, textcolor)
buf = np.vstack( (frame1, cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)) )
p.stdin.write( buf.tostring() ) # faster than PIL
if maxTime < time()-t0 and maxTime > 0:
p.stdin.close()
recording = False
fno = 0
if parent:
parent.datafolder.SetBackgroundColour((255,255,255,255))
parent.gauge.SetValue(0)
parent.LED.SetValue('0')
parent.Refresh()
sleep(1)
if maxTime:
print '\n%d s of recording done.\nFile: %s\n' % (maxTime, fp)
elif parent:
parent.LED.SetValue( "%02d %02d %02d" % (elapsed.seconds // 3600,
elapsed.seconds//60 % 60,
elapsed.seconds % 60 ))
#parent.gauge.SetValue(int((fno+1)/(maxFrame-1)*100))
if maxFrame:
parent.gauge.SetValue((fno+1+1)/(maxFrame-1)*100)
fno +=1
if tracking:
mog1.apply(frame1.copy(), fgmask1, -3)
mog2.apply(frame2.copy(), fgmask2, -3)
#cv2.updateMotionHistory(fgmask1, mhi1, time()-t0, 3)
#cv2.updateMotionHistory(fgmask2, mhi2, time()-t0, 3)
#cv2.imshow("TopView", mhi1)
#cv2.imshow("SideView", mhi2)
cv2.imshow("TopView", fgmask1)
cv2.imshow("SideView", fgmask2)
else:
cv2.line(frame1, pt1=(0,240), pt2=(640,240),color=155)
cv2.line(frame1, pt1=(610,0), pt2=(610,480),color=155)
cv2.imshow("TopView", frame1)
# vertical line
cv2.line(frame2, pt1=(320,0), pt2=(320,480),color=(55,255,255))
# horizontal lines
cv2.line(frame2, pt1=(0,45), pt2=(640,45),color=(55,255,255))
cv2.line(frame2, pt1=(0,225), pt2=(640,225),color=(55,255,255))
# inflow tubes
cv2.line(frame2, pt1=(40,66), pt2=(46,111),color=(55,255,255))
cv2.line(frame2, pt1=(600,66), pt2=(594,111),color=(55,255,255))
cv2.imshow("SideView", frame2)
char = cv2.waitKey(1) # need this to update windows
if char == 27: # ESC
if parent:
if not parent.lock.GetValue():
break
else:
break
elif char == 116: # "T"
if not tracking and not recording:
mog1 = cv2.BackgroundSubtractorMOG(
history = 30,
nmixtures = 3, # normally 3-5
backgroundRatio = 0.1, # normally 0.1-0.9
noiseSigma = 15 # nomally 15
)
fgmask1 = np.zeros((height, width), dtype=np.uint8)
mhi1 = np.zeros((height, width), dtype=np.float32)
mog2 = cv2.BackgroundSubtractorMOG(
history = 30,
nmixtures = 3, # normally 3-5
backgroundRatio = 0.1, # normally 0.1-0.9
noiseSigma = 15 # nomally 15
)
height2, width2, colordepth = frame2.shape
fgmask2 = np.zeros((height2, width2), dtype=np.uint8)
mhi2 = np.zeros((height2, width2), dtype=np.float32)
t0 = time()
tracking = True
elif not recording:
tracking = False
elif char == 114: # "R"
if recording:
if parent:
if not parent.lock.GetValue():
recording = False
p.stdin.close()
fno = 0
else:
recording = False
p.stdin.close()
fno = 0
if parent:
parent.datafolder.SetBackgroundColour((255,255,255,255))
parent.gauge.SetValue(0)
parent.LED.SetValue('0')
parent.Refresh()
else:
if parent:
videofps = parent.videofps.GetValue()
camerafps = parent.camerafps.GetValue()
textpos = tuple([int(aa) for aa in parent.textpos.GetValue().split(',')])
textcolor = int(parent.textcolor.GetValue())
textOn = parent.textOn.GetValue()
qscale = parent.qscale.GetValue()
if parent.freeRun.GetValue():
maxTime = None
maxFrame = 0
else:
maxFrame = parent.maxFrame.GetValue()
maxTime = maxFrame / camerafps
#maxFrame = float(maxTime * camerafps)
print 'maxFrame', maxFrame
recording = True
fname = datetime.datetime.today().strftime( "%b-%d-%Y_%H_%M_%S.avi" )
fp = os.path.join(datafolder, fname)
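                # The recording branch above stacks the two camera frames
                # vertically (np.vstack) into a single grayscale image, which is
                # why the ffmpeg input size below is width x height*2 with
                # '-pix_fmt gray'. Raw frames are piped into ffmpeg's stdin and
                # encoded as MPEG-4; qscale values above 31 are treated as a
                # bitrate via '-b' instead of '-qscale'.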
cmdstring = ['.\\resources\\ffmpeg.exe',
'-y',
'-r', '%f' % videofps,
#'-s', 'vga',
'-s', '%d, %d' % (width, height*2),
'-an',
'-analyzeduration', '0', # skip auto codec analysis
'-vf', 'scale=0',
'-f', 'rawvideo',
'-pix_fmt', 'gray',
'-vcodec', 'rawvideo',
'-i', '-',
#'-pix_fmt','yuv420p',
'-vcodec', 'mpeg4'] # should be same as libxvid
if qscale>31:
cmdstring.append('-b')
else:
cmdstring.append('-qscale')
cmdstring.extend([str(qscale),fp])
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(cmdstring,
stdin=subprocess.PIPE,
bufsize=-1,
startupinfo=startupinfo
)
t0 = time()
if parent:
parent.datafolder.SetBackgroundColour((255,255,100, 155))
parent.Refresh()
# before exiting process
if cam_iface:
cam.close()
else:
cam.release()
webcam.release()
cv2.destroyAllWindows()
if parent:
parent.gauge.SetValue(0)
parent.LED.SetValue('0')
parent.Refresh()
return True
if __name__ == '__main__':
camera = twocameras(
camerafps=30,
videofps=10,
datafolder=r'D:\Data\itoiori\zebralab\2015\2015-02-16\test',
maxTime=5,
qscale=5,
textOn=False,
textcolor=222,
textpos=(100,220),
parent=None
)
| bsd-3-clause | -5,700,741,396,932,234,000 | 35.916388 | 101 | 0.453887 | false |
waynegm/OpendTect-External-Attributes | Python_3/Jupyter/pwc_tvdip.py | 1 | 7391 | """
Ported by Massimo Vassalli [http://mv.nanoscopy.eu [email protected]]
"""
import scipy.sparse as sparse
import scipy.sparse.linalg as splin
import numpy.linalg as linalg
import numpy as np
def pwc_tvdip(y, lamb=[1.0], display=True, stoptol=1e-3, maxiter=60, full=False):
# Performs discrete total variation denoising (TVD) using a primal-dual
# interior-point solver. It minimizes the following discrete functional:
#
# E=(1/2)||y-x||_2^2+lambda*||Dx||_1,
#
# over the variable x, given the input signal y, according to each
# value of the regularization parameter lambda > 0. D is the first
# difference matrix. Uses hot-restarts from each value of lambda to speed
# up convergence for subsequent values: best use of this feature is made by
# ensuring that the chosen lambda values are close to each other.
#
# Usage:
# [x, E, s, lambdamax] = pwc_tvdip(y, lambda, display, stoptol, maxiter)
#
# Input arguments:
# - y Original signal to denoise, size N x 1.
# - lambda A vector of positive regularization parameters, size L x 1.
# TVD will be applied to each value in the vector.
# - display (Optional) Set to 0 to turn off progress display, 1 to turn
# on. If not specifed, defaults to progress display on.
# - stoptol (Optional) Precision as determined by duality gap tolerance,
# if not specified, defaults to 1e-3.
# - maxiter (Optional) Maximum interior-point iterations, if not
# specified defaults to 60.
#
# Output arguments:
# - x Denoised output signal for each value of lambda, size N x L.
# - E Objective functional at minimum for each lambda, size L x 1.
# - s Optimization result, 1 = solved, 0 = maximum iterations
# exceeded before reaching duality gap tolerance, size L x 1.
# - lambdamax Maximum value of lambda for the given y. If
# lambda >= lambdamax, the output is the trivial constant
# solution x = mean(y).
#
# (c) Max Little, 2011. Based around code originally written by
# S.J. Kim, K. Koh, S. Boyd and D. Gorinevsky. If you use this code for
# your research, please cite:
# M.A. Little, Nick S. Jones (2011)
# "Generalized Methods and Solvers for Noise Removal from Piecewise
# Constant Signals: Part I - Background Theory"
# Proceedings of the Royal Society A (in press)
#
# This code is released under the terms of GNU General Public License as
# published by the Free Software Foundation; version 2 or later.
y = np.array(y)
# Search tuning parameters
ALPHA = 0.01 # Backtracking linesearch parameter (0,0.5]
BETA = 0.5 # Backtracking linesearch parameter (0,1)
MAXLSITER = 20 # Max iterations of backtracking linesearch
MU = 2 # t update
N = len(y) # Length of input signal y
M = N-1 # Size of Dx
# Construct sparse operator matrices
O1 = sparse.lil_matrix((M,M+1))
O2 = sparse.lil_matrix((M,M+1))
for i in range(M):
O1[i,i]=1.0
O2[i,i+1]=1.0
D = O1-O2
DDT = D.dot(D.transpose())
Dy = D.dot(y)
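    # D = O1 - O2 is the (N-1) x N first-difference operator (row i of D*x is
    # x[i] - x[i+1]), DDT = D*D^T is the tridiagonal matrix used in the dual
    # problem below, and Dy is the first difference of the input signal.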
# Find max value of lambda
lambdamax = np.max(np.abs(splin.spsolve(DDT,Dy)))
if (display):
print('lambda_max={0}'.format(lambdamax))
L = len(lamb)
x = np.zeros((N, L))
s = np.zeros(L)
E = np.zeros(L)
# Optimization variables set up once at the start
z = np.zeros(M) # Dual variable
mu1 = np.ones(M) # Dual of dual variable
mu2 = np.ones(M) # Dual of dual variable
# Work through each value of lambda, with hot-restart on optimization
# variables
for l in range(L):
        t = 1e-10
        step = np.Inf
        f1 = z - lamb[l]
        f2 = -z - lamb[l]
        # Main optimization loop
        s[l] = True
if (display):
print('Solving for lambda={0}, lambda/lambda_max={1}\nIter# Primal Dual Gap'.format(lamb[l], lamb[l]/lambdamax))
for iters in range(maxiter):
DTz = (z*D)
DDTz = D*DTz
w = Dy-(mu1-mu2)
# Calculate objectives and primal-dual gap
pobj1 = 0.5*w.dot(splin.spsolve(DDT,w))+lamb[l]*np.sum(mu1+mu2)
pobj2 = 0.5*DTz.dot(DTz)+lamb[l]*np.sum(np.abs(Dy-DDTz))
pobj = min(pobj1,pobj2)
dobj = -0.5*DTz.dot(DTz)+Dy.dot(z)
gap = pobj - dobj
if (display):
print('{0} -- {1} -- {2} -- {3}'.format(iters, pobj, dobj, gap))
if (gap <= stoptol):
s[l] = True
                break
if (step >= 0.2):
t = max(2*M*MU/gap, 1.2*t)
# Do Newton step
rz = DDTz - w
SSS = sparse.lil_matrix((M,M))
for i in range(M):
SSS[i,i]=(mu1/f1+mu2/f2)[i]
S = DDT - SSS
r = -DDTz + Dy + (1/t)/f1 - (1/t)/f2
dz=splin.spsolve(S,r)
dmu1 = -(mu1+((1/t)+dz*mu1)/f1)
dmu2 = -(mu2+((1/t)-dz*mu2)/f2)
resDual = rz
            resCent = np.concatenate((-mu1*f1 - 1/t, -mu2*f2 - 1/t))
            residual = np.concatenate((resDual, resCent))
negIdx2 = (dmu2 < 0)
negIdx1 = (dmu1 < 0)
            step = 1.0
            if (negIdx1.any()):
                step = min(step, 0.99*min(-mu1[negIdx1]/dmu1[negIdx1]))
if (negIdx2.any()):
step = min( step, 0.99*min(-mu2[negIdx2]/dmu2[negIdx2]) )
for liter in range(MAXLSITER):
newz = z + step*dz
newmu1 = mu1 + step*dmu1
newmu2 = mu2 + step*dmu2
newf1 = newz - lamb[l]
newf2 = -newz - lamb[l]
newResDual = DDT*newz - Dy + newmu1 - newmu2
newResCent = np.concatenate( (-newmu1*newf1-1/t,-newmu2*newf2-1/t) )
newResidual = np.concatenate( (newResDual,newResCent) )
if ( (max(max(newf1),max(newf2)) < 0) and (linalg.norm(newResidual) <= (1-ALPHA*step)*linalg.norm(residual)) ):
break
step = BETA*step
z = newz
mu1 = newmu1
mu2 = newmu2
f1 = newf1
f2 = newf2
x[:,l] = y-np.transpose(D)*z
E[l] = 0.5*sum((y-x[:,l])**2)+lamb[l]*sum(abs(D*x[:,l]))
# We may have a close solution that does not satisfy the duality gap
        if (iters >= maxiter - 1) and (gap > stoptol):
s[l] = False
if (display):
if (s[l]==True):
print('Solved to precision of duality gap {0}'.format( gap))
else:
print('Max iterations exceeded - solution may be inaccurate')
if full:
return x, E, s, lambdamax
return x
if __name__ == "__main__":
y = [1 ,1.1, 0.9, 1.1, 0.95, 2.1, 1.95, 2.0, 2.05, 3.11, 2.99, 3.05, 3.0]
print('Perform test')
x = pwc_tvdip(y,[1.0])
print(x)
| mit | 3,905,015,046,271,806,500 | 36.902564 | 132 | 0.526045 | false |
gauravssnl/python3-network-programming | big_udp_sender.py | 1 | 1217 | #!/usr/bin/env python3
# Send a big UDP datagram to learn the MTU of the network path.
try:
import IN
except:
import sys
# change this path as required. I am using virtualenv
sys.path.append('/usr/lib/python3.5/plat-x86_64-linux-gnu/'
)
import IN
import socket
if not hasattr(IN, 'IP_MTU'):
    raise RuntimeError('cannot perform MTU discovery on this combination of OS & Python distribution')
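# On Linux, IP_MTU_DISCOVER = IP_PMTUDISC_DO marks outgoing datagrams
# "don't fragment", so a send() larger than the path MTU fails locally instead
# of being fragmented; getsockopt(IPPROTO_IP, IN.IP_MTU) on a connected socket
# then reports the kernel's current path-MTU estimate for that destination.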
def send_big_datagram(host, port):
print(host, port)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.IPPROTO_IP, IN.IP_MTU_DISCOVER, IN.IP_PMTUDISC_DO)
sock.connect((host, port))
try:
sock.send(b'#' * 65000)
except socket.error:
        print('Alas, the datagram did not make it')
max_mtu = sock.getsockopt(socket.IPPROTO_IP, IN.IP_MTU)
print('Actual MTU: {}'.format(max_mtu))
else:
print('The big datagram was sent!')
if __name__ == '__main__':
import argparse
parser= argparse.ArgumentParser(description='Send UDP packet to get MTU')
parser.add_argument('host', help='host to which to target the packet')
parser.add_argument('-p', metavar='PORT', type=int, default=1060, help='UDP port(default 1060)')
args = parser.parse_args()
send_big_datagram(args.host, args.p) | mit | -1,474,168,285,964,429,300 | 30.230769 | 99 | 0.716516 | false |
cassiopaixao/simmycloud | simmycloud/strategies/prediction/last_five_measurements_prediction.py | 1 | 2109 | ###############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2013 Cassio Paixao
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###############################################################################
from core.strategies import PredictionStrategy
class LastFiveMeasurementsPrediction(PredictionStrategy):
def initialize(self):
self.measurement_reader = self._config.module['MeasurementReader']
@PredictionStrategy.predict_strategy
def predict(self, vm_name):
last_five = self._get_last_five(vm_name)
cpu_average = sum(m[self.measurement_reader.CPU] for m in last_five) / len(last_five)
mem_average = sum(m[self.measurement_reader.MEM] for m in last_five) / len(last_five)
return (min(cpu_average, 1.0),
min(mem_average, 1.0))
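    # The prediction is simply the mean of the last five CPU and memory
    # measurements, clamped at 1.0 (full machine capacity). How the simulator
    # instantiates this strategy and supplies the MeasurementReader module is
    # assumed from the config access above and is not shown in this file.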
def _get_last_five(self, vm_name):
measurements = self.measurement_reader.n_measurements_till(
vm_name,
5,
self._config.simulation_info.current_timestamp)
return measurements
| mit | -4,115,986,671,792,250,400 | 44.847826 | 93 | 0.671882 | false |
chaiku/fedmsg | fedmsg/core.py | 1 | 19301 | # This file is part of fedmsg.
# Copyright (C) 2012 - 2014 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <[email protected]>
#
import getpass
import socket
import threading
import datetime
import six
import time
import uuid
import warnings
import weakref
import zmq
from kitchen.iterutils import iterate
from kitchen.text.converters import to_bytes
import fedmsg.encoding
import fedmsg.crypto
from fedmsg.utils import (
set_high_water_mark,
guess_calling_module,
set_tcp_keepalive,
set_tcp_reconnect,
)
from fedmsg.replay import check_for_replay
import logging
class ValidationError(Exception):
""" Error used internally to represent a validation failure. """
def __init__(self, msg):
self.msg = msg
class FedMsgContext(object):
# A counter for messages sent.
_i = 0
def __init__(self, **config):
super(FedMsgContext, self).__init__()
self.log = logging.getLogger(__name__)
self.c = config
self.hostname = socket.gethostname().split('.', 1)[0]
# Prepare our context and publisher
self.context = zmq.Context(config['io_threads'])
method = ['bind', 'connect'][config['active']]
# If no name is provided, use the calling module's __name__ to decide
# which publishing endpoint to use (unless active=True, in which case
# we use "relay_inbound" as set in the subsequent code block).
if not config.get("name", None) and not config.get('active', False):
module_name = guess_calling_module(default="fedmsg")
config["name"] = module_name + '.' + self.hostname
if config["name"].startswith('fedmsg'):
config["name"] = None
# Do a little special-case mangling. We never want to "listen" to the
# relay_inbound address, but in the special case that we want to emit
# our messages there, we add it to the :ref:`conf-endpoints` dict so that
# the code below where we "Actually set up our publisher" can be
# simplified. See Issue #37 - https://bit.ly/KN6dEK
if config.get('active', False):
try:
name = config['name'] = config.get("name", "relay_inbound")
config['endpoints'][name] = config[name]
except KeyError:
raise KeyError("Could not find endpoint for fedmsg-relay."
" Try installing fedmsg-relay.")
# Actually set up our publisher, but only if we're configured for zmq.
if (
config.get('zmq_enabled', True) and
not config.get("mute", False) and
config.get("name", None) and
config.get("endpoints", None) and
config['endpoints'].get(config['name'])
):
# Construct it.
self.publisher = self.context.socket(zmq.PUB)
set_high_water_mark(self.publisher, config)
set_tcp_keepalive(self.publisher, config)
# Set a zmq_linger, thus doing a little bit more to ensure that our
# message gets to the fedmsg-relay (*if* we're talking to the relay
# which is the case when method == 'connect').
if method == 'connect':
self.publisher.setsockopt(zmq.LINGER, config['zmq_linger'])
# "Listify" our endpoints. If we're given a list, good. If we're
# given a single item, turn it into a list of length 1.
config['endpoints'][config['name']] = list(iterate(
config['endpoints'][config['name']]))
# Try endpoint after endpoint in the list of endpoints. If we
# succeed in establishing one, then stop. *That* is our publishing
# endpoint.
_established = False
for endpoint in config['endpoints'][config['name']]:
self.log.debug("Trying to %s to %s" % (method, endpoint))
if method == 'bind':
endpoint = "tcp://*:{port}".format(
port=endpoint.rsplit(':')[-1]
)
try:
# Call either bind or connect on the new publisher.
# This will raise an exception if there's another process
# already using the endpoint.
getattr(self.publisher, method)(endpoint)
# If we can do this successfully, then stop trying.
_established = True
break
except zmq.ZMQError:
# If we fail to bind or connect, there's probably another
# process already using that endpoint port. Try the next
# one.
pass
# If we make it through the loop without establishing our
# connection, then there are not enough endpoints listed in the
# config for the number of processes attempting to use fedmsg.
if not _established:
raise IOError(
"Couldn't find an available endpoint "
"for name %r" % config.get("name", None))
elif config.get('mute', False):
# Our caller doesn't intend to send any messages. Pass silently.
pass
elif config.get('zmq_enabled', True):
# Something is wrong.
warnings.warn(
"fedmsg is not configured to send any zmq messages "
"for name %r" % config.get("name", None))
else:
# We're not configured to send zmq messages, but zmq_enabled is
# False, so no need to warn the user.
pass
# Cleanup. See https://bit.ly/SaGeOr for discussion.
# arg signature - weakref.ref(object [, callback])
weakref.ref(threading.current_thread(), self.destroy)
# Sleep just to make sure that the socket gets set up before anyone
# tries anything. This is a documented zmq 'feature'.
time.sleep(config['post_init_sleep'])
def destroy(self):
""" Destroy a fedmsg context """
if getattr(self, 'publisher', None):
self.log.debug("closing fedmsg publisher")
self.log.debug("sent %i messages" % self._i)
self.publisher.close()
self.publisher = None
if getattr(self, 'context', None):
self.context.term()
self.context = None
def send_message(self, topic=None, msg=None, modname=None):
warnings.warn(
".send_message is deprecated. Use .publish", DeprecationWarning)
return self.publish(topic, msg, modname)
def publish(self, topic=None, msg=None, modname=None,
pre_fire_hook=None, **kw):
"""
Send a message over the publishing zeromq socket.
>>> import fedmsg
>>> fedmsg.publish(topic='testing', modname='test', msg={
... 'test': "Hello World",
... })
The above snippet will send the message ``'{test: "Hello World"}'``
over the ``<topic_prefix>.dev.test.testing`` topic. The fully qualified
topic of a message is constructed out of the following pieces:
<:ref:`conf-topic-prefix`>.<:ref:`conf-environment`>.<``modname``>.<``topic``>
This function (and other API functions) do a little bit more
heavy lifting than they let on. If the "zeromq context" is not yet
initialized, :func:`fedmsg.init` is called to construct it and
store it as :data:`fedmsg.__local.__context` before anything else is
done.
**An example from Fedora Tagger -- SQLAlchemy encoding**
Here's an example from
`fedora-tagger <https://github.com/fedora-infra/fedora-tagger>`_ that
sends the information about a new tag over
``org.fedoraproject.{dev,stg,prod}.fedoratagger.tag.update``::
>>> import fedmsg
>>> fedmsg.publish(topic='tag.update', msg={
... 'user': user,
... 'tag': tag,
... })
Note that the `tag` and `user` objects are SQLAlchemy objects defined
by tagger. They both have ``.__json__()`` methods which
:func:`fedmsg.publish` uses to encode both objects as stringified
JSON for you. Under the hood, specifically, ``.publish`` uses
:mod:`fedmsg.encoding` to do this.
        ``fedmsg`` has also guessed the module name (``modname``) of its
caller and inserted it into the topic for you. The code from which
we stole the above snippet lives in
``fedoratagger.controllers.root``. ``fedmsg`` figured that out and
stripped it down to just ``fedoratagger`` for the final topic of
``org.fedoraproject.{dev,stg,prod}.fedoratagger.tag.update``.
**Shell Usage**
You could also use the ``fedmsg-logger`` from a shell script like so::
$ echo "Hello, world." | fedmsg-logger --topic testing
$ echo '{"foo": "bar"}' | fedmsg-logger --json-input
:param topic: The message topic suffix. This suffix is joined to the
configured topic prefix (e.g. ``org.fedoraproject``), environment
(e.g. ``prod``, ``dev``, etc.), and modname.
:type topic: unicode
:param msg: A message to publish. This message will be JSON-encoded
prior to being sent, so the object must be composed of JSON-
serializable data types. Please note that if this is already a
string JSON serialization will be applied to that string.
:type msg: dict
:param modname: The module name that is publishing the message. If this
is omitted, ``fedmsg`` will try to guess the name of the module
that called it and use that to produce an intelligent topic.
Specifying ``modname`` explicitly overrides this behavior.
:type modname: unicode
:param pre_fire_hook: A callable that will be called with a single
argument -- the dict of the constructed message -- just before it
is handed off to ZeroMQ for publication.
:type pre_fire_hook: function
"""
topic = topic or 'unspecified'
msg = msg or dict()
# If no modname is supplied, then guess it from the call stack.
modname = modname or guess_calling_module(default="fedmsg")
topic = '.'.join([modname, topic])
if topic[:len(self.c['topic_prefix'])] != self.c['topic_prefix']:
topic = '.'.join([
self.c['topic_prefix'],
self.c['environment'],
topic,
])
if isinstance(topic, six.text_type):
topic = to_bytes(topic, encoding='utf8', nonstring="passthru")
year = datetime.datetime.now().year
self._i += 1
msg = dict(
topic=topic.decode('utf-8'),
msg=msg,
timestamp=int(time.time()),
msg_id=str(year) + '-' + str(uuid.uuid4()),
i=self._i,
username=getpass.getuser(),
)
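        # The resulting envelope looks roughly like this (field values are
        # illustrative only):
        #   {'topic': 'org.fedoraproject.dev.fedoratagger.tag.update',
        #    'msg': {...}, 'timestamp': 1407149300, 'msg_id': '2014-<uuid4>',
        #    'i': 1, 'username': 'apache'}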
# Find my message-signing cert if I need one.
if self.c.get('sign_messages', False):
if not self.c.get("crypto_backend") == "gpg":
if 'cert_prefix' in self.c:
cert_index = "%s.%s" % (self.c['cert_prefix'],
self.hostname)
else:
cert_index = self.c['name']
if cert_index == 'relay_inbound':
cert_index = "shell.%s" % self.hostname
self.c['certname'] = self.c['certnames'][cert_index]
else:
if 'gpg_signing_key' not in self.c:
self.c['gpg_signing_key'] = self.c['gpg_keys'][self.hostname]
if self.c.get('sign_messages', False):
msg = fedmsg.crypto.sign(msg, **self.c)
store = self.c.get('persistent_store', None)
if store:
# Add the seq_id field
msg = store.add(msg)
if pre_fire_hook:
pre_fire_hook(msg)
# We handle zeromq publishing ourselves. But, if that is disabled,
# defer to the moksha' hub's twisted reactor to send messages (if
# available).
if self.c.get('zmq_enabled', True):
self.publisher.send_multipart(
[topic, fedmsg.encoding.dumps(msg).encode('utf-8')],
flags=zmq.NOBLOCK,
)
else:
# Perhaps we're using STOMP or AMQP? Let moksha handle it.
import moksha.hub
# First, a quick sanity check.
if not getattr(moksha.hub, '_hub', None):
raise AttributeError("Unable to publish non-zeromq msg "
"without moksha-hub initialization.")
# Let moksha.hub do our work.
moksha.hub._hub.send_message(
topic=topic,
message=fedmsg.encoding.dumps(msg).encode('utf-8'),
jsonify=False,
)
def tail_messages(self, topic="", passive=False, **kw):
"""
Subscribe to messages published on the sockets listed in :ref:`conf-endpoints`.
Args:
topic (six.text_type): The topic to subscribe to. The default is to
subscribe to all topics.
passive (bool): If ``True``, bind to the :ref:`conf-endpoints` sockets
instead of connecting to them. Defaults to ``False``.
**kw: Additional keyword arguments. Currently none are used.
Yields:
tuple: A 4-tuple in the form (name, endpoint, topic, message).
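        Example:
            A minimal usage sketch (assumes a configured fedmsg environment
            with reachable ZeroMQ endpoints):
            >>> import fedmsg
            >>> for name, endpoint, topic, msg in fedmsg.tail_messages():
            ...     print(topic)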
"""
if not self.c.get('zmq_enabled', True):
raise ValueError("fedmsg.tail_messages() is only available for "
"zeromq. Use the hub-consumer approach for "
"STOMP or AMQP support.")
poller, subs = self._create_poller(topic=topic, passive=False, **kw)
try:
for msg in self._poll(poller, subs):
yield msg
finally:
self._close_subs(subs)
def _create_poller(self, topic="", passive=False, **kw):
# TODO -- do the zmq_strict logic dance with "topic" here.
# It is buried in moksha.hub, but we need it to work the same way
# here.
# TODO -- the 'passive' here and the 'active' are ambiguous. They
# don't actually mean the same thing. This should be resolved.
method = (passive and 'bind') or 'connect'
failed_hostnames = []
subs = {}
for _name, endpoint_list in six.iteritems(self.c['endpoints']):
# You never want to actually subscribe to this thing, but sometimes
# it appears in the endpoints list due to a hack where it gets
# added in __init__ above.
if _name == 'relay_inbound':
continue
# Listify endpoint_list in case it is a single string
endpoint_list = iterate(endpoint_list)
for endpoint in endpoint_list:
# First, some sanity checking. zeromq will potentially
# segfault if we don't do this check.
hostname = endpoint.split(':')[1][2:]
if hostname in failed_hostnames:
continue
if hostname != '*':
try:
socket.gethostbyname_ex(hostname)
except: # noqa: E722
failed_hostnames.append(hostname)
self.log.warn("Couldn't resolve %r" % hostname)
continue
# OK, sanity checks pass. Create the subscriber and connect.
subscriber = self.context.socket(zmq.SUB)
subscriber.setsockopt(zmq.SUBSCRIBE, topic.encode('utf-8'))
set_high_water_mark(subscriber, self.c)
set_tcp_keepalive(subscriber, self.c)
set_tcp_reconnect(subscriber, self.c)
getattr(subscriber, method)(endpoint)
subs[subscriber] = (_name, endpoint)
# Register the sockets we just built with a zmq Poller.
poller = zmq.Poller()
for subscriber in subs:
poller.register(subscriber, zmq.POLLIN)
return (poller, subs)
def _poll(self, poller, subs):
watched_names = {}
for name, _ in subs.values():
if name in self.c.get("replay_endpoints", {}):
# At first we don't know where the sequence is at.
watched_names[name] = -1
# Poll that poller. This is much more efficient than it used to be.
while True:
sockets = dict(poller.poll())
for s in sockets:
name, ep = subs[s]
try:
yield self._run_socket(s, name, ep, watched_names)
except ValidationError as e:
warnings.warn("!! invalid message received: %r" % e.msg)
def _run_socket(self, sock, name, ep, watched_names=None):
if watched_names is None:
watched_names = {}
validate = self.c.get('validate_signatures', False)
# Grab the data off the zeromq internal queue
_topic, message = sock.recv_multipart()
# zmq hands us byte strings, so let's convert to unicode asap
_topic, message = _topic.decode('utf-8'), message.decode('utf-8')
# Now, decode the JSON body into a dict.
msg = fedmsg.encoding.loads(message)
if not validate or fedmsg.crypto.validate(msg, **self.c):
# If there is even a slight change of replay, use
# check_for_replay
if len(self.c.get('replay_endpoints', {})) > 0:
for m in check_for_replay(
name, watched_names,
msg, self.c, self.context):
# Revalidate all the replayed messages.
if not validate or \
fedmsg.crypto.validate(m, **self.c):
return name, ep, m['topic'], m
else:
raise ValidationError(msg)
else:
return (name, ep, _topic, msg)
else:
raise ValidationError(msg)
def _close_subs(self, subs):
for subscriber in subs:
subscriber.close()
| lgpl-2.1 | -1,765,499,945,606,338,300 | 39.294363 | 90 | 0.564375 | false |
anandpdoshi/erpnext | erpnext/config/desktop.py | 1 | 3446 | from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Item",
"_doctype": "Item",
"color": "#f39c12",
"icon": "octicon octicon-package",
"type": "link",
"link": "List/Item"
},
{
"module_name": "Customer",
"_doctype": "Customer",
"color": "#1abc9c",
"icon": "octicon octicon-tag",
"type": "link",
"link": "List/Customer"
},
{
"module_name": "Supplier",
"_doctype": "Supplier",
"color": "#c0392b",
"icon": "octicon octicon-briefcase",
"type": "link",
"link": "List/Supplier"
},
{
"_doctype": "Employee",
"module_name": "Employee",
"color": "#2ecc71",
"icon": "octicon octicon-organization",
"type": "link",
"link": "List/Employee"
},
{
"module_name": "Project",
"_doctype": "Project",
"color": "#8e44ad",
"icon": "octicon octicon-rocket",
"type": "link",
"link": "List/Project"
},
{
"module_name": "Issue",
"color": "#2c3e50",
"icon": "octicon octicon-issue-opened",
"_doctype": "Issue",
"type": "link",
"link": "List/Issue"
},
{
"module_name": "Lead",
"icon": "octicon octicon-broadcast",
"type": "module",
"_doctype": "Lead",
"type": "link",
"link": "List/Lead"
},
{
"module_name": "Profit and Loss Statment",
"_doctype": "Account",
"color": "#3498db",
"icon": "octicon octicon-repo",
"type": "link",
"link": "query-report/Profit and Loss Statement"
},
# old
{
"module_name": "Accounts",
"color": "#3498db",
"icon": "octicon octicon-repo",
"type": "module",
"hidden": 1
},
{
"module_name": "Stock",
"color": "#f39c12",
"icon": "icon-truck",
"icon": "octicon octicon-package",
"type": "module",
"hidden": 1
},
{
"module_name": "CRM",
"color": "#EF4DB6",
"icon": "octicon octicon-broadcast",
"type": "module",
"hidden": 1
},
{
"module_name": "Selling",
"color": "#1abc9c",
"icon": "icon-tag",
"icon": "octicon octicon-tag",
"type": "module",
"hidden": 1
},
{
"module_name": "Buying",
"color": "#c0392b",
"icon": "icon-shopping-cart",
"icon": "octicon octicon-briefcase",
"type": "module",
"hidden": 1
},
{
"module_name": "HR",
"color": "#2ecc71",
"icon": "icon-group",
"icon": "octicon octicon-organization",
"label": _("Human Resources"),
"type": "module",
"hidden": 1
},
{
"module_name": "Manufacturing",
"color": "#7f8c8d",
"icon": "icon-cogs",
"icon": "octicon octicon-tools",
"type": "module",
"hidden": 1
},
{
"module_name": "POS",
"color": "#589494",
"icon": "icon-th",
"icon": "octicon octicon-credit-card",
"type": "page",
"link": "pos",
"label": _("POS")
},
{
"module_name": "Projects",
"color": "#8e44ad",
"icon": "icon-puzzle-piece",
"icon": "octicon octicon-rocket",
"type": "module",
"hidden": 1
},
{
"module_name": "Support",
"color": "#2c3e50",
"icon": "icon-phone",
"icon": "octicon octicon-issue-opened",
"type": "module",
"hidden": 1
},
{
"module_name": "Learn",
"color": "#FF888B",
"icon": "octicon octicon-device-camera-video",
"type": "module",
"is_help": True,
"label": _("Learn"),
"hidden": 1
},
{
"module_name": "Maintenance",
"color": "#FF888B",
"icon": "octicon octicon-tools",
"type": "module",
"label": _("Maintenance")
}
]
| agpl-3.0 | 8,454,698,859,313,510,000 | 19.511905 | 51 | 0.529309 | false |
rhinstaller/python-simpleline | simpleline/input/input_threading.py | 1 | 7915 | #
# This file is part of Simpleline Text UI library.
#
# Copyright (C) 2020 Red Hat, Inc.
#
# Simpleline is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Simpleline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Simpleline. If not, see <https://www.gnu.org/licenses/>.
#
import threading
from abc import ABCMeta, abstractmethod
from simpleline import App
from simpleline.logging import get_simpleline_logger
from simpleline.event_loop.signals import InputReceivedSignal, InputReadySignal
log = get_simpleline_logger()
INPUT_THREAD_NAME = "SimplelineInputThread"
class InputThreadManager():
"""Manager object for input threads.
This manager helps with concurrent user input (still you really shouldn't do that).
"""
__instance = None
def __init__(self):
super().__init__()
self._input_stack = []
self._processing_input = False
@classmethod
def create_new_instance(cls):
instance = InputThreadManager()
cls.__instance = instance
instance._post_init_configuration() # pylint: disable=protected-access
def _post_init_configuration(self):
# pylint: disable=protected-access
App.get_event_loop().register_signal_handler(InputReceivedSignal,
self.__instance._input_received_handler)
@classmethod
def get_instance(cls):
if not cls.__instance:
cls.create_new_instance()
return cls.__instance
def _input_received_handler(self, signal, args):
thread_object = self._input_stack.pop()
thread_object.emit_input_ready_signal(signal.data)
if thread_object.thread:
thread_object.thread.join()
# wait until used object ends
for t in self._input_stack:
t.emit_failed_input_ready_signal()
if t.thread:
t.thread.join()
# remove all other items waiting for input
self._input_stack.clear()
self._processing_input = False
def start_input_thread(self, input_thread_object, concurrent_check=True):
"""Start input thread to get user input.
:param input_thread_object: Input thread object based on InputThread class.
:param concurrent_check: Should the concurrent thread check be fatal? (default True).
"""
self._input_stack.append(input_thread_object)
self._check_input_thread_running(concurrent_check)
self._start_user_input_async()
def _check_input_thread_running(self, raise_concurrent_check):
if len(self._input_stack) != 1:
if not raise_concurrent_check:
log.warning("Asking for multiple inputs with concurrent check bypassed, "
"last who asked wins! Others are dropped.")
else:
msg = ""
for t in self._input_stack:
requester_source = t.requester_source or "Unknown"
msg += "Input handler: {} Input requester: {}\n".format(t.source,
requester_source)
                msg = msg.rstrip()
raise KeyError("Can't run multiple input threads at the same time!\n"
"Asking for input:\n"
"{}".format(msg))
def _start_user_input_async(self):
thread_object = self._input_stack[-1]
if self._processing_input:
self._print_new_prompt(thread_object)
return
thread_object.initialize_thread()
self._processing_input = True
thread_object.start_thread()
@staticmethod
def _print_new_prompt(thread_object):
prompt = thread_object.text_prompt()
# print new prompt
print(prompt, end="")
class InputRequest(metaclass=ABCMeta):
"""Base input request class.
This should be overloaded for every InputHandler class. Purpose of this class is to print
prompt and get input from user.
    The `get_input` method is the entry point for this class. Output from this method must be
a user input.
The `text_prompt` method is used to get textual representation of a prompt. This will be used
on concurrent input to replace existing prompt to get new input.
WARNING:
    The `get_input` method will run in a separate thread!
"""
def __init__(self, source, requester_source=None):
super().__init__()
self._source = source
self._requester_source = requester_source
self.thread = None
@property
def source(self):
"""Get direct source of this input request.
:returns: InputHandler instance.
"""
return self._source
@property
def requester_source(self):
"""Get requester -- source of this input.
:returns: Anything probably UIScreen based instance.
"""
return self._requester_source
def emit_input_ready_signal(self, input_data):
"""Emit the InputReadySignal signal with collected input data.
:param input_data: Input data received.
:type input_data: str
"""
handler_source = self.source
signal_source = self._get_request_source()
new_signal = InputReadySignal(source=signal_source, input_handler_source=handler_source,
data=input_data, success=True)
App.get_event_loop().enqueue_signal(new_signal)
def emit_failed_input_ready_signal(self):
"""Emit the InputReadySignal with failed state."""
handler_source = self.source
signal_source = self._get_request_source()
new_signal = InputReadySignal(source=signal_source, input_handler_source=handler_source,
data="", success=False)
App.get_event_loop().enqueue_signal(new_signal)
def _get_request_source(self):
"""Get user input request source.
That means object who is using InputHandler.
If this object is not specified then return InputHandler as a source.
"""
return self.requester_source or self.source
def initialize_thread(self):
"""Initialize thread for this input request.
Do not call this directly! Will be called by InputThreadManager.
"""
self.thread = threading.Thread(name=INPUT_THREAD_NAME, target=self.run)
self.thread.daemon = True
def start_thread(self):
"""Start input thread.
Do not call this directly! Will be called by InputThreadManager.
"""
self.thread.start()
def run(self):
"""Run the `run_input` method and propagate input outside.
Do not call this method directly. It will be called by InputThreadManager.
"""
data = self.get_input()
App.get_event_loop().enqueue_signal(InputReceivedSignal(self, data))
@abstractmethod
def text_prompt(self):
"""Get text representation of the user prompt.
This will be used to get high priority input.
:returns: String representation of the prompt or None if no prompt is present.
"""
return None
@abstractmethod
def get_input(self):
"""Print prompt and get an input from user.
..NOTE: Overload this method in your class.
Return this input from a function.
"""
return ""
| gpl-2.0 | -6,910,217,499,271,234,000 | 32.256303 | 97 | 0.626027 | false |
WalternativE/boinso-gpredict-bridge | boinsogpredictbridge/utility.py | 1 | 2401 | import json
import requests
from enum import Enum
class Health(Enum):
unknown = 0
ok = 1
conn_err = 2
bad_status = 3
bad_protocol = 4
# cool trick found here:
# http://stackoverflow.com/questions/24481852/serialising-an-enum-member-to-json
class EnumEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Enum):
return {"__HEALTH__": str(obj)}
return json.JSONEncoder.default(self, obj)
def as_enum(d):
if "__HEALTH__" in d:
name, member = d["__HEALTH__"].split(".")
return getattr(globals()[name], member)
else:
return d
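# Round-trip example (a sketch): json.dumps({"h": Health.ok}, cls=EnumEncoder)
# produces '{"h": {"__HEALTH__": "Health.ok"}}', and json.loads of that string
# with object_hook=as_enum turns it back into {"h": <Health.ok: 1>}.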
def create_default_conf():
conf = {
'gpredict_home': '~/gpredict',
'mccs': [
{
'api_root': 'https://coolmcc.at/api/',
'health': Health.unknown,
'skip': True
},
{
'api_root': 'https://supermcc.com/',
'health': Health.ok,
'skip': False
},
{
'api_root': 'https://grandmcc.edu.uk/',
'health': Health.conn_err,
'skip': True
}
]
}
with open('bridgeconf.json', 'w') as f:
json.dump(conf, f, cls=EnumEncoder, indent=4)
return conf
def read_config():
try:
with open('bridgeconf.json', 'r') as f:
return json.load(f, object_hook=as_enum)
except FileNotFoundError:
print('No config found - creating new one')
return create_default_conf()
def check_server_health(config=None):
if config is None:
config = read_config()
for mcc in config['mccs']:
print(mcc['api_root'])
if mcc['skip']:
print('Skipped')
continue
try:
r = requests.get(mcc['api_root'])
if r.status_code == 200:
mcc['health'] = Health.ok
else:
mcc['health'] = Health.bad_status
        except requests.exceptions.ConnectionError:
mcc['health'] = Health.conn_err
except:
mcc['health'] = Health.unknown
finally:
print("Health: {}".format(mcc['health']))
# r = requests.get("{}api".format(config['mcc_roots'][0]))
def main():
print('Utility initialized as a script!')
if __name__ == '__main__':
main() | apache-2.0 | -8,413,413,291,807,577,000 | 23.510204 | 80 | 0.501874 | false |
bruecksen/isimip | isi_mip/climatemodels/migrations/0057_sector_class_name.py | 1 | 2105 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-19 13:23
from __future__ import unicode_literals
from django.db import migrations, models
SECTOR_MAPPING = {
'Agriculture': 'Agriculture',
'Energy': 'Energy',
'Water (global)': 'WaterGlobal',
'Water (regional)': 'WaterRegional',
'Biomes': 'Biomes',
'Forests': 'Forests',
'Marine Ecosystems and Fisheries (global)': 'MarineEcosystemsGlobal',
'Marine Ecosystems and Fisheries (regional)': 'MarineEcosystemsRegional',
'Biodiversity': 'Biodiversity',
'Health': 'Health',
'Coastal Infrastructure': 'CoastalInfrastructure',
'Permafrost': 'Permafrost',
'Computable General Equilibrium Modelling': 'ComputableGeneralEquilibriumModelling',
'Agro-Economic Modelling': 'AgroEconomicModelling',
}
def set_sectors_mapping(apps, schema_editor):
SectorModel = apps.get_model('climatemodels', 'Sector')
for sector in SectorModel.objects.all():
sector.class_name = SECTOR_MAPPING.get(sector.name)
sector.save()
class Migration(migrations.Migration):
dependencies = [
('climatemodels', '0056_remove_outputdata_sector_old'),
]
operations = [
migrations.AddField(
model_name='sector',
name='class_name',
field=models.CharField(choices=[('Generic Sector', 'GenericSector'), ('Agriculture', 'Agriculture'), ('Energy', 'Energy'), ('Water (global)', 'WaterGlobal'), ('Water (regional)', 'WaterRegional'), ('Biomes', 'Biomes'), ('Forests', 'Forests'), ('Marine Ecosystems and Fisheries (global)', 'MarineEcosystemsGlobal'), ('Marine Ecosystems and Fisheries (regional)', 'MarineEcosystemsRegional'), ('Biodiversity', 'Biodiversity'), ('Health', 'Health'), ('Coastal Infrastructure', 'CoastalInfrastructure'), ('Permafrost', 'Permafrost'), ('Computable General Equilibrium Modelling', 'ComputableGeneralEquilibriumModelling'), ('Agro-Economic Modelling', 'AgroEconomicModelling')], default='GenericSector', max_length=500),
),
migrations.RunPython(
set_sectors_mapping
),
]
| mit | 3,835,449,880,308,503,600 | 42.854167 | 725 | 0.676485 | false |
Kudae/SR_Parser | html_test.py | 1 | 1134 | from bs4 import BeautifulSoup as bs
import sys
htmlfile = sys.argv[1]
htmlfile = open(htmlfile, 'r', encoding="utf-8")
bs_html = bs(htmlfile, 'lxml')
table = bs_html.find('table')
headings = []
for th in table.find('tr').find_all('th'):
headings.append(th.get_text())
datasets = []
for row in table.find_all("tr")[1:]:
# for td in row.find_all("td"):
# for br in td.find_all("br"):
# br.replace_with("\n")
dataset = zip(headings, (td.get_text() for td in row.find_all("td")))
datasets.append(dataset)
dict_data = []
for row in datasets:
row = list(row)
dict_data.append(dict(row))
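# Each dict_data entry now maps a column heading to that row's cell text,
# for example (hypothetical values):
#   {'Activity Type': 'Email', 'Created': '2015-06-01 10:00',
#    'Description': '...', 'Comment': '...'}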
wanted = ['Activity Type', 'Description', 'Created', 'Comment']
new_file = open('new_file.html', 'w+')
new_file.write('<p>')
for row in dict_data:
for key in row:
if key in wanted:
new_file.write('{} == {}'.format(key, row[key]))
#print('{} == {}'.format(key, row[key]))
new_file.write('<br>')
new_file.write('- - - ' * 5)
new_file.write('<br>')
#print('\n')
#print('- - ' * 5)
#print('\n')
new_file.write('</p>')
new_file.close()
| mit | 7,239,021,722,167,248,000 | 22.142857 | 73 | 0.564374 | false |
embedded2015/visualizer | log2grasp.py | 1 | 7397 | #!/usr/bin/env python
# Copyright (C) 2013 National Cheng Kung University, Taiwan
# All rights reserved.
# Configure wether to trace these feature
# Warning : Too many contents may freeze Grasp
TRACE_QUEUE = True
TRACE_MUTEX = True
TRACE_BINARY_SEMAPHORE = False
TRACE_INTERRUPT = False
log = open('log', 'r')
lines = log.readlines()
tasks = {}
events = []
mutexes = {}
all_queues = {}
binsems = {}
queues = {}
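# Parsing pass: 'tasks' maps task/interrupt ids to their metadata, 'events'
# collects timestamped scheduler and IPC events in log order, and the
# mutex/binsem/queue dicts map FreeRTOS handles to the Grasp objects that the
# output pass at the bottom of the script will draw.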
for line in lines :
line = line.strip()
inst, args = line.split(' ', 1)
if inst == 'task' :
id, priority, name = args.split(' ', 2)
task = {}
task['no'] = str(len(tasks) + 1)
task['priority'] = int(priority)
task['name'] = task['no'] + ": " + name.strip()
task['created'] = True
tasks[id] = task
elif inst == 'switch' :
out_task, in_task, tick, tick_reload, out_minitick, in_minitick = args.split(' ')
out_time = (int(tick) + (int(tick_reload) - int(out_minitick)) / int(tick_reload)) / 100 * 1000;
in_time = (int(tick) + (int(tick_reload) - int(in_minitick)) / int(tick_reload)) / 100 * 1000;
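        # The expressions above appear to convert tick counts to milliseconds:
        # (tick_reload - minitick) / tick_reload is the elapsed fraction of the
        # current tick on a down-counting timer, and the / 100 * 1000 assumes a
        # 100 Hz tick rate.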
event = {}
event['type'] = 'task out'
event['task'] = out_task
event['time'] = out_time
event['next'] = in_task
events.append(event);
event = {}
event['type'] = 'task in'
event['task'] = in_task
event['time'] = in_time
events.append(event);
last_task = in_task
elif inst == 'mutex' and TRACE_MUTEX :
task, id = args.split(' ')
mutex = {}
mutex['type'] = 'mutex'
mutex['name'] = 'Mutex ' + str(len(mutexes) + 1)
time, mutex['id'] = args.split(' ')
mutexes[id] = mutex;
all_queues[id] = mutex;
elif inst == 'queue' :
act, args = args.split(' ', 1)
if act == 'create' :
time, id, queue_type, queue_size = args.split(' ')
if queue_type == '0' and TRACE_QUEUE :
queue = {}
queue['type'] = 'queue'
queue['name'] = 'Queue ' + str(len(queues) + 1)
queue['size'] = queue_size
queues[id] = queue
all_queues[id] = queue
if queue_type == '3' and TRACE_BINARY_SEMAPHORE : # Binary semaphore, see FreeRTOS/queue.c
binsem = {}
binsem['type'] = 'binary semaphore'
binsem['name'] = "Binary Semaphore " + str(len(binsems) + 1)
binsems[id] = binsem;
all_queues[id] = binsem;
elif act == 'send' or act == 'recv' :
time, task_id, id = args.split(' ')
if id in all_queues and int(time) > 0 :
queue = all_queues[id]
event = {}
event['target'] = id
event['task'] = task_id
event['time'] = float(time) / 1000
if queue['type'] == 'mutex' :
event['type'] = 'mutex ' + ('take' if act == 'recv' else 'give')
queue['acquired'] = True if act == 'recv' else False
if act == 'recv' :
queue['last_acquire'] = last_task
elif queue['type'] == 'binary semaphore' :
event['type'] = 'semaphore ' + ('take' if act == 'recv' else 'give')
elif queue['type'] == 'queue' :
event['type'] = 'queue ' + act
# No type match
else :
continue
# For interrupt, which is not declared explicitly
if task_id not in tasks :
task = {}
task['no'] = str(len(tasks) + 1)
task['priority'] = -1
task['name'] = task['no'] + ": Interrupt " + task_id
tasks[task_id] = task
events.append(event);
elif act == 'block' :
time, task_id, id = args.split(' ')
if id in all_queues and all_queues[id]['type'] == 'binary semaphore':
event = {}
event['target'] = id
event['time'] = float(time) / 1000
event['type'] = 'semaphore block'
event['task'] = task_id
events.append(event);
elif inst == 'interrupt' :
argv = (args + ' ').split(' ')
dir, time, int_num = argv[0:3]
if TRACE_INTERRUPT :
if int_num not in tasks :
task = {}
task['no'] = str(len(tasks) + 1)
task['priority'] = -int(argv[3]) - 1
task['name'] = task['no'] + ": Interrupt " + int_num
tasks[int_num] = task
event = {}
event['time'] = float(time) / 1000
event['task'] = int_num
if dir == 'in' :
event['type'] = 'interrupt in'
event['prev'] = last_task
tasks[int_num]['prev'] = last_task
last_task = int_num
else :
event['type'] = 'interrupt out'
event['prev'] = tasks[int_num]['prev']
last_task = tasks[int_num]['prev']
events.append(event)
tasks[int_num]['created'] = True if dir == 'in' else False
log.close()
grasp = open('sched.grasp', 'w')
for id in tasks :
task = tasks[id]
grasp.write('newTask task%s -priority %s %s -name "%s"\n' % (id, task['priority'], '-kind isr' if int(id) < 256 else '', task['name']))
for id in mutexes :
mutex = mutexes[id]
grasp.write('newMutex mutex%s -name "%s"\n' % (id, mutex['name']))
for id in binsems :
sem = binsems[id]
grasp.write('newSemaphore semaphore%s -name "%s"\n' % (id, sem['name']))
for id in queues :
queue = queues[id]
grasp.write('newBuffer Buffer%s -name "%s"\n' % (id, queue['name']))
for id in queues :
queue = queues[id]
grasp.write('bufferplot 0 resize Buffer%s %s\n' % (id, queue['size']))
for id in tasks :
task = tasks[id]
if int(id) > 255 or not TRACE_INTERRUPT :
grasp.write('plot 0 jobArrived job%s.1 task%s\n' % (id, id))
for event in events :
if event['type'] == 'task out' :
grasp.write('plot %f jobPreempted job%s.1 -target job%s.1\n' %
(event['time'], event['task'], event['next']))
elif event['type'] == 'task in' :
grasp.write('plot %f jobResumed job%s.1\n' %
(event['time'], event['task']))
elif event['type'] == 'mutex give' :
grasp.write('plot %f jobReleasedMutex job%s.1 mutex%s\n' % (event['time'], event['task'], event['target']));
elif event['type'] == 'mutex take' :
grasp.write('plot %f jobAcquiredMutex job%s.1 mutex%s\n'% (event['time'], event['task'], event['target']));
elif event['type'] == 'queue send' :
grasp.write('bufferplot %f push Buffer%s "%s"\n'% (event['time'], event['target'], tasks[event['task']]['no']));
elif event['type'] == 'queue recv' :
grasp.write('bufferplot %f pop Buffer%s\n'% (event['time'], event['target']));
elif event['type'] == 'semaphore give' :
grasp.write('plot %f jobReleasedSemaphore job%s.1 semaphore%s\n' % (event['time'], event['task'], event['target']));
elif event['type'] == 'semaphore take' :
grasp.write('plot %f jobAcquiredSemaphore job%s.1 semaphore%s\n'% (event['time'], event['task'], event['target']));
elif event['type'] == 'semaphore block' :
grasp.write('plot %f jobSuspendedOnSemaphore job%s.1 semaphore%s\n'% (event['time'], event['task'], event['target']));
elif event['type'] == 'interrupt in' :
grasp.write('plot %f jobArrived job%s.1 task%s\n' % (event['time'], event['task'], event['task']))
grasp.write('plot %f jobResumed job%s.1\n' % (event['time'], event['task']))
grasp.write('plot %f jobPreempted job%s.1 -target job%s.1\n' %
(event['time'], event['prev'], event['task']))
elif event['type'] == 'interrupt out' :
grasp.write('plot %f jobCompleted job%s.1\n' % (event['time'], event['task']))
grasp.write('plot %f jobResumed job%s.1\n' % (event['time'], event['prev']))
# Clean up unended operations
for id in mutexes :
mutex = mutexes[id]
    if mutex.get('acquired') :
grasp.write('plot %f jobReleasedMutex job%s.1 mutex%s\n' %
(events[-1]['time'], mutex['last_acquire'], id));
for id in tasks :
task = tasks[id]
if 'created' in task and task['created'] :
grasp.write('plot %f jobCompleted job%s.1\n' %
(events[-1]['time'], id))
grasp.close()
| bsd-2-clause | 3,598,301,657,431,681,000 | 28.706827 | 136 | 0.592943 | false |
abrt/faf | src/pyfaf/storage/migrations/versions/133991a89da4_build_to_opsysrelease.py | 1 | 1720 | # Copyright (C) 2014 ABRT Team
# Copyright (C) 2014 Red Hat, Inc.
#
# This file is part of faf.
#
# faf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# faf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with faf. If not, see <http://www.gnu.org/licenses/>.
"""
Assign build to operating system, release and architecture
Revision ID: 133991a89da4
Revises: 17d4911132f8
Create Date: 2016-09-08 09:08:26.035450
"""
from alembic.op import create_table, drop_table
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '133991a89da4'
down_revision = '17d4911132f8'
def upgrade() -> None:
create_table('buildopsysreleasearch',
sa.Column('build_id', sa.Integer(), nullable=False),
sa.Column('opsysrelease_id', sa.Integer(), nullable=False),
sa.Column('arch_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['build_id'], ['builds.id'], ),
sa.ForeignKeyConstraint(['opsysrelease_id'], ['opsysreleases.id'], ),
sa.ForeignKeyConstraint(['arch_id'], ['archs.id'], ),
sa.PrimaryKeyConstraint('build_id', 'opsysrelease_id', 'arch_id'),
)
def downgrade() -> None:
drop_table('buildopsysreleasearch')
| gpl-3.0 | 1,110,659,406,465,646,700 | 34.102041 | 86 | 0.676163 | false |
google-research/motion_imitation | motion_imitation/examples/whole_body_controller_example.py | 1 | 8007 | """Example of whole body controller on A1 robot."""
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
from absl import app
from absl import flags
from absl import logging
from datetime import datetime
import numpy as np
import scipy.interpolate
import time
import pybullet_data
from pybullet_utils import bullet_client
import pybullet # pytype:disable=import-error
from mpc_controller import com_velocity_estimator
from mpc_controller import gait_generator as gait_generator_lib
from mpc_controller import locomotion_controller
from mpc_controller import openloop_gait_generator
from mpc_controller import raibert_swing_leg_controller
#from mpc_controller import torque_stance_leg_controller
#import mpc_osqp
from mpc_controller import torque_stance_leg_controller_quadprog as torque_stance_leg_controller
from motion_imitation.robots import a1
from motion_imitation.robots import robot_config
from motion_imitation.robots.gamepad import gamepad_reader
flags.DEFINE_string("logdir", None, "where to log trajectories.")
flags.DEFINE_bool("use_gamepad", False,
"whether to use gamepad to provide control input.")
flags.DEFINE_bool("use_real_robot", False,
"whether to use real robot or simulation")
flags.DEFINE_bool("show_gui", False, "whether to show GUI.")
flags.DEFINE_float("max_time_secs", 1., "maximum time to run the robot.")
FLAGS = flags.FLAGS
_NUM_SIMULATION_ITERATION_STEPS = 300
_MAX_TIME_SECONDS = 30.
_STANCE_DURATION_SECONDS = [
0.3
] * 4 # For faster trotting (v > 1.5 ms reduce this to 0.13s).
# Standing
# _DUTY_FACTOR = [1.] * 4
# _INIT_PHASE_FULL_CYCLE = [0., 0., 0., 0.]
# _INIT_LEG_STATE = (
# gait_generator_lib.LegState.STANCE,
# gait_generator_lib.LegState.STANCE,
# gait_generator_lib.LegState.STANCE,
# gait_generator_lib.LegState.STANCE,
# )
# Tripod
# _DUTY_FACTOR = [.8] * 4
# _INIT_PHASE_FULL_CYCLE = [0., 0.25, 0.5, 0.]
# _INIT_LEG_STATE = (
# gait_generator_lib.LegState.STANCE,
# gait_generator_lib.LegState.STANCE,
# gait_generator_lib.LegState.STANCE,
# gait_generator_lib.LegState.SWING,
# )
# Trotting
_DUTY_FACTOR = [0.6] * 4
_INIT_PHASE_FULL_CYCLE = [0.9, 0, 0, 0.9]
_INIT_LEG_STATE = (
gait_generator_lib.LegState.SWING,
gait_generator_lib.LegState.STANCE,
gait_generator_lib.LegState.STANCE,
gait_generator_lib.LegState.SWING,
)
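# In a trot the diagonal leg pairs move together: here legs 0 and 3 start in
# swing at phase 0.9 (just before touch-down) while legs 1 and 2 are in
# stance. Each leg keeps ground contact for 0.3 s, which is 60% of the gait
# cycle (duty factor 0.6).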
def _generate_example_linear_angular_speed(t):
"""Creates an example speed profile based on time for demo purpose."""
vx = 0.6
vy = 0.2
wz = 0.8
time_points = (0, 5, 10, 15, 20, 25, 30)
speed_points = ((0, 0, 0, 0), (0, 0, 0, wz), (vx, 0, 0, 0), (0, 0, 0, -wz),
(0, -vy, 0, 0), (0, 0, 0, 0), (0, 0, 0, wz))
speed = scipy.interpolate.interp1d(time_points,
speed_points,
kind="previous",
fill_value="extrapolate",
axis=0)(t)
return speed[0:3], speed[3], False
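# With this profile the robot stands still for the first 5 s, yaws in place,
# trots forward at 0.6 m/s, yaws the other way, side-steps at 0.2 m/s, pauses,
# and finally yaws again; each command is held for 5 s because the
# interpolation uses the "previous" sample.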
def _setup_controller(robot):
"""Demonstrates how to create a locomotion controller."""
desired_speed = (0, 0)
desired_twisting_speed = 0
gait_generator = openloop_gait_generator.OpenloopGaitGenerator(
robot,
stance_duration=_STANCE_DURATION_SECONDS,
duty_factor=_DUTY_FACTOR,
initial_leg_phase=_INIT_PHASE_FULL_CYCLE,
initial_leg_state=_INIT_LEG_STATE)
window_size = 20 if not FLAGS.use_real_robot else 1
state_estimator = com_velocity_estimator.COMVelocityEstimator(
robot, window_size=window_size)
sw_controller = raibert_swing_leg_controller.RaibertSwingLegController(
robot,
gait_generator,
state_estimator,
desired_speed=desired_speed,
desired_twisting_speed=desired_twisting_speed,
desired_height=robot.MPC_BODY_HEIGHT,
foot_clearance=0.01)
st_controller = torque_stance_leg_controller.TorqueStanceLegController(
robot,
gait_generator,
state_estimator,
desired_speed=desired_speed,
desired_twisting_speed=desired_twisting_speed,
desired_body_height=robot.MPC_BODY_HEIGHT
#,qp_solver = mpc_osqp.QPOASES #or mpc_osqp.OSQP
)
controller = locomotion_controller.LocomotionController(
robot=robot,
gait_generator=gait_generator,
state_estimator=state_estimator,
swing_leg_controller=sw_controller,
stance_leg_controller=st_controller,
clock=robot.GetTimeSinceReset)
return controller
def _update_controller_params(controller, lin_speed, ang_speed):
controller.swing_leg_controller.desired_speed = lin_speed
controller.swing_leg_controller.desired_twisting_speed = ang_speed
controller.stance_leg_controller.desired_speed = lin_speed
controller.stance_leg_controller.desired_twisting_speed = ang_speed
def main(argv):
"""Runs the locomotion controller example."""
del argv # unused
# Construct simulator
if FLAGS.show_gui and not FLAGS.use_real_robot:
p = bullet_client.BulletClient(connection_mode=pybullet.GUI)
else:
p = bullet_client.BulletClient(connection_mode=pybullet.DIRECT)
p.setPhysicsEngineParameter(numSolverIterations=30)
p.setTimeStep(0.001)
p.setGravity(0, 0, -9.8)
p.setPhysicsEngineParameter(enableConeFriction=0)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("plane.urdf")
# Construct robot class:
if FLAGS.use_real_robot:
from motion_imitation.robots import a1_robot
robot = a1_robot.A1Robot(
pybullet_client=p,
motor_control_mode=robot_config.MotorControlMode.HYBRID,
enable_action_interpolation=False,
time_step=0.002,
action_repeat=1)
else:
robot = a1.A1(p,
motor_control_mode=robot_config.MotorControlMode.HYBRID,
enable_action_interpolation=False,
reset_time=2,
time_step=0.002,
action_repeat=1)
controller = _setup_controller(robot)
controller.reset()
if FLAGS.use_gamepad:
gamepad = gamepad_reader.Gamepad()
command_function = gamepad.get_command
else:
command_function = _generate_example_linear_angular_speed
if FLAGS.logdir:
logdir = os.path.join(FLAGS.logdir,
datetime.now().strftime('%Y_%m_%d_%H_%M_%S'))
os.makedirs(logdir)
start_time = robot.GetTimeSinceReset()
current_time = start_time
com_vels, imu_rates, actions = [], [], []
while current_time - start_time < FLAGS.max_time_secs:
#time.sleep(0.0008) #on some fast computer, works better with sleep on real A1?
start_time_robot = current_time
start_time_wall = time.time()
# Updates the controller behavior parameters.
lin_speed, ang_speed, e_stop = command_function(current_time)
# print(lin_speed)
if e_stop:
logging.info("E-stop kicked, exiting...")
break
_update_controller_params(controller, lin_speed, ang_speed)
controller.update()
hybrid_action, _ = controller.get_action()
com_vels.append(np.array(robot.GetBaseVelocity()).copy())
imu_rates.append(np.array(robot.GetBaseRollPitchYawRate()).copy())
actions.append(hybrid_action)
robot.Step(hybrid_action)
current_time = robot.GetTimeSinceReset()
if not FLAGS.use_real_robot:
expected_duration = current_time - start_time_robot
actual_duration = time.time() - start_time_wall
if actual_duration < expected_duration:
time.sleep(expected_duration - actual_duration)
print("actual_duration=", actual_duration)
if FLAGS.use_gamepad:
gamepad.stop()
if FLAGS.logdir:
np.savez(os.path.join(logdir, 'action.npz'),
action=actions,
com_vels=com_vels,
imu_rates=imu_rates)
logging.info("logged to: {}".format(logdir))
if __name__ == "__main__":
app.run(main)
| apache-2.0 | 2,522,982,932,441,971,700 | 32.224066 | 96 | 0.67978 | false |
tpltnt/SimpleCV | SimpleCV/Camera.py | 1 | 131888 | from __future__ import print_function
# SimpleCV Cameras & Devices
#load system libraries
from SimpleCV.base import *
from SimpleCV.ImageClass import Image, ImageSet, ColorSpace
from SimpleCV.Display import Display
from SimpleCV.Color import Color
from collections import deque
import time
import ctypes as ct
import subprocess
import cv2
import numpy as np
import traceback
import sys
#Globals
_cameras = []
_camera_polling_thread = ""
_index = []
class FrameBufferThread(threading.Thread):
"""
**SUMMARY**
This is a helper thread which continually debuffers the camera frames. If
you don't do this, cameras may constantly give you a frame behind, which
causes problems at low sample rates. This makes sure the frames returned
by your camera are fresh.
"""
def run(self):
global _cameras
while (1):
for cam in _cameras:
if cam.pygame_camera:
cam.pygame_buffer = cam.capture.get_image(cam.pygame_buffer)
else:
cv.GrabFrame(cam.capture)
cam._threadcapturetime = time.time()
time.sleep(0.04) #max 25 fps, if you're lucky
class FrameSource:
"""
**SUMMARY**
An abstract Camera-type class, for handling multiple types of video input.
    Any sources of images inherit from it
"""
_calibMat = "" #Intrinsic calibration matrix
_distCoeff = "" #Distortion matrix
_threadcapturetime = '' #when the last picture was taken
    capturetime = '' #timestamp of the last acquired image
def __init__(self):
return
def getProperty(self, p):
return None
def getAllProperties(self):
return {}
def getImage(self):
return None
def calibrate(self, imageList, grid_sz=0.03, dimensions=(8, 5)):
"""
**SUMMARY**
Camera calibration will help remove distortion and fisheye effects
It is agnostic of the imagery source, and can be used with any camera
The easiest way to run calibration is to run the
calibrate.py file under the tools directory for SimpleCV.
This will walk you through the calibration process.
**PARAMETERS**
* *imageList* - is a list of images of color calibration images.
* *grid_sz* - is the actual grid size of the calibration grid, the unit used will be
the calibration unit value (i.e. if in doubt use meters, or U.S. standard)
        * *dimensions* - is the count of the *interior* corners in the calibration grid.
          So for a grid with 4x4 black grid squares there are seven interior corners.
**RETURNS**
The camera's intrinsic matrix.
**EXAMPLE**
See :py:module:calibrate.py
"""
# This routine was adapted from code originally written by:
# Abid. K -- [email protected]
# See: https://github.com/abidrahmank/OpenCV-Python/blob/master/Other_Examples/camera_calibration.py
warn_thresh = 1
n_boards = 0 #no of boards
board_w = int(dimensions[0]) # number of horizontal corners
board_h = int(dimensions[1]) # number of vertical corners
n_boards = int(len(imageList))
board_n = board_w * board_h # no of total corners
board_sz = (board_w, board_h) #size of board
if( n_boards < warn_thresh ):
logger.warning("FrameSource.calibrate: We suggest using 20 or more images to perform camera calibration!" )
# creation of memory storages
image_points = cv.CreateMat(n_boards * board_n, 2, cv.CV_32FC1)
object_points = cv.CreateMat(n_boards * board_n, 3, cv.CV_32FC1)
point_counts = cv.CreateMat(n_boards, 1, cv.CV_32SC1)
intrinsic_matrix = cv.CreateMat(3, 3, cv.CV_32FC1)
distortion_coefficient = cv.CreateMat(5, 1, cv.CV_32FC1)
# capture frames of specified properties and modification of matrix values
i = 0
z = 0 # to print number of frames
successes = 0
imgIdx = 0
# capturing required number of views
while(successes < n_boards):
found = 0
img = imageList[imgIdx]
(found, corners) = cv.FindChessboardCorners(img.getGrayscaleMatrix(), board_sz,
cv.CV_CALIB_CB_ADAPTIVE_THRESH |
cv.CV_CALIB_CB_FILTER_QUADS)
corners = cv.FindCornerSubPix(img.getGrayscaleMatrix(), corners,(11, 11),(-1, -1),
(cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
# if got a good image,draw chess board
if found == 1:
corner_count = len(corners)
z = z + 1
# if got a good image, add to matrix
if len(corners) == board_n:
step = successes * board_n
k = step
for j in range(board_n):
cv.Set2D(image_points, k, 0, corners[j][0])
cv.Set2D(image_points, k, 1, corners[j][1])
cv.Set2D(object_points, k, 0, grid_sz*(float(j)/float(board_w)))
cv.Set2D(object_points, k, 1, grid_sz*(float(j)%float(board_w)))
cv.Set2D(object_points, k, 2, 0.0)
k = k + 1
cv.Set2D(point_counts, successes, 0, board_n)
successes = successes + 1
# now assigning new matrices according to view_count
if( successes < warn_thresh ):
logger.warning("FrameSource.calibrate: You have %s good images for calibration we recommend at least %s" % (successes, warn_thresh))
object_points2 = cv.CreateMat(successes * board_n, 3, cv.CV_32FC1)
image_points2 = cv.CreateMat(successes * board_n, 2, cv.CV_32FC1)
point_counts2 = cv.CreateMat(successes, 1, cv.CV_32SC1)
for i in range(successes * board_n):
cv.Set2D(image_points2, i, 0, cv.Get2D(image_points, i, 0))
cv.Set2D(image_points2, i, 1, cv.Get2D(image_points, i, 1))
cv.Set2D(object_points2, i, 0, cv.Get2D(object_points, i, 0))
cv.Set2D(object_points2, i, 1, cv.Get2D(object_points, i, 1))
cv.Set2D(object_points2, i, 2, cv.Get2D(object_points, i, 2))
for i in range(successes):
cv.Set2D(point_counts2, i, 0, cv.Get2D(point_counts, i, 0))
cv.Set2D(intrinsic_matrix, 0, 0, 1.0)
cv.Set2D(intrinsic_matrix, 1, 1, 1.0)
rcv = cv.CreateMat(n_boards, 3, cv.CV_64FC1)
tcv = cv.CreateMat(n_boards, 3, cv.CV_64FC1)
# camera calibration
cv.CalibrateCamera2(object_points2, image_points2, point_counts2,
(img.width, img.height), intrinsic_matrix,distortion_coefficient,
rcv, tcv, 0)
self._calibMat = intrinsic_matrix
self._distCoeff = distortion_coefficient
return intrinsic_matrix
def getCameraMatrix(self):
"""
**SUMMARY**
This function returns a cvMat of the camera's intrinsic matrix.
        If no calibration matrix has been set, the empty default value is returned.
"""
return self._calibMat
def undistort(self, image_or_2darray):
"""
**SUMMARY**
If given an image, apply the undistortion given by the camera's matrix and return the result.
If given a 1xN 2D cvmat or a 2xN numpy array, it will un-distort points of
measurement and return them in the original coordinate system.
**PARAMETERS**
* *image_or_2darray* - an image or an ndarray.
**RETURNS**
The undistorted image or the undistorted points. If the camera is un-calibrated
we return None.
**EXAMPLE**
>>> img = cam.getImage()
>>> result = cam.undistort(img)
"""
if(type(self._calibMat) != cv.cvmat or type(self._distCoeff) != cv.cvmat ):
logger.warning("FrameSource.undistort: This operation requires calibration, please load the calibration matrix")
return None
if (type(image_or_2darray) == InstanceType and image_or_2darray.__class__ == Image):
inImg = image_or_2darray # we have an image
retVal = inImg.getEmpty()
cv.Undistort2(inImg.getBitmap(), retVal, self._calibMat, self._distCoeff)
return Image(retVal)
else:
mat = ''
if (type(image_or_2darray) == cv.cvmat):
mat = image_or_2darray
else:
arr = cv.fromarray(np.array(image_or_2darray))
mat = cv.CreateMat(cv.GetSize(arr)[1], 1, cv.CV_64FC2)
cv.Merge(arr[:, 0], arr[:, 1], None, None, mat)
upoints = cv.CreateMat(cv.GetSize(mat)[1], 1, cv.CV_64FC2)
cv.UndistortPoints(mat, upoints, self._calibMat, self._distCoeff)
#undistorted.x = (x* focalX + principalX);
#undistorted.y = (y* focalY + principalY);
return (np.array(upoints[:, 0]) *\
[self.getCameraMatrix()[0, 0], self.getCameraMatrix()[1, 1]] +\
[self.getCameraMatrix()[0, 2], self.getCameraMatrix()[1, 2]])[:, 0]
def getImageUndistort(self):
"""
**SUMMARY**
Using the overridden getImage method we retrieve the image and apply the undistortion
operation.
**RETURNS**
The latest image from the camera after applying undistortion.
**EXAMPLE**
>>> cam = Camera()
>>> cam.loadCalibration("mycam.xml")
>>> while True:
>>> img = cam.getImageUndistort()
>>> img.show()
"""
return self.undistort(self.getImage())
def saveCalibration(self, filename):
"""
**SUMMARY**
Save the calibration matrices to file. The file name should be without the extension.
The default extension is .xml.
**PARAMETERS**
* *filename* - The file name, without an extension, to which to save the calibration data.
**RETURNS**
Returns true if the file was saved , false otherwise.
**EXAMPLE**
See :py:module:calibrate.py
"""
if( type(self._calibMat) != cv.cvmat ):
logger.warning("FrameSource.saveCalibration: No calibration matrix present, can't save.")
else:
intrFName = filename + "Intrinsic.xml"
cv.Save(intrFName, self._calibMat)
if( type(self._distCoeff) != cv.cvmat ):
logger.warning("FrameSource.saveCalibration: No calibration distortion present, can't save.")
else:
distFName = filename + "Distortion.xml"
cv.Save(distFName, self._distCoeff)
return None
def loadCalibration(self, filename):
"""
**SUMMARY**
Load a calibration matrix from file.
The filename should be the stem of the calibration files names.
e.g. If the calibration files are MyWebcamIntrinsic.xml and MyWebcamDistortion.xml
then load the calibration file "MyWebcam"
**PARAMETERS**
* *filename* - The file name, without an extension, to which to save the calibration data.
**RETURNS**
Returns true if the file was loaded , false otherwise.
**EXAMPLE**
See :py:module:calibrate.py
"""
retVal = False
intrFName = filename + "Intrinsic.xml"
self._calibMat = cv.Load(intrFName)
distFName = filename + "Distortion.xml"
self._distCoeff = cv.Load(distFName)
if( type(self._distCoeff) == cv.cvmat
and type(self._calibMat) == cv.cvmat):
retVal = True
return retVal
def live(self):
"""
**SUMMARY**
This shows a live view of the camera.
**EXAMPLE**
        Using it is as simple as:
>>> cam = Camera()
>>> cam.live()
Left click will show mouse coordinates and color
Right click will kill the live image
"""
start_time = time.time()
from SimpleCV.Display import Display
i = self.getImage()
d = Display(i.size())
i.save(d)
col = Color.RED
while d.isNotDone():
i = self.getImage()
elapsed_time = time.time() - start_time
if d.mouseLeft:
txt = "coord: (" + str(d.mouseX) + "," + str(d.mouseY) + ")"
i.dl().text(txt, (10,i.height / 2), color=col)
txt = "color: " + str(i.getPixel(d.mouseX,d.mouseY))
i.dl().text(txt, (10,(i.height / 2) + 10), color=col)
print("coord: (" + str(d.mouseX) + "," + str(d.mouseY) + "), color: " + str(i.getPixel(d.mouseX,d.mouseY)))
if elapsed_time > 0 and elapsed_time < 5:
i.dl().text("In live mode", (10,10), color=col)
i.dl().text("Left click will show mouse coordinates and color", (10,20), color=col)
i.dl().text("Right click will kill the live image", (10,30), color=col)
i.save(d)
if d.mouseRight:
print("Closing Window")
d.done = True
pg.quit()
class Camera(FrameSource):
"""
**SUMMARY**
The Camera class is the class for managing input from a basic camera. Note
that once the camera is initialized, it will be locked from being used
by other processes. You can check manually if you have compatible devices
on linux by looking for /dev/video* devices.
    This class wraps OpenCV's cvCapture class and associated methods.
Read up on OpenCV's CaptureFromCAM method for more details if you need finer
control than just basic frame retrieval
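    **EXAMPLE**
    A minimal usage sketch (assumes at least one working camera at index 0):
    >>> cam = Camera(0, prop_set={"width": 640, "height": 480})
    >>> img = cam.getImage()
    >>> img.show()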
"""
capture = "" #cvCapture object
thread = ""
pygame_camera = False
pygame_buffer = ""
prop_map = {"width": cv2.CAP_PROP_FRAME_WIDTH,
"height": cv2.CAP_PROP_FRAME_HEIGHT,
"brightness": cv2.CAP_PROP_BRIGHTNESS,
"contrast": cv2.CAP_PROP_CONTRAST,
"saturation": cv2.CAP_PROP_SATURATION,
"hue": cv2.CAP_PROP_HUE,
"gain": cv2.CAP_PROP_GAIN,
"exposure": cv2.CAP_PROP_EXPOSURE}
#human readable to CV constant property mapping
def __init__(self, camera_index = -1, prop_set = {}, threaded = True, calibrationfile = ''):
global _cameras
global _camera_polling_thread
global _index
"""
**SUMMARY**
In the camera constructor, camera_index indicates which camera to connect to
and props is a dictionary which can be used to set any camera attributes
Supported props are currently: height, width, brightness, contrast,
saturation, hue, gain, and exposure.
You can also specify whether you want the FrameBufferThread to continuously
debuffer the camera. If you specify True, the camera is essentially 'on' at
all times. If you specify off, you will have to manage camera buffers.
**PARAMETERS**
* *camera_index* - The index of the camera, these go from 0 upward, and are system specific.
* *prop_set* - The property set for the camera (i.e. a dict of camera properties).
.. Warning::
For most web cameras only the width and height properties are supported. Support
for all of the other parameters varies by camera and operating system.
* *threaded* - If True we constantly debuffer the camera, otherwise the user
must do this manually.
* *calibrationfile* - A calibration file to load.
"""
self.index = None
self.threaded = False
self.capture = None
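#On Linux, use lsof to check whether the requested /dev/video device is
#already open by another process; if it is, fall back to the auto-detected
#camera (-1). When auto-detecting, reuse the index of a device that is
#already open.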
if platform.system() == "Linux" and -1 in _index and camera_index != -1 and camera_index not in _index:
process = subprocess.Popen(["lsof /dev/video"+str(camera_index)],shell=True,stdout=subprocess.PIPE)
data = process.communicate()
if data[0]:
camera_index = -1
elif platform.system() == "Linux" and camera_index == -1 and -1 not in _index:
process = subprocess.Popen(["lsof /dev/video*"],shell=True,stdout=subprocess.PIPE)
data = process.communicate()
if data[0]:
camera_index = int(data[0].split("\n")[1].split()[-1][-1])
for cam in _cameras:
if camera_index == cam.index:
self.threaded = cam.threaded
self.capture = cam.capture
self.index = cam.index
_cameras.append(self)
return
#This is to add support for XIMEA cameras.
if isinstance(camera_index, str):
if camera_index.lower() == 'ximea':
camera_index = 1100
_index.append(camera_index)
self.capture = cv.CaptureFromCAM(camera_index) #This fixes bug with opencv not being able to grab frames from webcams on linux
self.index = camera_index
if "delay" in prop_set:
time.sleep(prop_set['delay'])
if platform.system() == "Linux" and ("height" in prop_set or cv.GrabFrame(self.capture) == False):
import pygame.camera
pygame.camera.init()
threaded = True #pygame must be threaded
if camera_index == -1:
camera_index = 0
self.index = camera_index
_index.append(camera_index)
print(_index)
if("height" in prop_set and "width" in prop_set):
self.capture = pygame.camera.Camera("/dev/video" + str(camera_index), (prop_set['width'], prop_set['height']))
else:
self.capture = pygame.camera.Camera("/dev/video" + str(camera_index))
try:
self.capture.start()
except Exception as exc:
msg = "caught exception: %r" % exc
logger.warning(msg)
logger.warning("SimpleCV can't seem to find a camera on your system, or the drivers do not work with SimpleCV.")
return
time.sleep(0)
self.pygame_buffer = self.capture.get_image()
self.pygame_camera = True
else:
_index.append(camera_index)
self.threaded = False
if (platform.system() == "Windows"):
threaded = False
if (not self.capture):
return None
#set any properties in the constructor
for p in prop_set.keys():
if p in self.prop_map:
cv.SetCaptureProperty(self.capture, self.prop_map[p], prop_set[p])
if (threaded):
self.threaded = True
_cameras.append(self)
if (not _camera_polling_thread):
_camera_polling_thread = FrameBufferThread()
_camera_polling_thread.daemon = True
_camera_polling_thread.start()
time.sleep(0) #yield to thread
if calibrationfile:
self.loadCalibration(calibrationfile)
#todo -- make these dynamic attributes of the Camera class
def getProperty(self, prop):
"""
**SUMMARY**
Retrieve the value of a given property, wrapper for cv.GetCaptureProperty
.. Warning::
For most web cameras only the width and height properties are supported. Support
for all of the other parameters varies by camera and operating system.
**PARAMETERS**
* *prop* - The property to retrieve.
**RETURNS**
The specified property. If it can't be found the method returns False.
**EXAMPLE**
>>> cam = Camera()
>>> prop = cam.getProperty("width")
"""
if self.pygame_camera:
if prop.lower() == 'width':
return self.capture.get_size()[0]
elif prop.lower() == 'height':
return self.capture.get_size()[1]
else:
return False
if prop in self.prop_map:
return cv.GetCaptureProperty(self.capture, self.prop_map[prop])
return False
def getAllProperties(self):
"""
**SUMMARY**
Return all properties from the camera.
**RETURNS**
A dict of all the camera properties.
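**EXAMPLE**
A minimal sketch:
>>> cam = Camera()
>>> props = cam.getAllProperties()
>>> print props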
"""
if self.pygame_camera:
return False
props = {}
for p in self.prop_map:
props[p] = self.getProperty(p)
return props
def getImage(self):
"""
**SUMMARY**
Retrieve an Image-object from the camera. If you experience problems
with stale frames from the camera's hardware buffer, increase the flushcache
number to dequeue multiple frames before retrieval.
We're working on how to solve this problem.
**RETURNS**
A SimpleCV Image from the camera.
**EXAMPLES**
>>> cam = Camera()
>>> while True:
>>> cam.getImage().show()
"""
if self.pygame_camera:
return Image(self.pygame_buffer.copy())
if (not self.threaded):
cv.GrabFrame(self.capture)
self.capturetime = time.time()
else:
self.capturetime = self._threadcapturetime
frame = cv.RetrieveFrame(self.capture)
newimg = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 3)
cv.Copy(frame, newimg)
return Image(newimg, self)
class VirtualCamera(FrameSource):
"""
**SUMMARY**
The virtual camera lets you test algorithms or functions by providing
a Camera object which is not a physically connected device.
Currently, VirtualCamera supports "image", "imageset", "video" and "directory" source types.
**USAGE**
* For image, pass the filename or URL to the image
* For the video, the filename
* For imageset, you can pass either a path or a list of [path, extension]
* For directory, pass the path to a directory of images; calling .getImage() returns the latest image in it (for example, a directory into which a security camera logs frames)
"""
source = ""
sourcetype = ""
lastmtime = 0
def __init__(self, s, st, start=1):
"""
**SUMMARY**
The constructor takes a source, and source type.
**PARAMETERS**
* *s* - the source of the imagery.
* *st* - the type of the virtual camera. Valid strings include:
* "image" - a single still image.
* "video" - a video file.
* "imageset" - a SimpleCV image set.
* "directory" - a VirtualCamera for loading a directory
* *start* - the number of the frame that you want to start with.
**EXAMPLE**
>>> vc = VirtualCamera("img.jpg", "image")
>>> vc = VirtualCamera("video.mpg", "video")
>>> vc = VirtualCamera("./path_to_images/", "imageset")
>>> vc = VirtualCamera("video.mpg", "video", 300)
>>> vc = VirtualCamera("./imgs", "directory")
"""
self.source = s
self.sourcetype = st
self.counter = 0
if start==0:
start=1
self.start = start
if self.sourcetype not in ["video", "image", "imageset", "directory"]:
print('Error: In VirtualCamera(), Incorrect Source option. "%s" \nUsage:' % self.sourcetype)
print('\tVirtualCamera("filename","video")')
print('\tVirtualCamera("filename","image")')
print('\tVirtualCamera("./path_to_images","imageset")')
print('\tVirtualCamera("./path_to_images","directory")')
return None
else:
if isinstance(self.source,str) and not os.path.exists(self.source):
print('Error: In VirtualCamera()\n\t"%s" was not found.' % self.source)
return None
if (self.sourcetype == "imageset"):
if( isinstance(s,ImageSet) ):
self.source = s
elif( isinstance(s,(list,str)) ):
self.source = ImageSet()
if (isinstance(s,list)):
self.source.load(*s)
else:
self.source.load(s)
else:
warnings.warn('Virtual Camera is unable to figure out the contents of your ImageSet, it must be a directory, list of directories, or an ImageSet object')
elif (self.sourcetype == 'video'):
self.capture = cv.CaptureFromFile(self.source)
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, self.start-1)
elif (self.sourcetype == 'directory'):
pass
def getImage(self):
"""
**SUMMARY**
Retrieve an Image-object from the virtual camera.
**RETURNS**
A SimpleCV Image from the camera.
**EXAMPLES**
>>> cam = VirtualCamera()
>>> while True:
>>> cam.getImage().show()
"""
if (self.sourcetype == 'image'):
self.counter = self.counter + 1
return Image(self.source, self)
elif (self.sourcetype == 'imageset'):
print(len(self.source))
img = self.source[self.counter % len(self.source)]
self.counter = self.counter + 1
return img
elif (self.sourcetype == 'video'):
# cv.QueryFrame returns None if the video is finished
frame = cv.QueryFrame(self.capture)
if frame:
img = cv.CreateImage(cv.GetSize(frame), cv.IPL_DEPTH_8U, 3)
cv.Copy(frame, img)
return Image(img, self)
else:
return None
elif (self.sourcetype == 'directory'):
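#NOTE: directory mode currently looks for the newest .bmp file; see findLastestImage below.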
img = self.findLastestImage(self.source, 'bmp')
self.counter = self.counter + 1
return Image(img, self)
def rewind(self, start=None):
"""
**SUMMARY**
Rewind the Video source back to the given frame.
Available for only video sources.
**PARAMETERS**
start - the number of the frame that you want to rewind to.
if not provided, the video source is rewound to the start
frame given in the constructor, or to the beginning if
none was given.
**RETURNS**
None
**EXAMPLES**
>>> cam = VirtualCamera("filename.avi", "video", 120)
>>> i=0
>>> while i<60:
... cam.getImage().show()
... i+=1
>>> cam.rewind()
"""
if (self.sourcetype == 'video'):
if not start:
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, self.start-1)
else:
if start==0:
start=1
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, start-1)
else:
self.counter = 0
def getFrame(self, frame):
"""
**SUMMARY**
Get the provided numbered frame from the video source.
Available for only video sources.
**PARAMETERS**
frame - the number of the frame
**RETURNS**
Image
**EXAMPLES**
>>> cam = VirtualCamera("filename.avi", "video", 120)
>>> cam.getFrame(400).show()
"""
if (self.sourcetype == 'video'):
number_frame = int(cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES))
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, frame-1)
img = self.getImage()
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, number_frame)
return img
elif (self.sourcetype == 'imageset'):
img = None
if( frame < len(self.source)):
img = self.source[frame]
return img
else:
return None
def skipFrames(self, n):
"""
**SUMMARY**
Skip n number of frames.
Available for only video sources.
**PARAMETERS**
n - number of frames to be skipped.
**RETURNS**
None
**EXAMPLES**
>>> cam = VirtualCamera("filename.avi", "video", 120)
>>> i=0
>>> while i<60:
... cam.getImage().show()
... i+=1
>>> cam.skipFrames(100)
>>> cam.getImage().show()
"""
if (self.sourcetype == 'video'):
number_frame = int(cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES))
cv.SetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES, number_frame + n - 1)
elif (self.sourcetype == 'imageset'):
self.counter = (self.counter + n) % len(self.source)
else:
self.counter = self.counter + n
def getFrameNumber(self):
"""
**SUMMARY**
Get the current frame number of the video source.
Available for only video sources.
**RETURNS**
* *int* - number of the frame
**EXAMPLES**
>>> cam = VirtualCamera("filename.avi", "video", 120)
>>> i=0
>>> while i<60:
... cam.getImage().show()
... i+=1
>>> cam.skipFrames(100)
>>> cam.getFrameNumber()
"""
if (self.sourcetype == 'video'):
number_frame = int(cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_FRAMES))
return number_frame
else:
return self.counter
def getCurrentPlayTime(self):
"""
**SUMMARY**
Get the current play time in milliseconds of the video source.
Available for only video sources.
**RETURNS**
* *int* - milliseconds of time from beginning of file.
**EXAMPLES**
>>> cam = VirtualCamera("filename.avi", "video", 120)
>>> i=0
>>> while i<60:
... cam.getImage().show()
... i+=1
>>> cam.skipFrames(100)
>>> cam.getCurrentPlayTime()
"""
if (self.sourcetype == 'video'):
milliseconds = int(cv.GetCaptureProperty(self.capture, cv.CV_CAP_PROP_POS_MSEC))
return milliseconds
else:
raise ValueError('sources other than video do not have play time property')
def findLastestImage(self, directory='.', extension='png'):
"""
**SUMMARY**
This function finds the latest file in a directory
with a given extension.
**PARAMETERS**
directory - The directory you want to load images from (defaults to current directory)
extension - The image extension you want to use (defaults to .png)
**RETURNS**
The filename of the latest image
**USAGE**
>>> cam = VirtualCamera('imgs/', 'directory') #watch the 'imgs/' directory for the latest image
>>> cam.getImage() # Grab the latest image from that directory
"""
max_mtime = 0
max_dir = None
max_file = None
max_full_path = None
for dirname,subdirs,files in os.walk(directory):
for fname in files:
if fname.split('.')[-1] == extension:
full_path = os.path.join(dirname, fname)
mtime = os.stat(full_path).st_mtime
if mtime > max_mtime:
max_mtime = mtime
max_dir = dirname
max_file = fname
self.lastmtime = mtime
max_full_path = os.path.abspath(os.path.join(dirname, fname))
#if file is being written, block until mtime is at least 100ms old
while time.mktime(time.localtime()) - os.stat(max_full_path).st_mtime < 0.1:
time.sleep(0)
return max_full_path
class Kinect(FrameSource):
"""
**SUMMARY**
This is an experimental wrapper for the Freenect python libraries
you can getImage() and getDepth() for separate channel images
"""
def __init__(self, device_number=0):
"""
**SUMMARY**
In the Kinect constructor, device_number indicates which Kinect to
connect to. It defaults to 0.
**PARAMETERS**
* *device_number* - The index of the kinect, these go from 0 upward.
"""
self.deviceNumber = device_number
if not FREENECT_ENABLED:
logger.warning("You don't seem to have the freenect library installed. This will make it hard to use a Kinect.")
#this code was borrowed from
#https://github.com/amiller/libfreenect-goodies
def getImage(self):
"""
**SUMMARY**
This method returns the Kinect camera image.
**RETURNS**
The Kinect's color camera image.
**EXAMPLE**
>>> k = Kinect()
>>> while True:
>>> k.getImage().show()
"""
video = freenect.sync_get_video(self.deviceNumber)[0]
self.capturetime = time.time()
#video = video[:, :, ::-1] # RGB -> BGR
return Image(video.transpose([1,0,2]), self)
#low bits in this depth are stripped so it fits in an 8-bit image channel
def getDepth(self):
"""
**SUMMARY**
This method returns the Kinect depth image.
**RETURNS**
The Kinect's depth camera image as a grayscale image.
**EXAMPLE**
>>> k = Kinect()
>>> while True:
>>> d = k.getDepth()
>>> img = k.getImage()
>>> result = img.sideBySide(d)
>>> result.show()
"""
depth = freenect.sync_get_depth(self.deviceNumber)[0]
self.capturetime = time.time()
np.clip(depth, 0, 2**10 - 1, depth)
depth >>= 2
depth = depth.astype(np.uint8).transpose()
return Image(depth, self)
#we're going to also support a higher-resolution (11-bit) depth matrix
#if you want to actually do computations with the depth
def getDepthMatrix(self):
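"""
**SUMMARY**
Return the raw (11-bit) Kinect depth data as a numpy matrix, for use when
you need actual depth values rather than the 8-bit image from getDepth().
**RETURNS**
The depth matrix returned by freenect.sync_get_depth().
**EXAMPLE**
>>> k = Kinect()
>>> depth = k.getDepthMatrix()
"""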
self.capturetime = time.time()
return freenect.sync_get_depth(self.deviceNumber)[0]
class JpegStreamReader(threading.Thread):
"""
**SUMMARY**
A threaded class for pulling down JPEG streams and breaking up the images. This
is handy for reading the stream of images from an IP camera.
"""
url = ""
currentframe = ""
_threadcapturetime = ""
def run(self):
f = ''
if re.search('@', self.url):
authstuff = re.findall('//(\S+)@', self.url)[0]
self.url = re.sub("//\S+@", "//", self.url)
user, password = authstuff.split(":")
#thank you missing urllib2 manual
#http://www.voidspace.org.uk/python/articles/urllib2.shtml#id5
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(None, self.url, user, password)
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
opener = urllib2.build_opener(handler)
f = opener.open(self.url)
else:
f = urllib2.urlopen(self.url)
headers = f.info()
if ("content-type" in headers):
headers['Content-type'] = headers['content-type'] #force ucase first char
if "Content-type" not in headers:
logger.warning("Tried to load a JpegStream from " + self.url + ", but didn't find a content-type header!")
return
(multipart, boundary) = headers['Content-type'].split("boundary=")
if not re.search("multipart", multipart, re.I):
logger.warning("Tried to load a JpegStream from " + self.url + ", but the content type header was " + multipart + " not multipart/replace!")
return
buff = ''
data = f.readline().strip()
length = 0
contenttype = "jpeg"
#the first frame contains a boundarystring and some header info
while (1):
#print data
if (re.search(boundary, data.strip()) and len(buff)):
#we have a full jpeg in buffer. Convert to an image
if contenttype == "jpeg":
self.currentframe = buff
self._threadcapturetime = time.time()
buff = ''
if (re.match("Content-Type", data, re.I)):
#set the content type, if provided (default to jpeg)
(header, typestring) = data.split(":")
(junk, contenttype) = typestring.strip().split("/")
if (re.match("Content-Length", data, re.I)):
#once we have the content length, we know how far to read
(header, length) = data.split(":")
length = int(length.strip())
if (re.search("JFIF", data, re.I) or re.search("\xff\xd8\xff\xdb", data) or len(data) > 55):
# we have reached the start of the image
buff = ''
if length and length > len(data):
buff += data + f.read(length - len(data)) #read the remainder of the image
if contenttype == "jpeg":
self.currentframe = buff
self._threadcapturetime = time.time()
else:
while (not re.search(boundary, data)):
buff += data
data = f.readline()
endimg, junk = data.split(boundary)
buff += endimg
data = boundary
continue
data = f.readline() #load the next (header) line
time.sleep(0) #let the other threads go
class JpegStreamCamera(FrameSource):
"""
**SUMMARY**
The JpegStreamCamera takes a URL of a JPEG stream and treats it like a camera. The current frame can always be accessed with getImage().
Requires the Python Imaging Library: http://www.pythonware.com/library/pil/handbook/index.htm
**EXAMPLE**
Using your Android phone as a camera, with software such as IP Webcam:
>>> cam = JpegStreamCamera("http://192.168.65.101:8080/videofeed") # your IP may be different.
>>> img = cam.getImage()
>>> img.show()
"""
url = ""
camthread = ""
def __init__(self, url):
if not PIL_ENABLED:
logger.warning("You need the Python Image Library (PIL) to use the JpegStreamCamera")
return
if not url.startswith('http://'):
url = "http://" + url
self.url = url
self.camthread = JpegStreamReader()
self.camthread.url = self.url
self.camthread.daemon = True
self.camthread.start()
def getImage(self):
"""
**SUMMARY**
Return the current frame of the JpegStream being monitored
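**EXAMPLE**
A minimal sketch (the URL is an assumption; use your own stream address):
>>> cam = JpegStreamCamera("http://192.168.65.101:8080/videofeed")
>>> cam.getImage().show()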
"""
if not self.camthread._threadcapturetime:
now = time.time()
while not self.camthread._threadcapturetime:
if time.time() - now > 5:
warnings.warn("Timeout fetching JpegStream at " + self.url)
return
time.sleep(0.1)
self.capturetime = self.camthread._threadcapturetime
return Image(pil.open(StringIO(self.camthread.currentframe)), self)
_SANE_INIT = False
class Scanner(FrameSource):
"""
**SUMMARY**
The Scanner lets you use any supported SANE-compatible scanner as a SimpleCV camera
List of supported devices: http://www.sane-project.org/sane-supported-devices.html
Requires the PySANE wrapper for libsane. The sane scanner object
is available for direct manipulation at Scanner.device
This scanner object is heavily modified from
https://bitbucket.org/DavidVilla/pysane
The constructor takes an index (default 0) and a dict of SANE options
(the default sets color mode).
**EXAMPLE**
>>> scan = Scanner(0, { "mode": "gray" })
>>> preview = scan.getPreview()
>>> stuff = preview.findBlobs(minsize = 1000)
>>> topleft = (np.min(stuff.x()), np.min(stuff.y()))
>>> bottomright = (np.max(stuff.x()), np.max(stuff.y()))
>>> scan.setROI(topleft, bottomright)
>>> scan.setProperty("resolution", 1200) #set high resolution
>>> scan.setProperty("mode", "color")
>>> img = scan.getImage()
>>> scan.setROI() #reset region of interest
>>> img.show()
"""
usbid = None
manufacturer = None
model = None
kind = None
device = None
max_x = None
max_y = None
def __init__(self, id = 0, properties = { "mode": "color"}):
global _SANE_INIT
import sane
if not _SANE_INIT:
try:
sane.init()
_SANE_INIT = True
except:
warn("Initializing pysane failed, do you have pysane installed?")
return
devices = sane.get_devices()
if not len(devices):
warn("Did not find a sane-compatable device")
return
self.usbid, self.manufacturer, self.model, self.kind = devices[id]
self.device = sane.open(self.usbid)
self.max_x = self.device.br_x
self.max_y = self.device.br_y #save our extents for later
for k, v in properties.items():
setattr(self.device, k, v)
def getImage(self):
"""
**SUMMARY**
Retrieve an Image-object from the scanner. Any ROI set with
setROI() is taken into account.
**RETURNS**
A SimpleCV Image. Note that whatever the scanner mode is,
SimpleCV will return a 3-channel, 8-bit image.
**EXAMPLES**
>>> scan = Scanner()
>>> scan.getImage().show()
"""
return Image(self.device.scan())
def getPreview(self):
"""
**SUMMARY**
Retrieve a preview-quality Image-object from the scanner.
**RETURNS**
A SimpleCV Image. Note that whatever the scanner mode is,
SimpleCV will return a 3-channel, 8-bit image.
**EXAMPLES**
>>> scan = Scanner()
>>> scan.getPreview().show()
"""
self.preview = True
img = Image(self.device.scan())
self.preview = False
return img
def getAllProperties(self):
"""
**SUMMARY**
Return a list of all properties and values from the scanner
**RETURNS**
Dictionary of active options and values. Inactive options appear
as "None"
**EXAMPLES**
>>> scan = Scanner()
>>> print scan.getAllProperties()
"""
props = {}
for prop in self.device.optlist:
val = None
if hasattr(self.device, prop):
val = getattr(self.device, prop)
props[prop] = val
return props
def printProperties(self):
"""
**SUMMARY**
Print detailed information about the SANE device properties
**RETURNS**
Nothing
**EXAMPLES**
>>> scan = Scanner()
>>> scan.printProperties()
"""
for prop in self.device.optlist:
try:
print(self.device[prop])
except:
pass
def getProperty(self, prop):
"""
**SUMMARY**
Returns a single property value from the SANE device
equivalent to Scanner.device.PROPERTY
**RETURNS**
Value for option or None if missing/inactive
**EXAMPLES**
>>> scan = Scanner()
>>> print scan.getProperty('mode')
color
"""
if hasattr(self.device, prop):
return getattr(self.device, prop)
return None
def setROI(self, topleft = (0,0), bottomright = (-1,-1)):
"""
**SUMMARY**
Sets an ROI for the scanner in the current resolution. The
two parameters, topleft and bottomright, will default to the
device extents, so the ROI can be reset by calling setROI with
no parameters.
The ROI is set by SANE in resolution independent units (default
MM) so resolution can be changed after ROI has been set.
**RETURNS**
None
**EXAMPLES**
>>> scan = Scanner()
>>> scan.setROI((50, 50), (100,100))
>>> scan.getImage().show() # a very small crop on the scanner
"""
self.device.tl_x = self.px2mm(topleft[0])
self.device.tl_y = self.px2mm(topleft[1])
if bottomright[0] == -1:
self.device.br_x = self.max_x
else:
self.device.br_x = self.px2mm(bottomright[0])
if bottomright[1] == -1:
self.device.br_y = self.max_y
else:
self.device.br_y = self.px2mm(bottomright[1])
def setProperty(self, prop, val):
"""
**SUMMARY**
Assigns a property value from the SANE device
equivalent to Scanner.device.PROPERTY = VALUE
**RETURNS**
None
**EXAMPLES**
>>> scan = Scanner()
>>> print scan.getProperty('mode')
color
>>> scan.setProperty("mode", "gray")
"""
setattr(self.device, prop, val)
def px2mm(self, pixels = 1):
"""
**SUMMARY**
Helper function to convert native scanner resolution to millimeter units
**RETURNS**
Float value
**EXAMPLES**
>>> scan = Scanner()
>>> scan.px2mm(scan.device.resolution) #return DPI in DPMM
"""
return float(pixels * 25.4 / float(self.device.resolution))
class DigitalCamera(FrameSource):
"""
**SUMMARY**
The DigitalCamera takes a point-and-shoot camera or high-end SLR and uses it as a Camera. The current frame can always be accessed with getPreview().
Requires the PiggyPhoto Library: https://github.com/alexdu/piggyphoto
**EXAMPLE**
>>> cam = DigitalCamera()
>>> pre = cam.getPreview()
>>> pre.findBlobs().show()
>>>
>>> img = cam.getImage()
>>> img.show()
"""
camera = None
usbid = None
device = None
def __init__(self, id = 0):
try:
import piggyphoto
except:
warn("Initializing piggyphoto failed, do you have piggyphoto installed?")
return
devices = piggyphoto.cameraList(autodetect=True).toList()
if not len(devices):
warn("No compatible digital cameras attached")
return
self.device, self.usbid = devices[id]
self.camera = piggyphoto.camera()
def getImage(self):
"""
**SUMMARY**
Retrieve an Image-object from the camera with the highest quality possible.
**RETURNS**
A SimpleCV Image.
**EXAMPLES**
>>> cam = DigitalCamera()
>>> cam.getImage().show()
"""
fd, path = tempfile.mkstemp()
self.camera.capture_image(path)
img = Image(path)
os.close(fd)
os.remove(path)
return img
def getPreview(self):
"""
**SUMMARY**
Retrieve an Image-object from the camera with the preview quality from the camera.
**RETURNS**
A SimpleCV Image.
**EXAMPLES**
>>> cam = DigitalCamera()
>>> cam.getPreview().show()
"""
fd, path = tempfile.mkstemp()
self.camera.capture_preview(path)
img = Image(path)
os.close(fd)
os.remove(path)
return img
class ScreenCamera():
"""
**SUMMARY**
ScreenCamera is a camera class that allows you to capture all or part of the screen and return it as a color image.
Requires the pyscreenshot Library: https://github.com/vijaym123/pyscreenshot
**EXAMPLE**
>>> sc = ScreenCamera()
>>> res = sc.getResolution()
>>> print res
>>>
>>> img = sc.getImage()
>>> img.show()
"""
_roi = None
def __init__(self):
if not PYSCREENSHOT_ENABLED:
warn("Initializing pyscreenshot failed. Install pyscreenshot from https://github.com/vijaym123/pyscreenshot")
return None
def getResolution(self):
"""
**DESCRIPTION**
Returns the resolution of the screen (the size of a full-screen screenshot).
**PARAMETERS**
None
**RETURNS**
The screen resolution as a (width, height) tuple.
**EXAMPLE**
>>> img = ScreenCamera()
>>> res = img.getResolution()
>>> print res
"""
return Image(pyscreenshot.grab()).size()
def setROI(self,roi):
"""
**DESCRIPTION**
To set the region of interest.
**PARAMETERS**
* *roi* - tuple - a 4-tuple (x, y, w, h); the crop is centered, so (x, y) is the center of the region of interest on the screen.
**RETURNS**
None
**EXAMPLE**
>>> sc = ScreenCamera()
>>> res = sc.getResolution()
>>> sc.setROI((res[0]/4, res[1]/4, res[0]/2, res[1]/2))
>>> img = sc.getImage()
>>> img.show()
"""
if isinstance(roi,tuple) and len(roi)==4:
self._roi = roi
return
def getImage(self):
"""
**DESCRIPTION**
getImage returns an Image object containing a screenshot of the current screen.
**PARAMETERS**
None
**RETURNS**
Returns the region of interest if setROI has been used,
otherwise returns the full screenshot.
**EXAMPLE**
>>> sc = ScreenCamera()
>>> img = sc.getImage()
>>> img.show()
"""
img = Image(pyscreenshot.grab())
try :
if self._roi :
img = img.crop(self._roi,centered=True)
except :
print("Error croping the image. ROI specified is not correct.")
return None
return img
class StereoImage:
"""
**SUMMARY**
This class is for binocular stereopsis, that is, extracting 3D information from two differing views of a scene (Image). By comparing the two images, relative depth information can be obtained.
- Fundamental Matrix : F : a 3 x 3 numpy matrix; a relationship between any two images of the same scene that constrains where the projection of points from the scene can occur in both images. See: http://en.wikipedia.org/wiki/Fundamental_matrix_(computer_vision)
- Homography Matrix : H : a 3 x 3 numpy matrix that maps points in one image to the corresponding points in the other (P2 ~ H P1).
- ptsLeft : The matched points on the left image.
- ptsRight : The matched points on the right image.
- findDisparityMap and findDepthMap provide 3D information.
for more information on stereo vision, visit : http://en.wikipedia.org/wiki/Computer_stereo_vision
**EXAMPLE**
>>> img1 = Image('sampleimages/stereo_view1.png')
>>> img2 = Image('sampleimages/stereo_view2.png')
>>> stereoImg = StereoImage(img1,img2)
>>> stereoImg.findDisparityMap(method="BM",nDisparity=20).show()
"""
def __init__( self, imgLeft , imgRight ):
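"""
**SUMMARY**
Construct a StereoImage from a left and a right SimpleCV Image of the same
scene. Both images must have the same size.
**PARAMETERS**
* *imgLeft* - the left view of the scene.
* *imgRight* - the right view of the scene.
"""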
self.ImageLeft = imgLeft
self.ImageRight = imgRight
if self.ImageLeft.size() != self.ImageRight.size():
logger.warning('Left and Right images should have the same size.')
return None
else:
self.size = self.ImageLeft.size()
def findFundamentalMat(self, thresh=500.00, minDist=0.15 ):
"""
**SUMMARY**
This method returns the fundamental matrix F such that (P_2).T F P_1 = 0
**PARAMETERS**
* *thresh* - The feature quality metric. This can be any value between about 300 and 500. Higher
values should return fewer, but higher quality features.
* *minDist* - The value below which the feature correspondence is considered a match. This
is the distance between two feature vectors. Good values are between 0.05 and 0.3
**RETURNS**
Return None if it fails.
* *F* - Fundamental matrix as ndarray.
* *matched_pts1* - the matched points (x, y) in img1
* *matched_pts2* - the matched points (x, y) in img2
**EXAMPLE**
>>> img1 = Image("sampleimages/stereo_view1.png")
>>> img2 = Image("sampleimages/stereo_view2.png")
>>> stereoImg = StereoImage(img1,img2)
>>> F,pts1,pts2 = stereoImg.findFundamentalMat()
**NOTE**
If you deal with the fundamental matrix F directly, be aware of (P_2).T F P_1 = 0
where P_2 and P_1 consist of (y, x, 1)
"""
(kpts1, desc1) = self.ImageLeft._getRawKeypoints(thresh)
(kpts2, desc2) = self.ImageRight._getRawKeypoints(thresh)
if desc1 is None or desc2 is None:
logger.warning("We didn't get any descriptors. Image might be too uniform or blurry.")
return None
num_pts1 = desc1.shape[0]
num_pts2 = desc2.shape[0]
magic_ratio = 1.00
if num_pts1 > num_pts2:
magic_ratio = float(num_pts1) / float(num_pts2)
(idx, dist) = Image()._getFLANNMatches(desc1, desc2)
p = dist.squeeze()
result = p * magic_ratio < minDist
try:
import cv2
except:
logger.warning("Can't use fundamental matrix without OpenCV >= 2.3.0")
return None
pts1 = np.array([kpt.pt for kpt in kpts1])
pts2 = np.array([kpt.pt for kpt in kpts2])
matched_pts1 = pts1[idx[result]].squeeze()
matched_pts2 = pts2[result]
(F, mask) = cv2.findFundamentalMat(matched_pts1, matched_pts2, method=cv.CV_FM_LMEDS)
inlier_ind = mask.nonzero()[0]
matched_pts1 = matched_pts1[inlier_ind, :]
matched_pts2 = matched_pts2[inlier_ind, :]
matched_pts1 = matched_pts1[:, ::-1]
matched_pts2 = matched_pts2[:, ::-1]
return (F, matched_pts1, matched_pts2)
def findHomography( self, thresh=500.00, minDist=0.15):
"""
**SUMMARY**
This method returns the homography H such that P2 ~ H P1
**PARAMETERS**
* *thresh* - The feature quality metric. This can be any value between about 300 and 500. Higher
values should return fewer, but higher quality features.
* *minDist* - The value below which the feature correspondence is considered a match. This
is the distance between two feature vectors. Good values are between 0.05 and 0.3
**RETURNS**
Return None if it fails.
* *H* - homography as ndarray.
* *matched_pts1* - the matched points (x, y) in img1
* *matched_pts2* - the matched points (x, y) in img2
**EXAMPLE**
>>> img1 = Image("sampleimages/stereo_view1.png")
>>> img2 = Image("sampleimages/stereo_view2.png")
>>> stereoImg = StereoImage(img1,img2)
>>> H,pts1,pts2 = stereoImg.findHomography()
**NOTE**
If you deal with the homography H directly, be aware of P2 ~ H P1
where P2 and P1 consist of (y, x, 1)
"""
(kpts1, desc1) = self.ImageLeft._getRawKeypoints(thresh)
(kpts2, desc2) = self.ImageRight._getRawKeypoints(thresh)
if desc1 is None or desc2 is None:
logger.warning("We didn't get any descriptors. Image might be too uniform or blurry.")
return None
num_pts1 = desc1.shape[0]
num_pts2 = desc2.shape[0]
magic_ratio = 1.00
if num_pts1 > num_pts2:
magic_ratio = float(num_pts1) / float(num_pts2)
(idx, dist) = Image()._getFLANNMatches(desc1, desc2)
p = dist.squeeze()
result = p * magic_ratio < minDist
try:
import cv2
except:
logger.warning("Can't use homography without OpenCV >= 2.3.0")
return None
pts1 = np.array([kpt.pt for kpt in kpts1])
pts2 = np.array([kpt.pt for kpt in kpts2])
matched_pts1 = pts1[idx[result]].squeeze()
matched_pts2 = pts2[result]
(H, mask) = cv2.findHomography(matched_pts1, matched_pts2,
method=cv.CV_LMEDS)
inlier_ind = mask.nonzero()[0]
matched_pts1 = matched_pts1[inlier_ind, :]
matched_pts2 = matched_pts2[inlier_ind, :]
matched_pts1 = matched_pts1[:, ::-1]
matched_pts2 = matched_pts2[:, ::-1]
return (H, matched_pts1, matched_pts2)
def findDisparityMap( self, nDisparity=16 ,method='BM'):
"""
The method generates disparity map from set of stereo images.
**PARAMETERS**
* *method* :
*BM* - Block Matching algorithm, this is a real time algorithm.
*SGBM* - Semi Global Block Matching algorithm, this is not a real time algorithm.
*GC* - Graph Cut algorithm, This is not a real time algorithm.
* *nDisparity* - Maximum disparity value. This should be multiple of 16
* *scale* - Scale factor
**RETURNS**
Return None if it fails.
Returns Disparity Map Image
**EXAMPLE**
>>> img1 = Image("sampleimages/stereo_view1.png")
>>> img2 = Image("sampleimages/stereo_view2.png")
>>> stereoImg = StereoImage(img1,img2)
>>> disp = stereoImg.findDisparityMap(method="BM")
"""
gray_left = self.ImageLeft.getGrayscaleMatrix()
gray_right = self.ImageRight.getGrayscaleMatrix()
(r, c) = self.size
scale = int(self.ImageLeft.depth)
if nDisparity % 16 !=0 :
if nDisparity < 16 :
nDisparity = 16
nDisparity = (nDisparity // 16) * 16
try :
if method == 'BM':
disparity = cv.CreateMat(c, r, cv.CV_32F)
state = cv.CreateStereoBMState()
state.SADWindowSize = 41
state.preFilterType = 1
state.preFilterSize = 41
state.preFilterCap = 31
state.minDisparity = -8
state.numberOfDisparities = nDisparity
state.textureThreshold = 10
#state.speckleRange = 32
#state.speckleWindowSize = 100
state.uniquenessRatio=15
cv.FindStereoCorrespondenceBM(gray_left, gray_right, disparity, state)
disparity_visual = cv.CreateMat(c, r, cv.CV_8U)
cv.Normalize( disparity, disparity_visual, 0, 256, cv.CV_MINMAX )
disparity_visual = Image(disparity_visual)
return Image(disparity_visual.getBitmap(),colorSpace=ColorSpace.GRAY)
elif method == 'GC':
disparity_left = cv.CreateMat(c, r, cv.CV_32F)
disparity_right = cv.CreateMat(c, r, cv.CV_32F)
state = cv.CreateStereoGCState(nDisparity, 8)
state.minDisparity = -8
cv.FindStereoCorrespondenceGC( gray_left, gray_right, disparity_left, disparity_right, state, 0)
disparity_left_visual = cv.CreateMat(c, r, cv.CV_8U)
cv.Normalize( disparity_left, disparity_left_visual, 0, 256, cv.CV_MINMAX )
#cv.Scale(disparity_left, disparity_left_visual, -scale)
disparity_left_visual = Image(disparity_left_visual)
return Image(disparity_left_visual.getBitmap(),colorSpace=ColorSpace.GRAY)
elif method == 'SGBM':
try:
import cv2
ver = cv2.__version__
if ver.startswith("$Rev :"):
logger.warning("Can't use SGBM without OpenCV >= 2.4.0")
return None
except:
logger.warning("Can't use SGBM without OpenCV >= 2.4.0")
return None
state = cv2.StereoSGBM()
state.SADWindowSize = 41
state.preFilterCap = 31
state.minDisparity = 0
state.numberOfDisparities = nDisparity
#state.speckleRange = 32
#state.speckleWindowSize = 100
state.disp12MaxDiff = 1
state.fullDP=False
state.P1 = 8 * 1 * 41 * 41
state.P2 = 32 * 1 * 41 * 41
state.uniquenessRatio=15
disparity=state.compute(self.ImageLeft.getGrayNumpy(),self.ImageRight.getGrayNumpy())
return Image(disparity)
else :
logger.warning("Unknown method. Choose one method amoung BM or SGBM or GC !")
return None
except :
logger.warning("Error in computing the Disparity Map, may be due to the Images are stereo in nature.")
return None
def Eline (self, point, F, whichImage):
"""
**SUMMARY**
This method returns, line feature object.
**PARAMETERS**
* *point* - Input point (x, y)
* *F* - Fundamental matrix.
* *whichImage* - Index of the image (1 or 2) that contains the point
**RETURNS**
epipolar line, in the form of line feature object.
**EXAMPLE**
>>> img1 = Image("sampleimages/stereo_view1.png")
>>> img2 = Image("sampleimages/stereo_view2.png")
>>> stereoImg = StereoImage(img1,img2)
>>> F,pts1,pts2 = stereoImg.findFundamentalMat()
>>> point = pts2[0]
>>> epiline = stereoImg.Eline(point, F, 1) #find the corresponding epipolar line in the left image.
"""
from SimpleCV.Features.Detection import Line
pts1 = (0,0)
pts2 = self.size
pt_cvmat = cv.CreateMat(1, 1, cv.CV_32FC2)
pt_cvmat[0, 0] = (point[1], point[0]) # OpenCV seems to use (y, x) coordinate.
line = cv.CreateMat(1, 1, cv.CV_32FC3)
cv.ComputeCorrespondEpilines(pt_cvmat, whichImage, npArray2cvMat(F), line)
line_npArray = np.array(line).squeeze()
line_npArray = line_npArray[[1, 0, 2]]
pts1 = (pts1[0],(-line_npArray[2]-line_npArray[0]*pts1[0])/line_npArray[1] )
pts2 = (pts2[0],(-line_npArray[2]-line_npArray[0]*pts2[0])/line_npArray[1] )
if whichImage == 1 :
return Line(self.ImageLeft, [pts1,pts2])
elif whichImage == 2 :
return Line(self.ImageRight, [pts1,pts2])
def projectPoint( self, point, H ,whichImage):
"""
**SUMMARY**
This method returns the corresponding point (x, y)
**PARAMETERS**
* *point* - Input point (x, y)
* *whichImage* - Index of the image (1 or 2) that contains the point
* *H* - Homography that can be estimated
using StereoCamera.findHomography()
**RETURNS**
Corresponding point (x, y) as tuple
**EXAMPLE**
>>> img1 = Image("sampleimages/stereo_view1.png")
>>> img2 = Image("sampleimages/stereo_view2.png")
>>> stereoImg = StereoImage(img1,img2)
>>> H,pts1,pts2 = stereoImg.findHomography()
>>> point = pts2[0]
>>> projectPoint = stereoImg.projectPoint(point,H ,1) #finds corresponding point in the left image.
"""
H = np.matrix(H)
point = np.matrix((point[1], point[0],1.00))
if whichImage == 1.00:
corres_pt = H * point.T
else:
corres_pt = np.linalg.inv(H) * point.T
corres_pt = corres_pt / corres_pt[2]
return (float(corres_pt[1]), float(corres_pt[0]))
def get3DImage(self, Q, method="BM", state=None):
"""
**SUMMARY**
This method returns the 3D depth image using reprojectImageTo3D method.
**PARAMETERS**
* *Q* - reprojection Matrix (disparity to depth matrix)
* *method* - Stereo correspondence method to be used.
- "BM" - Stereo BM
- "SGBM" - Stereo SGBM
* *state* - dictionary corresponding to parameters of
stereo correspondence.
SADWindowSize - odd int
nDisparity - int
minDisparity - int
preFilterCap - int
preFilterType - int (only BM)
speckleRange - int
speckleWindowSize - int
P1 - int (only SGBM)
P2 - int (only SGBM)
fullDP - Bool (only SGBM)
uniquenessRatio - int
textureThreshold - int (only BM)
**RETURNS**
SimpleCV.Image representing 3D depth Image
also StereoImage.Image3D gives OpenCV 3D Depth Image of CV_32F type.
**EXAMPLE**
>>> lImage = Image("l.jpg")
>>> rImage = Image("r.jpg")
>>> stereo = StereoImage(lImage, rImage)
>>> Q = cv.Load("Q.yml")
>>> stereo.get3DImage(Q).show()
>>> state = {"SADWindowSize":9, "nDisparity":112, "minDisparity":-39}
>>> stereo.get3DImage(Q, "BM", state).show()
>>> stereo.get3DImage(Q, "SGBM", state).show()
"""
imgLeft = self.ImageLeft
imgRight = self.ImageRight
cv2flag = True
try:
import cv2
except ImportError:
cv2flag = False
import cv2.cv as cv
(r, c) = self.size
if method == "BM":
sbm = cv.CreateStereoBMState()
disparity = cv.CreateMat(c, r, cv.CV_32F)
if state:
SADWindowSize = state.get("SADWindowSize")
preFilterCap = state.get("preFilterCap")
minDisparity = state.get("minDisparity")
numberOfDisparities = state.get("nDisparity")
uniquenessRatio = state.get("uniquenessRatio")
speckleRange = state.get("speckleRange")
speckleWindowSize = state.get("speckleWindowSize")
textureThreshold = state.get("textureThreshold")
preFilterType = state.get("preFilterType")
if SADWindowSize is not None:
sbm.SADWindowSize = SADWindowSize
if preFilterCap is not None:
sbm.preFilterCap = preFilterCap
if minDisparity is not None:
sbm.minDisparity = minDisparity
if numberOfDisparities is not None:
sbm.numberOfDisparities = numberOfDisparities
if uniquenessRatio is not None:
sbm.uniquenessRatio = uniquenessRatio
if speckleRange is not None:
sbm.speckleRange = speckleRange
if speckleWindowSize is not None:
sbm.speckleWindowSize = speckleWindowSize
if textureThreshold is not None:
sbm.textureThreshold = textureThreshold
if preFilterType is not None:
sbm.preFilterType = preFilterType
else:
sbm.SADWindowSize = 9
sbm.preFilterType = 1
sbm.preFilterSize = 5
sbm.preFilterCap = 61
sbm.minDisparity = -39
sbm.numberOfDisparities = 112
sbm.textureThreshold = 507
sbm.uniquenessRatio= 0
sbm.speckleRange = 8
sbm.speckleWindowSize = 0
gray_left = imgLeft.getGrayscaleMatrix()
gray_right = imgRight.getGrayscaleMatrix()
cv.FindStereoCorrespondenceBM(gray_left, gray_right, disparity, sbm)
disparity_visual = cv.CreateMat(c, r, cv.CV_8U)
elif method == "SGBM":
if not cv2flag:
warnings.warn("Can't Use SGBM without OpenCV >= 2.4. Use SBM instead.")
sbm = cv2.StereoSGBM()
if state:
SADWindowSize = state.get("SADWindowSize")
preFilterCap = state.get("preFilterCap")
minDisparity = state.get("minDisparity")
numberOfDisparities = state.get("nDisparity")
P1 = state.get("P1")
P2 = state.get("P2")
uniquenessRatio = state.get("uniquenessRatio")
speckleRange = state.get("speckleRange")
speckleWindowSize = state.get("speckleWindowSize")
fullDP = state.get("fullDP")
if SADWindowSize is not None:
sbm.SADWindowSize = SADWindowSize
if preFilterCap is not None:
sbm.preFilterCap = preFilterCap
if minDisparity is not None:
sbm.minDisparity = minDisparity
if numberOfDisparities is not None:
sbm.numberOfDisparities = numberOfDisparities
if P1 is not None:
sbm.P1 = P1
if P2 is not None:
sbm.P2 = P2
if uniquenessRatio is not None:
sbm.uniquenessRatio = uniquenessRatio
if speckleRange is not None:
sbm.speckleRange = speckleRange
if speckleWindowSize is not None:
sbm.speckleWindowSize = speckleWindowSize
if fullDP is not None:
sbm.fullDP = fullDP
else:
sbm.SADWindowSize = 9
sbm.numberOfDisparities = 96
sbm.preFilterCap = 63
sbm.minDisparity = -21
sbm.uniquenessRatio = 7
sbm.speckleWindowSize = 0
sbm.speckleRange = 8
sbm.disp12MaxDiff = 1
sbm.fullDP = False
disparity = sbm.compute(imgLeft.getGrayNumpyCv2(), imgRight.getGrayNumpyCv2())
else:
warnings.warn("Unknown method. Returning None")
return None
if cv2flag:
if not isinstance(Q, np.ndarray):
Q = np.array(Q)
if not isinstance(disparity, np.ndarray):
disparity = np.array(disparity)
Image3D = cv2.reprojectImageTo3D(disparity, Q, ddepth=cv2.cv.CV_32F)
Image3D_normalize = cv2.normalize(Image3D, alpha=0, beta=255, norm_type=cv2.cv.CV_MINMAX, dtype=cv2.cv.CV_8UC3)
retVal = Image(Image3D_normalize, cv2image=True)
else:
Image3D = cv.CreateMat(self.ImageLeft.size()[1], self.ImageLeft.size()[0], cv2.cv.CV_32FC3)
Image3D_normalize = cv.CreateMat(self.ImageLeft.size()[1], self.ImageLeft.size()[0], cv2.cv.CV_8UC3)
cv.ReprojectImageTo3D(disparity, Image3D, Q)
cv.Normalize(Image3D, Image3D_normalize, 0, 255, cv.CV_MINMAX)
retVal = Image(Image3D_normalize)
self.Image3D = Image3D
return retVal
def get3DImageFromDisparity(self, disparity, Q):
"""
**SUMMARY**
This method returns the 3D depth image using reprojectImageTo3D method.
**PARAMETERS**
* *disparity* - Disparity Image
* *Q* - reprojection Matrix (disparity to depth matrix)
**RETURNS**
SimpleCV.Image representing 3D depth Image
also StereoImage.Image3D gives OpenCV 3D Depth Image of CV_32F type.
**EXAMPLE**
>>> lImage = Image("l.jpg")
>>> rImage = Image("r.jpg")
>>> stereo = StereoImage(lImage, rImage)
>>> Q = cv.Load("Q.yml")
>>> disp = stereo.findDisparityMap()
>>> stereo.get3DImageFromDisparity(disp, Q)
"""
cv2flag = True
try:
import cv2
except ImportError:
cv2flag = False
import cv2.cv as cv
if cv2flag:
if not isinstance(Q, np.ndarray):
Q = np.array(Q)
disparity = disparity.getNumpyCv2()
Image3D = cv2.reprojectImageTo3D(disparity, Q, ddepth=cv2.cv.CV_32F)
Image3D_normalize = cv2.normalize(Image3D, alpha=0, beta=255, norm_type=cv2.cv.CV_MINMAX, dtype=cv2.cv.CV_8UC3)
retVal = Image(Image3D_normalize, cv2image=True)
else:
disparity = disparity.getMatrix()
Image3D = cv.CreateMat(self.ImageLeft.size()[1], self.ImageLeft.size()[0], cv2.cv.CV_32FC3)
Image3D_normalize = cv.CreateMat(self.ImageLeft.size()[1], self.ImageLeft.size()[0], cv2.cv.CV_8UC3)
cv.ReprojectImageTo3D(disparity, Image3D, Q)
cv.Normalize(Image3D, Image3D_normalize, 0, 255, cv.CV_MINMAX)
retVal = Image(Image3D_normalize)
self.Image3D = Image3D
return retVal
class StereoCamera :
"""
StereoCamera is a class dedicated to calibrating a stereo camera pair. It also has functionality for
rectification and getting undistorted images.
This class can be used to calculate various parameters related to both cameras:
-> Camera Matrix
-> Distortion coefficients
-> Rotation and Translation matrix
-> Rectification transform (rotation matrix)
-> Projection matrix in the new (rectified) coordinate systems
-> Disparity-to-depth mapping matrix (Q)
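**EXAMPLE**
A typical workflow sketch (the camera indices are assumptions; adjust for your hardware):
>>> StereoCam = StereoCamera()
>>> calibration = StereoCam.stereoCalibration(0, 1, nboards=30)
>>> StereoCam.saveCalibration(calibration, fname="Stereo")
>>> rectification = StereoCam.stereoRectify(calibration)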
"""
def __init__(self):
return
def stereoCalibration(self,camLeft, camRight, nboards=30, chessboard=(8, 5), gridsize=0.027, WinSize = (352,288)):
"""
**SUMMARY**
Stereo calibration is how you obtain the parameters that allow you to calculate 3D information of the scene.
Once both cameras are initialized:
Press [Space] each time the chessboard is identified in both cameras.
Press [Esc] to exit the calibration process.
**PARAMETERS**
* camLeft - Left camera index.
* camRight - Right camera index.
* nboards - Number of samples or multiple views of the chessboard in different positions and orientations with your stereo camera.
* chessboard - A tuple of Cols, Rows in the chessboard (used for calibration).
* gridsize - chessboard grid size in real units
* WinSize - This is the window resolution.
**RETURNS**
A tuple of the form (CM1, CM2, D1, D2, R, T, E, F) on success
CM1 - Camera Matrix for left camera,
CM2 - Camera Matrix for right camera,
D1 - Vector of distortion coefficients for left camera,
D2 - Vector of distortion coefficients for right camera,
R - Rotation matrix between the left and the right camera coordinate systems,
T - Translation vector between the left and the right coordinate systems of the cameras,
E - Essential matrix,
F - Fundamental matrix
**EXAMPLE**
>>> StereoCam = StereoCamera()
>>> calibration = StereoCam.stereoCalibration(1,2,nboards=40)
**Note**
Press space to capture the images.
"""
count = 0
n1="Left"
n2="Right"
try :
captureLeft = cv.CaptureFromCAM(camLeft)
cv.SetCaptureProperty(captureLeft, cv.CV_CAP_PROP_FRAME_WIDTH, WinSize[0])
cv.SetCaptureProperty(captureLeft, cv.CV_CAP_PROP_FRAME_HEIGHT, WinSize[1])
frameLeft = cv.QueryFrame(captureLeft)
cv.FindChessboardCorners(frameLeft, (chessboard))
captureRight = cv.CaptureFromCAM(camRight)
cv.SetCaptureProperty(captureRight, cv.CV_CAP_PROP_FRAME_WIDTH, WinSize[0])
cv.SetCaptureProperty(captureRight, cv.CV_CAP_PROP_FRAME_HEIGHT, WinSize[1])
frameRight = cv.QueryFrame(captureRight)
cv.FindChessboardCorners(frameRight, (chessboard))
except :
print("Error Initialising the Left and Right camera")
return None
imagePoints1 = cv.CreateMat(1, nboards * chessboard[0] * chessboard[1], cv.CV_64FC2)
imagePoints2 = cv.CreateMat(1, nboards * chessboard[0] * chessboard[1], cv.CV_64FC2)
objectPoints = cv.CreateMat(1, chessboard[0] * chessboard[1] * nboards, cv.CV_64FC3)
nPoints = cv.CreateMat(1, nboards, cv.CV_32S)
# the intrinsic camera matrices
CM1 = cv.CreateMat(3, 3, cv.CV_64F)
CM2 = cv.CreateMat(3, 3, cv.CV_64F)
# the distortion coefficients of both cameras
D1 = cv.CreateMat(1, 5, cv.CV_64F)
D2 = cv.CreateMat(1, 5, cv.CV_64F)
# matrices governing the rotation and translation from camera 1 to camera 2
R = cv.CreateMat(3, 3, cv.CV_64F)
T = cv.CreateMat(3, 1, cv.CV_64F)
# the essential and fundamental matrices
E = cv.CreateMat(3, 3, cv.CV_64F)
F = cv.CreateMat(3, 3, cv.CV_64F)
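# Interactive capture loop: show both camera feeds, draw any detected
# chessboard corners, and record a calibration sample each time [Space] is
# pressed while corners are visible in both views.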
while True:
frameLeft = cv.QueryFrame(captureLeft)
cv.Flip(frameLeft, frameLeft, 1)
frameRight = cv.QueryFrame(captureRight)
cv.Flip(frameRight, frameRight, 1)
k = cv.WaitKey(3)
cor1 = cv.FindChessboardCorners(frameLeft, (chessboard))
if cor1[0] :
cv.DrawChessboardCorners(frameLeft, (chessboard), cor1[1], cor1[0])
cv.ShowImage(n1, frameLeft)
cor2 = cv.FindChessboardCorners(frameRight, (chessboard))
if cor2[0]:
cv.DrawChessboardCorners(frameRight, (chessboard), cor2[1], cor2[0])
cv.ShowImage(n2, frameRight)
if cor1[0] and cor2[0] and k==0x20:
print(count)
for i in range(0, len(cor1[1])):
cv.Set1D(imagePoints1, count * chessboard[0] * chessboard[1] + i, cv.Scalar(cor1[1][i][0], cor1[1][i][1]))
cv.Set1D(imagePoints2, count * chessboard[0] * chessboard[1] + i, cv.Scalar(cor2[1][i][0], cor2[1][i][1]))
count += 1
if count == nboards:
cv.DestroyAllWindows()
for i in range(nboards):
for j in range(chessboard[1]):
for k in range(chessboard[0]):
cv.Set1D(objectPoints, i * chessboard[1] * chessboard[0] + j * chessboard[0] + k, (k * gridsize, j * gridsize, 0))
for i in range(nboards):
cv.Set1D(nPoints, i, chessboard[0] * chessboard[1])
cv.SetIdentity(CM1)
cv.SetIdentity(CM2)
cv.Zero(D1)
cv.Zero(D2)
print("Running stereo calibration...")
del(camLeft)
del(camRight)
cv.StereoCalibrate(objectPoints, imagePoints1, imagePoints2, nPoints, CM1, D1, CM2, D2, WinSize, R, T, E, F,
flags=cv.CV_CALIB_SAME_FOCAL_LENGTH | cv.CV_CALIB_ZERO_TANGENT_DIST)
print("Done.")
return (CM1, CM2, D1, D2, R, T, E, F)
cv.ShowImage(n1, frameLeft)
cv.ShowImage(n2, frameRight)
if k == 0x1b:
print("ESC pressed. Exiting. WARNING: NOT ENOUGH CHESSBOARDS FOUND YET")
cv.DestroyAllWindows()
break
def saveCalibration(self,calibration=None, fname="Stereo",cdir="."):
"""
**SUMMARY**
saveCalibration is a method to save the stereo calibration parameters CM1, CM2, D1, D2, R, T, E, F of a stereo pair.
This method returns True on success and saves the calibration in the following format.
StereoCM1.txt
StereoCM2.txt
StereoD1.txt
StereoD2.txt
StereoR.txt
StereoT.txt
StereoE.txt
StereoF.txt
**PARAMETERS**
calibration - a tuple of the form (CM1, CM2, D1, D2, R, T, E, F)
CM1 -> Camera Matrix for left camera,
CM2 -> Camera Matrix for right camera,
D1 -> Vector of distortion coefficients for left camera,
D2 -> Vector of distortion coefficients for right camera,
R -> Rotation matrix between the left and the right camera coordinate systems,
T -> Translation vector between the left and the right coordinate systems of the cameras,
E -> Essential matrix,
F -> Fundamental matrix
**RETURNS**
return True on success and saves the calibration files.
**EXAMPLE**
>>> StereoCam = StereoCamera()
>>> calibration = StereoCam.stereoCalibration(1,2,nboards=40)
>>> StereoCam.saveCalibration(calibration,fname="Stereo1")
"""
filenames = (fname+"CM1.txt", fname+"CM2.txt", fname+"D1.txt", fname+"D2.txt", fname+"R.txt", fname+"T.txt", fname+"E.txt", fname+"F.txt")
try :
(CM1, CM2, D1, D2, R, T, E, F) = calibration
cv.Save("{0}/{1}".format(cdir, filenames[0]), CM1)
cv.Save("{0}/{1}".format(cdir, filenames[1]), CM2)
cv.Save("{0}/{1}".format(cdir, filenames[2]), D1)
cv.Save("{0}/{1}".format(cdir, filenames[3]), D2)
cv.Save("{0}/{1}".format(cdir, filenames[4]), R)
cv.Save("{0}/{1}".format(cdir, filenames[5]), T)
cv.Save("{0}/{1}".format(cdir, filenames[6]), E)
cv.Save("{0}/{1}".format(cdir, filenames[7]), F)
print("Calibration parameters written to directory '{0}'.".format(cdir))
return True
except :
return False
def loadCalibration(self,fname="Stereo",dir="."):
"""
**SUMMARY**
loadCalibration is a method to load the stereo calibration parameters CM1, CM2, D1, D2, R, T, E, F of a stereo pair.
This method loads the calibration files and returns the calibration on success, else returns False.
**PARAMETERS**
fname - is the prefix of the calibration files.
dir - is the directory in which files are present.
**RETURNS**
a tuple of the form (CM1, CM2, D1, D2, R, T, E, F) on success.
CM1 - Camera Matrix for left camera
CM2 - Camera Matrix for right camera
D1 - Vector of distortion coefficients for left camera
D2 - Vector of distortion coefficients for right camera
R - Rotation matrix between the left and the right camera coordinate systems
T - Translation vector between the left and the right coordinate systems of the cameras
E - Essential matrix
F - Fundamental matrix
else returns False.
**EXAMPLE**
>>> StereoCam = StereoCamera()
>>> loadedCalibration = StereoCam.loadCalibration(fname="Stereo1")
"""
filenames = (fname+"CM1.txt", fname+"CM2.txt", fname+"D1.txt", fname+"D2.txt", fname+"R.txt", fname+"T.txt", fname+"E.txt", fname+"F.txt")
try :
CM1 = cv.Load("{0}/{1}".format(dir, filenames[0]))
CM2 = cv.Load("{0}/{1}".format(dir, filenames[1]))
D1 = cv.Load("{0}/{1}".format(dir, filenames[2]))
D2 = cv.Load("{0}/{1}".format(dir, filenames[3]))
R = cv.Load("{0}/{1}".format(dir, filenames[4]))
T = cv.Load("{0}/{1}".format(dir, filenames[5]))
E = cv.Load("{0}/{1}".format(dir, filenames[6]))
F = cv.Load("{0}/{1}".format(dir, filenames[7]))
print("Calibration files loaded from dir '{0}'.".format(dir))
return (CM1, CM2, D1, D2, R, T, E, F)
except :
return False
def stereoRectify(self,calib=None,WinSize=(352,288)):
"""
**SUMMARY**
Computes rectification transforms for each head of a calibrated stereo camera.
**PARAMETERS**
calib - a tuple of the form (CM1, CM2, D1, D2, R, T, E, F)
CM1 - Camera Matrix for left camera,
CM2 - Camera Matrix for right camera,
D1 - Vector of distortion coefficients for left camera,
D2 - Vector of distortion coefficients for right camera,
R - Rotation matrix between the left and the right camera coordinate systems,
T - Translation vector between the left and the right coordinate systems of the cameras,
E - Essential matrix,
F - Fundamental matrix
**RETURNS**
On success returns a tuple of the format -> (R1, R2, P1, P2, Q, roi)
R1 - Rectification transform (rotation matrix) for the left camera.
R2 - Rectification transform (rotation matrix) for the right camera.
P1 - Projection matrix in the new (rectified) coordinate systems for the left camera.
P2 - Projection matrix in the new (rectified) coordinate systems for the right camera.
Q - disparity-to-depth mapping matrix.
**EXAMPLE**
>>> StereoCam = StereoCamera()
>>> calibration = StereoCam.loadCalibration(fname="Stereo1")
>>> rectification = StereoCam.stereoRectify(calibration)
"""
(CM1, CM2, D1, D2, R, T, E, F) = calib
R1 = cv.CreateMat(3, 3, cv.CV_64F)
R2 = cv.CreateMat(3, 3, cv.CV_64F)
P1 = cv.CreateMat(3, 4, cv.CV_64F)
P2 = cv.CreateMat(3, 4, cv.CV_64F)
Q = cv.CreateMat(4, 4, cv.CV_64F)
print("Running stereo rectification...")
(leftroi, rightroi) = cv.StereoRectify(CM1, CM2, D1, D2, WinSize, R, T, R1, R2, P1, P2, Q)
roi = []
roi.append(max(leftroi[0], rightroi[0]))
roi.append(max(leftroi[1], rightroi[1]))
roi.append(min(leftroi[2], rightroi[2]))
roi.append(min(leftroi[3], rightroi[3]))
print("Done.")
return (R1, R2, P1, P2, Q, roi)
def getImagesUndistort(self,imgLeft, imgRight, calibration, rectification, WinSize=(352,288)):
"""
**SUMMARY**
Rectify two images from the calibration and rectification parameters.
**PARAMETERS**
* *imgLeft* - Image captured from the left camera that needs to be rectified.
* *imgRight* - Image captured from the right camera that needs to be rectified.
* *calibration* - A calibration tuple of the format (CM1, CM2, D1, D2, R, T, E, F)
* *rectification* - A rectification tuple of the format (R1, R2, P1, P2, Q, roi)
**RETURNS**
returns rectified images in a tuple -> (imgLeft, imgRight)
**EXAMPLE**
>>> StereoCam = StereoCamera()
>>> calibration = StereoCam.loadCalibration(fname="Stereo1")
>>> rectification = StereoCam.stereoRectify(calibration)
>>> imgLeft = camLeft.getImage()
>>> imgRight = camRight.getImage()
>>> rectLeft,rectRight = StereoCam.getImagesUndistort(imgLeft,imgRight,calibration,rectification)
"""
imgLeft = imgLeft.getMatrix()
imgRight = imgRight.getMatrix()
(CM1, CM2, D1, D2, R, T, E, F) = calibration
(R1, R2, P1, P2, Q, roi) = rectification
dst1 = cv.CloneMat(imgLeft)
dst2 = cv.CloneMat(imgRight)
map1x = cv.CreateMat(WinSize[1], WinSize[0], cv.CV_32FC1)
map2x = cv.CreateMat(WinSize[1], WinSize[0], cv.CV_32FC1)
map1y = cv.CreateMat(WinSize[1], WinSize[0], cv.CV_32FC1)
map2y = cv.CreateMat(WinSize[1], WinSize[0], cv.CV_32FC1)
#print "Rectifying images..."
cv.InitUndistortRectifyMap(CM1, D1, R1, P1, map1x, map1y)
cv.InitUndistortRectifyMap(CM2, D2, R2, P2, map2x, map2y)
cv.Remap(imgLeft, dst1, map1x, map1y)
cv.Remap(imgRight, dst2, map2x, map2y)
return Image(dst1), Image(dst2)
def get3DImage(self, leftIndex, rightIndex, Q, method="BM", state=None):
"""
**SUMMARY**
This method returns the 3D depth image using reprojectImageTo3D method.
**PARAMETERS**
* *leftIndex* - Index of left camera
* *rightIndex* - Index of right camera
* *Q* - reprojection Matrix (disparity to depth matrix)
* *method* - Stereo correspondence method to be used.
- "BM" - Stereo BM
- "SGBM" - Stereo SGBM
* *state* - dictionary corresponding to parameters of
stereo correspondence.
SADWindowSize - odd int
nDisparity - int
minDisparity - int
preFilterCap - int
preFilterType - int (only BM)
speckleRange - int
speckleWindowSize - int
P1 - int (only SGBM)
P2 - int (only SGBM)
fullDP - Bool (only SGBM)
uniquenessRatio - int
textureThreshold - int (only BM)
**RETURNS**
SimpleCV.Image representing 3D depth Image
also StereoCamera.Image3D gives OpenCV 3D Depth Image of CV_32F type.
**EXAMPLE**
>>> lImage = Image("l.jpg")
>>> rImage = Image("r.jpg")
>>> stereo = StereoCamera()
>>> Q = cv.Load("Q.yml")
>>> stereo.get3DImage(1, 2, Q).show()
>>> state = {"SADWindowSize":9, "nDisparity":112, "minDisparity":-39}
>>> stereo.get3DImage(1, 2, Q, "BM", state).show()
>>> stereo.get3DImage(1, 2, Q, "SGBM", state).show()
"""
cv2flag = True
try:
import cv2
except ImportError:
cv2flag = False
import cv2.cv as cv
if cv2flag:
camLeft = cv2.VideoCapture(leftIndex)
camRight = cv2.VideoCapture(rightIndex)
if camLeft.isOpened():
_, imgLeft = camLeft.read()
else:
warnings.warn("Unable to open left camera")
return None
if camRight.isOpened():
_, imgRight = camRight.read()
else:
warnings.warn("Unable to open right camera")
return None
imgLeft = Image(imgLeft, cv2image=True)
imgRight = Image(imgRight, cv2image=True)
else:
camLeft = cv.CaptureFromCAM(leftIndex)
camRight = cv.CaptureFromCAM(rightIndex)
imgLeft = cv.QueryFrame(camLeft)
if imgLeft is None:
warnings.warn("Unable to open left camera")
return None
imgRight = cv.QueryFrame(camRight)
if imgRight is None:
warnings.warn("Unable to open right camera")
return None
imgLeft = Image(imgLeft, cv2image=True)
imgRight = Image(imgRight, cv2image=True)
del camLeft
del camRight
stereoImages = StereoImage(imgLeft, imgRight)
Image3D_normalize = stereoImages.get3DImage(Q, method, state)
self.Image3D = stereoImages.Image3D
return Image3D_normalize
class AVTCameraThread(threading.Thread):
camera = None
run = True
verbose = False
lock = None
logger = None
framerate = 0
def __init__(self, camera):
super(AVTCameraThread, self).__init__()
self._stop = threading.Event()
self.camera = camera
self.lock = threading.Lock()
self.name = 'Thread-Camera-ID-' + str(self.camera.uniqueid)
def run(self):
counter = 0
timestamp = time.time()
while self.run:
self.lock.acquire()
self.camera.runCommand("AcquisitionStart")
frame = self.camera._getFrame(1000)
if frame:
img = Image(pil.fromstring(self.camera.imgformat,
(self.camera.width, self.camera.height),
frame.ImageBuffer[:int(frame.ImageBufferSize)]))
self.camera._buffer.appendleft(img)
self.camera.runCommand("AcquisitionStop")
self.lock.release()
counter += 1
time.sleep(0.01)
if time.time() - timestamp >= 1:
self.camera.framerate = counter
counter = 0
timestamp = time.time()
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
AVTCameraErrors = [
("ePvErrSuccess", "No error"),
("ePvErrCameraFault", "Unexpected camera fault"),
("ePvErrInternalFault", "Unexpected fault in PvApi or driver"),
("ePvErrBadHandle", "Camera handle is invalid"),
("ePvErrBadParameter", "Bad parameter to API call"),
("ePvErrBadSequence", "Sequence of API calls is incorrect"),
("ePvErrNotFound", "Camera or attribute not found"),
("ePvErrAccessDenied", "Camera cannot be opened in the specified mode"),
("ePvErrUnplugged", "Camera was unplugged"),
("ePvErrInvalidSetup", "Setup is invalid (an attribute is invalid)"),
("ePvErrResources", "System/network resources or memory not available"),
("ePvErrBandwidth", "1394 bandwidth not available"),
("ePvErrQueueFull", "Too many frames on queue"),
("ePvErrBufferTooSmall", "Frame buffer is too small"),
("ePvErrCancelled", "Frame cancelled by user"),
("ePvErrDataLost", "The data for the frame was lost"),
("ePvErrDataMissing", "Some data in the frame is missing"),
("ePvErrTimeout", "Timeout during wait"),
("ePvErrOutOfRange", "Attribute value is out of the expected range"),
("ePvErrWrongType", "Attribute is not this type (wrong access function)"),
("ePvErrForbidden", "Attribute write forbidden at this time"),
("ePvErrUnavailable", "Attribute is not available at this time"),
("ePvErrFirewall", "A firewall is blocking the traffic (Windows only)"),
]
def pverr(errcode):
if errcode:
raise Exception(": ".join(AVTCameraErrors[errcode]))
class AVTCamera(FrameSource):
"""
**SUMMARY**
AVTCamera is a ctypes wrapper for the Prosilica/Allied Vision cameras,
such as the "manta" series.
These require the PvAVT binary driver from Allied Vision:
http://www.alliedvisiontec.com/us/products/1108.html
    Note that as of the time of writing the new VIMBA driver is not available
    for Mac/Linux - so this uses the legacy PvAVT driver.
    Props to Cixelyn, whose py-avt-pvapi module showed how to get much
    of this working https://bitbucket.org/Cixelyn/py-avt-pvapi
All camera properties are directly from the PvAVT manual -- if not
    specified it will default to whatever the camera state is. Cameras
    can be addressed either by their index in the attached-camera list or by
    their UniqueId.
**EXAMPLE**
>>> cam = AVTCamera(0, {"width": 656, "height": 492})
>>>
>>> img = cam.getImage()
>>> img.show()
"""
_buffer = None # Buffer to store images
_buffersize = 10 # Number of images to keep in the rolling image buffer for threads
_lastimage = None # Last image loaded into memory
_thread = None
_framerate = 0
threaded = False
_pvinfo = { }
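    # Map of documented PvAVT attributes to (value type, access mode). The access
    # strings mirror the PvAVT attribute reference -- presumably R/W = read/write,
    # R/C = read-only constant, R/V = read-only volatile; the numeric suffixes on
    # the Event* entries appear to be the event IDs reported by the driver.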
_properties = {
"AcqEndTriggerEvent": ("Enum", "R/W"),
"AcqEndTriggerMode": ("Enum", "R/W"),
"AcqRecTriggerEvent": ("Enum", "R/W"),
"AcqRecTriggerMode": ("Enum", "R/W"),
"AcqStartTriggerEvent": ("Enum", "R/W"),
"AcqStartTriggerMode": ("Enum", "R/W"),
"FrameRate": ("Float32", "R/W"),
"FrameStartTriggerDelay": ("Uint32", "R/W"),
"FrameStartTriggerEvent": ("Enum", "R/W"),
"FrameStartTriggerMode": ("Enum", "R/W"),
"FrameStartTriggerOverlap": ("Enum", "R/W"),
"AcquisitionFrameCount": ("Uint32", "R/W"),
"AcquisitionMode": ("Enum", "R/W"),
"RecorderPreEventCount": ("Uint32", "R/W"),
"ConfigFileIndex": ("Enum", "R/W"),
"ConfigFilePowerup": ("Enum", "R/W"),
"DSPSubregionBottom": ("Uint32", "R/W"),
"DSPSubregionLeft": ("Uint32", "R/W"),
"DSPSubregionRight": ("Uint32", "R/W"),
"DSPSubregionTop": ("Uint32", "R/W"),
"DefectMaskColumnEnable": ("Enum", "R/W"),
"ExposureAutoAdjustTol": ("Uint32", "R/W"),
"ExposureAutoAlg": ("Enum", "R/W"),
"ExposureAutoMax": ("Uint32", "R/W"),
"ExposureAutoMin": ("Uint32", "R/W"),
"ExposureAutoOutliers": ("Uint32", "R/W"),
"ExposureAutoRate": ("Uint32", "R/W"),
"ExposureAutoTarget": ("Uint32", "R/W"),
"ExposureMode": ("Enum", "R/W"),
"ExposureValue": ("Uint32", "R/W"),
"GainAutoAdjustTol": ("Uint32", "R/W"),
"GainAutoMax": ("Uint32", "R/W"),
"GainAutoMin": ("Uint32", "R/W"),
"GainAutoOutliers": ("Uint32", "R/W"),
"GainAutoRate": ("Uint32", "R/W"),
"GainAutoTarget": ("Uint32", "R/W"),
"GainMode": ("Enum", "R/W"),
"GainValue": ("Uint32", "R/W"),
"LensDriveCommand": ("Enum", "R/W"),
"LensDriveDuration": ("Uint32", "R/W"),
"LensVoltage": ("Uint32", "R/V"),
"LensVoltageControl": ("Uint32", "R/W"),
"IrisAutoTarget": ("Uint32", "R/W"),
"IrisMode": ("Enum", "R/W"),
"IrisVideoLevel": ("Uint32", "R/W"),
"IrisVideoLevelMax": ("Uint32", "R/W"),
"IrisVideoLevelMin": ("Uint32", "R/W"),
"VsubValue": ("Uint32", "R/C"),
"WhitebalAutoAdjustTol": ("Uint32", "R/W"),
"WhitebalAutoRate": ("Uint32", "R/W"),
"WhitebalMode": ("Enum", "R/W"),
"WhitebalValueRed": ("Uint32", "R/W"),
"WhitebalValueBlue": ("Uint32", "R/W"),
"EventAcquisitionStart": ("Uint32", "R/C 40000"),
"EventAcquisitionEnd": ("Uint32", "R/C 40001"),
"EventFrameTrigger": ("Uint32", "R/C 40002"),
"EventExposureEnd": ("Uint32", "R/C 40003"),
"EventAcquisitionRecordTrigger": ("Uint32", "R/C 40004"),
"EventSyncIn1Rise": ("Uint32", "R/C 40010"),
"EventSyncIn1Fall": ("Uint32", "R/C 40011"),
"EventSyncIn2Rise": ("Uint32", "R/C 40012"),
"EventSyncIn2Fall": ("Uint32", "R/C 40013"),
"EventSyncIn3Rise": ("Uint32", "R/C 40014"),
"EventSyncIn3Fall": ("Uint32", "R/C 40015"),
"EventSyncIn4Rise": ("Uint32", "R/C 40016"),
"EventSyncIn4Fall": ("Uint32", "R/C 40017"),
"EventOverflow": ("Uint32", "R/C 65534"),
"EventError": ("Uint32", "R/C"),
"EventNotification": ("Enum", "R/W"),
"EventSelector": ("Enum", "R/W"),
"EventsEnable1": ("Uint32", "R/W"),
"BandwidthCtrlMode": ("Enum", "R/W"),
"ChunkModeActive": ("Boolean", "R/W"),
"NonImagePayloadSize": ("Unit32", "R/V"),
"PayloadSize": ("Unit32", "R/V"),
"StreamBytesPerSecond": ("Uint32", "R/W"),
"StreamFrameRateConstrain": ("Boolean", "R/W"),
"StreamHoldCapacity": ("Uint32", "R/V"),
"StreamHoldEnable": ("Enum", "R/W"),
"TimeStampFrequency": ("Uint32", "R/C"),
"TimeStampValueHi": ("Uint32", "R/V"),
"TimeStampValueLo": ("Uint32", "R/V"),
"Height": ("Uint32", "R/W"),
"RegionX": ("Uint32", "R/W"),
"RegionY": ("Uint32", "R/W"),
"Width": ("Uint32", "R/W"),
"PixelFormat": ("Enum", "R/W"),
"TotalBytesPerFrame": ("Uint32", "R/V"),
"BinningX": ("Uint32", "R/W"),
"BinningY": ("Uint32", "R/W"),
"CameraName": ("String", "R/W"),
"DeviceFirmwareVersion": ("String", "R/C"),
"DeviceModelName": ("String", "R/W"),
"DevicePartNumber": ("String", "R/C"),
"DeviceSerialNumber": ("String", "R/C"),
"DeviceVendorName": ("String", "R/C"),
"FirmwareVerBuild": ("Uint32", "R/C"),
"FirmwareVerMajor": ("Uint32", "R/C"),
"FirmwareVerMinor": ("Uint32", "R/C"),
"PartClass": ("Uint32", "R/C"),
"PartNumber": ("Uint32", "R/C"),
"PartRevision": ("String", "R/C"),
"PartVersion": ("String", "R/C"),
"SerialNumber": ("String", "R/C"),
"SensorBits": ("Uint32", "R/C"),
"SensorHeight": ("Uint32", "R/C"),
"SensorType": ("Enum", "R/C"),
"SensorWidth": ("Uint32", "R/C"),
"UniqueID": ("Uint32", "R/C"),
"Strobe1ControlledDuration": ("Enum", "R/W"),
"Strobe1Delay": ("Uint32", "R/W"),
"Strobe1Duration": ("Uint32", "R/W"),
"Strobe1Mode": ("Enum", "R/W"),
"SyncIn1GlitchFilter": ("Uint32", "R/W"),
"SyncInLevels": ("Uint32", "R/V"),
"SyncOut1Invert": ("Enum", "R/W"),
"SyncOut1Mode": ("Enum", "R/W"),
"SyncOutGpoLevels": ("Uint32", "R/W"),
"DeviceEthAddress": ("String", "R/C"),
"HostEthAddress": ("String", "R/C"),
"DeviceIPAddress": ("String", "R/C"),
"HostIPAddress": ("String", "R/C"),
"GvcpRetries": ("Uint32", "R/W"),
"GvspLookbackWindow": ("Uint32", "R/W"),
"GvspResentPercent": ("Float32", "R/W"),
"GvspRetries": ("Uint32", "R/W"),
"GvspSocketBufferCount": ("Enum", "R/W"),
"GvspTimeout": ("Uint32", "R/W"),
"HeartbeatInterval": ("Uint32", "R/W"),
"HeartbeatTimeout": ("Uint32", "R/W"),
"MulticastEnable": ("Enum", "R/W"),
"MulticastIPAddress": ("String", "R/W"),
"PacketSize": ("Uint32", "R/W"),
"StatDriverType": ("Enum", "R/V"),
"StatFilterVersion": ("String", "R/C"),
"StatFrameRate": ("Float32", "R/V"),
"StatFramesCompleted": ("Uint32", "R/V"),
"StatFramesDropped": ("Uint32", "R/V"),
"StatPacketsErroneous": ("Uint32", "R/V"),
"StatPacketsMissed": ("Uint32", "R/V"),
"StatPacketsReceived": ("Uint32", "R/V"),
"StatPacketsRequested": ("Uint32", "R/V"),
"StatPacketResent": ("Uint32", "R/V")
}
class AVTCameraInfo(ct.Structure):
"""
AVTCameraInfo is an internal ctypes.Structure-derived class which
contains metadata about cameras on the local network.
Properties include:
* UniqueId
* CameraName
* ModelName
* PartNumber
* SerialNumber
* FirmwareVersion
* PermittedAccess
* InterfaceId
* InterfaceType
"""
_fields_ = [
("StructVer", ct.c_ulong),
("UniqueId", ct.c_ulong),
("CameraName", ct.c_char*32),
("ModelName", ct.c_char*32),
("PartNumber", ct.c_char*32),
("SerialNumber", ct.c_char*32),
("FirmwareVersion", ct.c_char*32),
("PermittedAccess", ct.c_long),
("InterfaceId", ct.c_ulong),
("InterfaceType", ct.c_int)
]
def __repr__(self):
return "<SimpleCV.Camera.AVTCameraInfo - UniqueId: %s>" % (self.UniqueId)
class AVTFrame(ct.Structure):
_fields_ = [
("ImageBuffer", ct.POINTER(ct.c_char)),
("ImageBufferSize", ct.c_ulong),
("AncillaryBuffer", ct.c_int),
("AncillaryBufferSize", ct.c_int),
("Context", ct.c_int*4),
("_reserved1", ct.c_ulong*8),
("Status", ct.c_int),
("ImageSize", ct.c_ulong),
("AncillarySize", ct.c_ulong),
("Width", ct.c_ulong),
("Height", ct.c_ulong),
("RegionX", ct.c_ulong),
("RegionY", ct.c_ulong),
("Format", ct.c_int),
("BitDepth", ct.c_ulong),
("BayerPattern", ct.c_int),
("FrameCount", ct.c_ulong),
("TimestampLo", ct.c_ulong),
("TimestampHi", ct.c_ulong),
("_reserved2", ct.c_ulong*32)
]
def __init__(self, buffersize):
self.ImageBuffer = ct.create_string_buffer(buffersize)
self.ImageBufferSize = ct.c_ulong(buffersize)
self.AncillaryBuffer = 0
self.AncillaryBufferSize = 0
self.img = None
self.hasImage = False
self.frame = None
def __del__(self):
#This function should disconnect from the AVT Camera
pverr(self.dll.PvCameraClose(self.handle))
def __init__(self, camera_id = -1, properties = {}, threaded = False):
#~ super(AVTCamera, self).__init__()
import platform
if platform.system() == "Windows":
self.dll = ct.windll.LoadLibrary("PvAPI.dll")
elif platform.system() == "Darwin":
self.dll = ct.CDLL("libPvAPI.dylib", ct.RTLD_GLOBAL)
else:
self.dll = ct.CDLL("libPvAPI.so")
if not self._pvinfo.get("initialized", False):
self.dll.PvInitialize()
self._pvinfo['initialized'] = True
#initialize. Note that we rely on listAllCameras being the next
#call, since it blocks on cameras initializing
camlist = self.listAllCameras()
if not len(camlist):
raise Exception("Couldn't find any cameras with the PvAVT driver. Use SampleViewer to confirm you have one connected.")
if camera_id < 9000: #camera was passed as an index reference
if camera_id == -1: #accept -1 for "first camera"
camera_id = 0
camera_id = camlist[camera_id].UniqueId
camera_id = long(camera_id)
self.handle = ct.c_uint()
init_count = 0
        while self.dll.PvCameraOpen(camera_id,0,ct.byref(self.handle)) != 0: #wait until camera is available
if init_count > 4: # Try to connect 5 times before giving up
raise Exception('Could not connect to camera, please verify with SampleViewer you can connect')
init_count += 1
time.sleep(1) # sleep and retry to connect to camera in a second
pverr(self.dll.PvCaptureStart(self.handle))
self.uniqueid = camera_id
self.setProperty("AcquisitionMode","SingleFrame")
self.setProperty("FrameStartTriggerMode","Freerun")
if properties.get("mode", "RGB") == 'gray':
self.setProperty("PixelFormat", "Mono8")
else:
self.setProperty("PixelFormat", "Rgb24")
        #give some compatibility with other cameras
if properties.get("mode", ""):
properties.pop("mode")
if properties.get("height", ""):
properties["Height"] = properties["height"]
properties.pop("height")
if properties.get("width", ""):
properties["Width"] = properties["width"]
properties.pop("width")
for p in properties:
self.setProperty(p, properties[p])
if threaded:
self._thread = AVTCameraThread(self)
self._thread.daemon = True
self._buffer = deque(maxlen=self._buffersize)
self._thread.start()
self.threaded = True
self.frame = None
self._refreshFrameStats()
def restart(self):
"""
This tries to restart the camera thread
"""
self._thread.stop()
self._thread = AVTCameraThread(self)
self._thread.daemon = True
self._buffer = deque(maxlen=self._buffersize)
self._thread.start()
def listAllCameras(self):
"""
**SUMMARY**
List all cameras attached to the host
**RETURNS**
List of AVTCameraInfo objects, otherwise empty list
"""
camlist = (self.AVTCameraInfo*100)()
starttime = time.time()
while int(camlist[0].UniqueId) == 0 and time.time() - starttime < 10:
self.dll.PvCameraListEx(ct.byref(camlist), 100, None, ct.sizeof(self.AVTCameraInfo))
time.sleep(0.1) #keep checking for cameras until timeout
return [cam for cam in camlist if cam.UniqueId != 0]
def runCommand(self,command):
"""
**SUMMARY**
Runs a PvAVT Command on the camera
Valid Commands include:
* FrameStartTriggerSoftware
* AcquisitionAbort
* AcquisitionStart
* AcquisitionStop
* ConfigFileLoad
* ConfigFileSave
* TimeStampReset
* TimeStampValueLatch
**RETURNS**
0 on success
**EXAMPLE**
>>>c = AVTCamera()
>>>c.runCommand("TimeStampReset")
"""
return self.dll.PvCommandRun(self.handle,command)
def getProperty(self, name):
"""
**SUMMARY**
This retrieves the value of the AVT Camera attribute
There are around 140 properties for the AVT Camera, so reference the
AVT Camera and Driver Attributes pdf that is provided with
the driver for detailed information
Note that the error codes are currently ignored, so empty values
may be returned.
**EXAMPLE**
>>>c = AVTCamera()
>>>print c.getProperty("ExposureValue")
"""
valtype, perm = self._properties.get(name, (None, None))
if not valtype:
return None
val = ''
err = 0
if valtype == "Enum":
val = ct.create_string_buffer(100)
vallen = ct.c_long()
err = self.dll.PvAttrEnumGet(self.handle, name, val, 100, ct.byref(vallen))
val = str(val[:vallen.value])
elif valtype == "Uint32":
val = ct.c_uint()
err = self.dll.PvAttrUint32Get(self.handle, name, ct.byref(val))
val = int(val.value)
elif valtype == "Float32":
val = ct.c_float()
err = self.dll.PvAttrFloat32Get(self.handle, name, ct.byref(val))
val = float(val.value)
elif valtype == "String":
val = ct.create_string_buffer(100)
vallen = ct.c_long()
err = self.dll.PvAttrStringGet(self.handle, name, val, 100, ct.byref(vallen))
val = str(val[:vallen.value])
elif valtype == "Boolean":
val = ct.c_bool()
err = self.dll.PvAttrBooleanGet(self.handle, name, ct.byref(val))
val = bool(val.value)
#TODO, handle error codes
return val
#TODO, implement the PvAttrRange* functions
#def getPropertyRange(self, name)
def getAllProperties(self):
"""
**SUMMARY**
This returns a dict with the name and current value of the
documented PvAVT attributes
CAVEAT: it addresses each of the properties individually, so
this may take time to run if there's network latency
**EXAMPLE**
>>>c = AVTCamera(0)
>>>props = c.getAllProperties()
>>>print props['ExposureValue']
"""
props = {}
for p in self._properties.keys():
props[p] = self.getProperty(p)
return props
def setProperty(self, name, value, skip_buffer_size_check=False):
"""
**SUMMARY**
This sets the value of the AVT Camera attribute.
There are around 140 properties for the AVT Camera, so reference the
AVT Camera and Driver Attributes pdf that is provided with
the driver for detailed information
By default, we will also refresh the height/width and bytes per
frame we're expecting -- you can manually bypass this if you want speed
Returns the raw PvAVT error code (0 = success)
**Example**
>>>c = AVTCamera()
>>>c.setProperty("ExposureValue", 30000)
>>>c.getImage().show()
"""
valtype, perm = self._properties.get(name, (None, None))
if not valtype:
return None
if valtype == "Uint32":
err = self.dll.PvAttrUint32Set(self.handle, name, ct.c_uint(int(value)))
elif valtype == "Float32":
err = self.dll.PvAttrFloat32Set(self.handle, name, ct.c_float(float(value)))
elif valtype == "Enum":
err = self.dll.PvAttrEnumSet(self.handle, name, str(value))
elif valtype == "String":
err = self.dll.PvAttrStringSet(self.handle, name, str(value))
elif valtype == "Boolean":
err = self.dll.PvAttrBooleanSet(self.handle, name, ct.c_bool(bool(value)))
#just to be safe, re-cache the camera metadata
if not skip_buffer_size_check:
self._refreshFrameStats()
return err
def getImage(self, timeout = 5000):
"""
**SUMMARY**
        Extract an Image from the Camera, returning the value. No matter
        what the image characteristics on the camera, the Image returned
        will be RGB, 8-bit depth; if the camera is in greyscale mode it will
        be 3 identical channels.
**EXAMPLE**
>>>c = AVTCamera()
>>>c.getImage().show()
"""
        if self.frame is not None:
st = time.time()
try:
pverr( self.dll.PvCaptureWaitForFrameDone(self.handle, ct.byref(self.frame), timeout) )
except Exception as e:
print("Exception waiting for frame:", e)
print("Time taken:",time.time() - st)
self.frame = None
raise(e)
img = self.unbuffer()
self.frame = None
return img
elif self.threaded:
self._thread.lock.acquire()
try:
img = self._buffer.pop()
self._lastimage = img
except IndexError:
img = self._lastimage
self._thread.lock.release()
else:
self.runCommand("AcquisitionStart")
frame = self._getFrame(timeout)
img = Image(pil.fromstring(self.imgformat,
(self.width, self.height),
frame.ImageBuffer[:int(frame.ImageBufferSize)]))
self.runCommand("AcquisitionStop")
return img
def setupASyncMode(self):
self.setProperty('AcquisitionMode','SingleFrame')
self.setProperty('FrameStartTriggerMode','Software')
def setupSyncMode(self):
self.setProperty('AcquisitionMode','Continuous')
self.setProperty('FrameStartTriggerMode','FreeRun')
def unbuffer(self):
img = Image(pil.fromstring(self.imgformat,
(self.width, self.height),
self.frame.ImageBuffer[:int(self.frame.ImageBufferSize)]))
return img
def _refreshFrameStats(self):
self.width = self.getProperty("Width")
self.height = self.getProperty("Height")
self.buffersize = self.getProperty("TotalBytesPerFrame")
self.pixelformat = self.getProperty("PixelFormat")
self.imgformat = 'RGB'
if self.pixelformat == 'Mono8':
self.imgformat = 'L'
def _getFrame(self, timeout = 5000):
#return the AVTFrame object from the camera, timeout in ms
#need to multiply by bitdepth
try:
frame = self.AVTFrame(self.buffersize)
pverr( self.dll.PvCaptureQueueFrame(self.handle, ct.byref(frame), None) )
st = time.time()
try:
pverr( self.dll.PvCaptureWaitForFrameDone(self.handle, ct.byref(frame), timeout) )
except Exception as e:
print("Exception waiting for frame:", e)
print("Time taken:",time.time() - st)
raise(e)
except Exception as e:
print("Exception aquiring frame:", e)
raise(e)
return frame
def acquire(self):
self.frame = self.AVTFrame(self.buffersize)
try:
self.runCommand("AcquisitionStart")
pverr( self.dll.PvCaptureQueueFrame(self.handle, ct.byref(self.frame), None) )
self.runCommand("AcquisitionStop")
except Exception as e:
print("Exception aquiring frame:", e)
raise(e)
class GigECamera(Camera):
"""
GigE Camera driver via Aravis
"""
def __init__(self, camera_id = None, properties = {}, threaded = False):
try:
from gi.repository import Aravis
        except ImportError:
print("GigE is supported by the Aravis library, download and build from https://github.com/sightmachine/aravis")
print("Note that you need to set GI_TYPELIB_PATH=$GI_TYPELIB_PATH:(PATH_TO_ARAVIS)/src for the GObject Introspection")
sys.exit()
self._cam = Aravis.Camera.new (None)
self._pixel_mode = "RGB"
if properties.get("mode", False):
self._pixel_mode = properties.pop("mode")
if self._pixel_mode == "gray":
self._cam.set_pixel_format (Aravis.PIXEL_FORMAT_MONO_8)
else:
self._cam.set_pixel_format (Aravis.PIXEL_FORMAT_BAYER_BG_8) #we'll use bayer (basler cams)
#TODO, deal with other pixel formats
if properties.get("roi", False):
roi = properties['roi']
self._cam.set_region(*roi)
#TODO, check sensor size
if properties.get("width", False):
#TODO, set internal function to scale results of getimage
pass
if properties.get("framerate", False):
self._cam.set_frame_rate(properties['framerate'])
self._stream = self._cam.create_stream (None, None)
payload = self._cam.get_payload()
self._stream.push_buffer(Aravis.Buffer.new_allocate (payload))
[x,y,width,height] = self._cam.get_region ()
self._height, self._width = height, width
def getImage(self):
camera = self._cam
camera.start_acquisition()
buff = self._stream.pop_buffer()
self.capturetime = buff.timestamp_ns / 1000000.0
img = np.fromstring(ct.string_at(buff.data_address(), buff.size), dtype = np.uint8).reshape(self._height, self._width)
rgb = cv2.cvtColor(img, cv2.COLOR_BAYER_BG2BGR)
self._stream.push_buffer(buff)
camera.stop_acquisition()
#TODO, we should handle software triggering (separate capture and get image events)
return Image(rgb)
def getPropertyList(self):
l = [
'available_pixel_formats',
'available_pixel_formats_as_display_names',
'available_pixel_formats_as_strings',
'binning',
'device_id',
'exposure_time',
'exposure_time_bounds',
'frame_rate',
'frame_rate_bounds',
'gain',
'gain_bounds',
'height_bounds',
'model_name',
'payload',
'pixel_format',
'pixel_format_as_string',
'region',
'sensor_size',
'trigger_source',
'vendor_name',
'width_bounds'
]
return l
def getProperty(self, name = None):
'''
        This function gets a property available on the camera
Usage:
> camera.getProperty('region')
> (0, 0, 128, 128)
Available Properties:
see function camera.getPropertyList()
'''
        if name is None:
print("You need to provide a property, available properties are:")
print("")
for p in self.getPropertyList():
print(p)
return
stringval = "get_{}".format(name)
try:
return getattr(self._cam, stringval)()
except:
print('Property {} does not appear to exist'.format(name))
return None
def setProperty(self, name = None, *args):
'''
        This function sets a property available on the camera
Usage:
> camera.setProperty('region',(256,256))
Available Properties:
see function camera.getPropertyList()
'''
        if name is None:
print("You need to provide a property, available properties are:")
print("")
for p in self.getPropertyList():
print(p)
return
if len(args) <= 0:
print("You must provide a value to set")
return
stringval = "set_{}".format(name)
try:
return getattr(self._cam, stringval)(*args)
except:
print('Property {} does not appear to exist or value is not in correct format'.format(name))
return None
def getAllProperties(self):
'''
This function just prints out all the properties available to the camera
'''
for p in self.getPropertyList():
print("{}: {}".format(p,self.getProperty(p)))
class VimbaCameraThread(threading.Thread):
camera = None
run = True
verbose = False
lock = None
logger = None
framerate = 0
def __init__(self, camera):
super(VimbaCameraThread, self).__init__()
self._stop = threading.Event()
self.camera = camera
self.lock = threading.Lock()
self.name = 'Thread-Camera-ID-' + str(self.camera.uniqueid)
def run(self):
counter = 0
timestamp = time.time()
while self.run:
self.lock.acquire()
img = self.camera._captureFrame(1000)
self.camera._buffer.appendleft(img)
self.lock.release()
counter += 1
time.sleep(0.01)
if time.time() - timestamp >= 1:
self.camera.framerate = counter
counter = 0
timestamp = time.time()
def stop(self):
self._stop.set()
def stopped(self):
return self._stop.isSet()
class VimbaCamera(FrameSource):
"""
**SUMMARY**
VimbaCamera is a wrapper for the Allied Vision cameras,
such as the "manta" series.
This requires the
1) Vimba SDK provided from Allied Vision
http://www.alliedvisiontec.com/us/products/software/vimba-sdk.html
2) Pyvimba Python library
TODO: <INSERT URL>
    Note that as of the time of writing, the VIMBA driver is not available
    for Mac.
    All camera properties are directly from the Vimba SDK manual -- if not
    specified it will default to whatever the camera state is. Cameras
    are addressed by their index in the list of attached cameras.
**EXAMPLE**
>>> cam = VimbaCamera(0, {"width": 656, "height": 492})
>>>
>>> img = cam.getImage()
>>> img.show()
"""
def _setupVimba(self):
from pymba import Vimba
self._vimba = Vimba()
self._vimba.startup()
system = self._vimba.getSystem()
if system.GeVTLIsPresent:
system.runFeatureCommand("GeVDiscoveryAllOnce")
time.sleep(0.2)
def __del__(self):
#This function should disconnect from the Vimba Camera
if self._camera is not None:
if self.threaded:
self._thread.stop()
time.sleep(0.2)
if self._frame is not None:
self._frame.revokeFrame()
self._frame = None
self._camera.closeCamera()
self._vimba.shutdown()
def shutdown(self):
"""You must call this function if you are using threaded=true when you are finished
to prevent segmentation fault"""
# REQUIRED TO PREVENT SEGMENTATION FAULT FOR THREADED=True
if (self._camera):
self._camera.closeCamera()
self._vimba.shutdown()
def __init__(self, camera_id = -1, properties = {}, threaded = False):
if not VIMBA_ENABLED:
raise Exception("You don't seem to have the pymba library installed. This will make it hard to use a AVT Vimba Camera.")
self._vimba = None
self._setupVimba()
camlist = self.listAllCameras()
self._camTable = {}
self._frame = None
self._buffer = None # Buffer to store images
self._buffersize = 10 # Number of images to keep in the rolling image buffer for threads
self._lastimage = None # Last image loaded into memory
self._thread = None
self._framerate = 0
self.threaded = False
self._properties = {}
self._camera = None
i = 0
for cam in camlist:
self._camTable[i] = {'id': cam.cameraIdString}
i += 1
if not len(camlist):
raise Exception("Couldn't find any cameras with the Vimba driver. Use VimbaViewer to confirm you have one connected.")
if camera_id < 9000: #camera was passed as an index reference
if camera_id == -1: #accept -1 for "first camera"
camera_id = 0
if (camera_id > len(camlist)):
raise Exception("Couldn't find camera at index %d." % camera_id)
cam_guid = camlist[camera_id].cameraIdString
else:
raise Exception("Index %d is too large" % camera_id)
self._camera = self._vimba.getCamera(cam_guid)
self._camera.openCamera()
self.uniqueid = cam_guid
self.setProperty("AcquisitionMode","SingleFrame")
self.setProperty("TriggerSource","Freerun")
# TODO: FIX
if properties.get("mode", "RGB") == 'gray':
self.setProperty("PixelFormat", "Mono8")
else:
fmt = "RGB8Packed" # alternatively use BayerRG8
self.setProperty("PixelFormat", "BayerRG8")
        #give some compatibility with other cameras
if properties.get("mode", ""):
properties.pop("mode")
if properties.get("height", ""):
properties["Height"] = properties["height"]
properties.pop("height")
if properties.get("width", ""):
properties["Width"] = properties["width"]
properties.pop("width")
for p in properties:
self.setProperty(p, properties[p])
if threaded:
self._thread = VimbaCameraThread(self)
self._thread.daemon = True
self._buffer = deque(maxlen=self._buffersize)
self._thread.start()
self.threaded = True
self._refreshFrameStats()
def restart(self):
"""
This tries to restart the camera thread
"""
self._thread.stop()
self._thread = VimbaCameraThread(self)
self._thread.daemon = True
self._buffer = deque(maxlen=self._buffersize)
self._thread.start()
def listAllCameras(self):
"""
**SUMMARY**
List all cameras attached to the host
**RETURNS**
List of VimbaCamera objects, otherwise empty list
VimbaCamera objects are defined in the pymba module
"""
cameraIds = self._vimba.getCameraIds()
ar = []
for cameraId in cameraIds:
ar.append(self._vimba.getCamera(cameraId))
return ar
def runCommand(self,command):
"""
**SUMMARY**
Runs a Vimba Command on the camera
Valid Commands include:
* AcquisitionAbort
* AcquisitionStart
* AcquisitionStop
**RETURNS**
0 on success
**EXAMPLE**
>>>c = VimbaCamera()
>>>c.runCommand("TimeStampReset")
"""
return self._camera.runFeatureCommand(command)
def getProperty(self, name):
"""
**SUMMARY**
This retrieves the value of the Vimba Camera attribute
There are around 140 properties for the Vimba Camera, so reference the
Vimba Camera pdf that is provided with
the SDK for detailed information
Throws VimbaException if property is not found or not implemented yet.
**EXAMPLE**
>>>c = VimbaCamera()
>>>print c.getProperty("ExposureMode")
"""
return self._camera.__getattr__(name)
#TODO, implement the PvAttrRange* functions
#def getPropertyRange(self, name)
def getAllProperties(self):
"""
**SUMMARY**
This returns a dict with the name and current value of the
documented Vimba attributes
CAVEAT: it addresses each of the properties individually, so
this may take time to run if there's network latency
**EXAMPLE**
>>>c = VimbaCamera(0)
>>>props = c.getAllProperties()
>>>print props['ExposureMode']
"""
from pymba import VimbaException
# TODO
ar = {}
c = self._camera
cameraFeatureNames = c.getFeatureNames()
for name in cameraFeatureNames:
try:
ar[name] = c.__getattr__(name)
except VimbaException:
# Ignore features not yet implemented
pass
return ar
def setProperty(self, name, value, skip_buffer_size_check=False):
"""
**SUMMARY**
This sets the value of the Vimba Camera attribute.
There are around 140 properties for the Vimba Camera, so reference the
Vimba Camera pdf that is provided with
the SDK for detailed information
Throws VimbaException if property not found or not yet implemented
**Example**
>>>c = VimbaCamera()
>>>c.setProperty("ExposureAutoRate", 200)
>>>c.getImage().show()
"""
ret = self._camera.__setattr__(name, value)
#just to be safe, re-cache the camera metadata
if not skip_buffer_size_check:
self._refreshFrameStats()
return ret
def getImage(self):
"""
**SUMMARY**
        Extract an Image from the Camera, returning the value. No matter
        what the image characteristics on the camera, the Image returned
        will be RGB, 8-bit depth; if the camera is in greyscale mode it will
        be 3 identical channels.
**EXAMPLE**
>>>c = VimbaCamera()
>>>c.getImage().show()
"""
if self.threaded:
self._thread.lock.acquire()
try:
img = self._buffer.pop()
self._lastimage = img
except IndexError:
img = self._lastimage
self._thread.lock.release()
else:
img = self._captureFrame()
return img
def setupASyncMode(self):
self.setProperty('AcquisitionMode','SingleFrame')
self.setProperty('TriggerSource','Software')
def setupSyncMode(self):
self.setProperty('AcquisitionMode','SingleFrame')
self.setProperty('TriggerSource','Freerun')
def _refreshFrameStats(self):
self.width = self.getProperty("Width")
self.height = self.getProperty("Height")
self.pixelformat = self.getProperty("PixelFormat")
self.imgformat = 'RGB'
if self.pixelformat == 'Mono8':
self.imgformat = 'L'
def _getFrame(self):
if not self._frame:
self._frame = self._camera.getFrame() # creates a frame
self._frame.announceFrame()
return self._frame
def _captureFrame(self, timeout = 5000):
try:
c = self._camera
f = self._getFrame()
colorSpace = ColorSpace.BGR
if self.pixelformat == 'Mono8':
colorSpace = ColorSpace.GRAY
c.startCapture()
f.queueFrameCapture()
c.runFeatureCommand('AcquisitionStart')
c.runFeatureCommand('AcquisitionStop')
try:
f.waitFrameCapture(timeout)
except Exception as e:
print("Exception waiting for frame: %s: %s" % (e, traceback.format_exc()))
raise(e)
imgData = f.getBufferByteData()
moreUsefulImgData = np.ndarray(buffer = imgData,
dtype = np.uint8,
shape = (f.height, f.width, 1))
rgb = cv2.cvtColor(moreUsefulImgData, cv2.COLOR_BAYER_RG2RGB)
c.endCapture()
return Image(rgb, colorSpace=colorSpace, cv2image=imgData)
except Exception as e:
print("Exception acquiring frame: %s: %s" % (e, traceback.format_exc()))
raise(e)
| bsd-3-clause | -3,045,522,090,509,219,300 | 33.480523 | 271 | 0.561234 | false |
webvalley2013/nanothings | process/models.py | 1 | 2097 | # This file is part of nanothings.
#
# nanothings is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero GPL as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nanothings is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero GPL for more details.
#
# You should have received a copy of the GNU Affero GPL
# along with nanothings. If not, see <http://www.gnu.org/licenses/>.
# MODULES
from django.db import models
import jsonfield
import djcelery
class Process(models.Model):
TYPE_CHOICES = (
('plr', 'plr'),
('hadoop', 'hadoop'),
('3d', '3d')
)
# Fields
code = models.CharField(max_length=40, unique=True)
description = models.TextField(blank=True, null=True)
author = models.CharField(max_length=40)
date = models.DateTimeField()
type = models.CharField(choices=TYPE_CHOICES, max_length=10)
inputs = jsonfield.JSONField()
outputs = jsonfield.JSONField()
def __unicode__(self):
return u'%s' % (self.code)
class RunningProcess(models.Model):
# Fields
process_type = models.ForeignKey(Process)
task_id = models.CharField(max_length=36)
inputs = jsonfield.JSONField()
started = models.DateTimeField()
    # Returns the Celery task result object (AsyncResult) for this task id
@property
def celery_task(self):
return djcelery.celery.AsyncResult(self.task_id)
# Check if the task has finished
@property
def finished(self):
return self.celery_task.ready()
# Return the current status of the task
@property
def status(self):
return self.celery_task.status
# Returns the result of the task
@property
def result(self):
return self.celery_task.get()
# Returns the time when the task has finished
@property
def finished_time(self):
        return 0  # TODO: temporary placeholder, not implemented yet
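# Rough usage sketch (the names `proc` and `result`, and the surrounding Celery
# dispatch, are assumptions and not part of this module):
#   running = RunningProcess.objects.create(process_type=proc,
#                                           task_id=result.id,
#                                           inputs={'n': 10},
#                                           started=datetime.now())
#   if running.finished:
#       print(running.result)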
| agpl-3.0 | -6,324,198,666,409,698,000 | 27.337838 | 73 | 0.66619 | false |
openfisca/openfisca-tunisia | openfisca_tunisia/reforms/de_net_a_imposable.py | 1 | 2513 | from __future__ import division
from openfisca_tunisia.model.base import *
from openfisca_tunisia import entities
from numpy.ma.testutils import assert_not_equal
from urllib.request import Request
try:
from scipy.optimize import fsolve
except ImportError:
fsolve = None
def calculate_net_from(salaire_imposable, individu, period, requested_variable_names):
# Work in isolation
temp_simulation = individu.simulation.clone()
temp_individu = temp_simulation.individu
# Calculated variable holders might contain undesired cache
# (their entity.simulation points to the original simulation above)
for name in requested_variable_names:
temp_individu.get_holder(name).delete_arrays(period)
    # We don't want to calculate salaire_imposable again,
    # but instead manually set it as an input variable
temp_individu.get_holder('salaire_imposable').set_input(period, salaire_imposable)
# Force recomputing of salaire_net_a_payer
temp_individu.get_holder('salaire_net_a_payer').delete_arrays(period)
net = temp_individu('salaire_net_a_payer', period)[0]
return net
class salaire_imposable(Variable):
value_type = float
entity = Individu
label = "Salaire imposable"
definition_period = MONTH
set_input = set_input_divide_by_period
def formula(individu, period):
# Use numerical inversion to calculate 'salaire_imposable' from 'salaire_net_a_payer'
net = individu.get_holder('salaire_net_a_payer').get_array(period)
if net is None:
return individu.empty_array()
simulation = individu.simulation
simulation.period = period
# List of variables already calculated.
# We will need it to remove their holders, that might contain undesired cache
requested_variable_names = [stack_frame['name'] for stack_frame in individu.simulation.tracer.stack]
def solve_func(net):
def innerfunc(essai):
return calculate_net_from(essai, individu, period, requested_variable_names) - net
return innerfunc
imposable_calcule = \
fsolve(
solve_func(net),
                net * 1.25, # starting guess: the often-quoted "gross is roughly net * 1.25" rule of thumb
                xtol = 1 / 100 # precision
)
return imposable_calcule
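# The inversion above is a plain root search: fsolve looks for the
# salaire_imposable x such that net(x) - target_net == 0, starting from the
# target net times 1.25. A self-contained sketch of the same idea (purely
# illustrative, with a toy net function that is not part of OpenFisca):
#   from scipy.optimize import fsolve
#   net_of = lambda gross: gross * 0.8              # assumed toy net function
#   gross = fsolve(lambda g: net_of(g) - 1000.0, 1250.0)
#   # -> the gross amount whose net is 1000.0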
class de_net_a_imposable(Reform):
name = 'Inversion du calcul imposable -> net'
def apply(self):
self.update_variable(salaire_imposable)
| agpl-3.0 | 3,613,771,849,119,785,000 | 32.039474 | 108 | 0.680207 | false |
SUSE/azure-sdk-for-python | azure-mgmt-monitor/azure/mgmt/monitor/models/log_profile_resource_patch.py | 1 | 2775 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LogProfileResourcePatch(Model):
"""The log profile resource for patch operations.
:param tags: Resource tags
:type tags: dict
:param storage_account_id: the resource id of the storage account to which
you would like to send the Activity Log.
:type storage_account_id: str
:param service_bus_rule_id: The service bus rule ID of the service bus
namespace in which you would like to have Event Hubs created for streaming
the Activity Log. The rule ID is of the format: '{service bus resource
ID}/authorizationrules/{key name}'.
:type service_bus_rule_id: str
:param locations: List of regions for which Activity Log events should be
stored or streamed. It is a comma separated list of valid ARM locations
including the 'global' location.
:type locations: list of str
:param categories: the categories of the logs. These categories are
created as is convenient to the user. Some values are: 'Write', 'Delete',
and/or 'Action.'
:type categories: list of str
:param retention_policy: the retention policy for the events in the log.
:type retention_policy: :class:`RetentionPolicy
<azure.mgmt.monitor.models.RetentionPolicy>`
"""
_validation = {
'locations': {'required': True},
'categories': {'required': True},
'retention_policy': {'required': True},
}
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'storage_account_id': {'key': 'properties.storageAccountId', 'type': 'str'},
'service_bus_rule_id': {'key': 'properties.serviceBusRuleId', 'type': 'str'},
'locations': {'key': 'properties.locations', 'type': '[str]'},
'categories': {'key': 'properties.categories', 'type': '[str]'},
'retention_policy': {'key': 'properties.retentionPolicy', 'type': 'RetentionPolicy'},
}
def __init__(self, locations, categories, retention_policy, tags=None, storage_account_id=None, service_bus_rule_id=None):
self.tags = tags
self.storage_account_id = storage_account_id
self.service_bus_rule_id = service_bus_rule_id
self.locations = locations
self.categories = categories
self.retention_policy = retention_policy
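# Illustrative construction of a patch payload (the RetentionPolicy keyword
# arguments are an assumption based on the azure.mgmt.monitor models and are
# not defined in this file):
#   patch = LogProfileResourcePatch(
#       locations=['global'],
#       categories=['Write', 'Delete', 'Action'],
#       retention_policy=RetentionPolicy(enabled=True, days=30))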
| mit | -8,665,037,862,880,395,000 | 43.758065 | 126 | 0.63964 | false |
unreal666/outwiker | plugins/diagrammer/diagrammer/libs/blockdiag/noderenderer/dots.py | 2 | 1737 | # -*- coding: utf-8 -*-
# Copyright 2011 Takeshi KOMIYA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from blockdiag.noderenderer import install_renderer
from blockdiag.noderenderer.base import NodeShape
from blockdiag.utils import Box, XY
class Dots(NodeShape):
def render_label(self, drawer, **kwargs):
pass
def render_shape(self, drawer, _, **kwargs):
if kwargs.get('shadow'):
return
m = self.metrics
center = m.cell(self.node).center
dots = [center]
if self.node.group.orientation == 'landscape':
pt = XY(center.x, center.y - m.node_height / 2)
dots.append(pt)
pt = XY(center.x, center.y + m.node_height / 2)
dots.append(pt)
else:
pt = XY(center.x - m.node_width / 3, center.y)
dots.append(pt)
pt = XY(center.x + m.node_width / 3, center.y)
dots.append(pt)
r = m.cellsize / 2
for dot in dots:
box = Box(dot.x - r, dot.y - r, dot.x + r, dot.y + r)
drawer.ellipse(box, fill=self.node.linecolor,
outline=self.node.linecolor)
def setup(self):
install_renderer('dots', Dots)
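# install_renderer('dots', Dots) registers this shape with blockdiag, so a
# diagram definition can then request it per node -- presumably something like:
#   blockdiag {
#     A [shape = "dots"];
#     A -> B;
#   }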
| gpl-3.0 | -4,966,972,733,427,709,000 | 31.773585 | 75 | 0.617732 | false |
houssine78/addons | website_product_subscription/controllers/main.py | 1 | 8581 | # -*- coding: utf-8 -*-
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
from openerp.exceptions import ValidationError
class WebsiteProductSubscription(http.Controller):
@http.route(['/page/login_subscriber',
'/login_subscriber'],
type='http',
auth="user",
website=True)
def login_subscriber(self, **kwargs):
return request.redirect("/page/become_subscriber")
@http.route(['/page/become_subscriber',
'/become_subscriber'],
type='http',
auth="public",
website=True)
def display_subscription_page(self, **kwargs):
values = {}
values = self.fill_values(values, True)
for field in ['email', 'firstname', 'lastname', 'address', 'city',
'zip_code', 'country_id', 'error_msg']:
if kwargs.get(field):
values[field] = kwargs.pop(field)
values.update(kwargs=kwargs.items())
return request.website.render("website_product_subscription.becomesubscriber", values)
def fill_values(self, values, load_from_user=False):
sub_temp_obj = request.env['product.subscription.template']
if load_from_user:
# the subscriber is connected
if request.env.user.login != 'public':
values['logged'] = 'on'
partner = request.env.user.partner_id
values['firstname'] = partner.firstname
values['lastname'] = partner.lastname
values['email'] = partner.email
values['street'] = partner.street
values['zip_code'] = partner.zip
values['city'] = partner.city
values['country_id'] = partner.country_id.id
if partner.parent_id:
values['company'] = partner.parent_id.display_name
if not values.get('product_subscription_id', False):
values['product_subscription_id'] = 0
values['subscriptions'] = sub_temp_obj.sudo().search([('publish', '=', True)])
values['countries'] = self.get_countries()
if not values.get('country_id'):
values['country_id'] = '21'
return values
def get_countries(self):
countries = request.env['res.country'].sudo().search([])
return countries
def get_address(self, kwargs):
vals = {'zip': kwargs.get("zip_code"),
'city': kwargs.get("city"),
'country_id': kwargs.get("country_id")}
address = kwargs.get("street") + ', ' + kwargs.get("street_number")
if kwargs.get("box").strip() != '':
address = address + ', ' + kwargs.get("box").strip()
vals['street'] = address
return vals
def get_receiver(self, kwargs):
vals = {'email': kwargs.get("subscriber_email"),
'out_inv_comm_type': 'bba',
'out_inv_comm_algorithm': 'random'}
firstname = kwargs.get("subscriber_firstname").title()
lastname = kwargs.get("subscriber_lastname").upper()
vals['name'] = firstname + ' ' + lastname
vals['firstname'] = firstname
vals['lastname'] = lastname
vals["customer"] = True
return vals
@http.route(['/product_subscription/subscribe'], type='http', auth="public", website=True)
def product_subscription(self, **kwargs):
partner_obj = request.env['res.partner']
user_obj = request.env['res.users']
values = {}
redirect = "website_product_subscription.becomesubscriber"
if 'g-recaptcha-response' not in kwargs or not request.website.is_captcha_valid(kwargs['g-recaptcha-response']):
values = self.fill_values(values)
values.update(kwargs)
values["error_msg"] = _("the captcha has not been validated, "
"please fill in the captcha")
return request.website.render(redirect, values)
logged = kwargs.get("logged") == 'on'
gift = kwargs.get("gift") == 'on'
if not logged and kwargs.get("email") != kwargs.get("email_confirmation"):
values = self.fill_values(values)
values.update(kwargs)
values["error_msg"] = _("email and confirmation email doesn't match")
return request.website.render(redirect, values)
if not logged and 'email' in kwargs:
user = user_obj.sudo().search([('login', '=', kwargs.get("email"))])
if user:
values = self.fill_values(values)
values.update(kwargs)
values["error_msg"] = _("There is an existing account for "
"this mail address. Please login "
"before fill in the form")
return request.website.render(redirect, values)
if gift:
values["gift"] = gift
subscriber = False
sponsor = False
subscriber_vals = {}
if logged:
subscriber = request.env.user.partner_id
address = self.get_address(kwargs)
if gift:
sponsor = request.env.user.partner_id
subscriber_vals.update(self.get_receiver(kwargs))
subscriber_vals.update(address)
subscriber = partner_obj.sudo().create(subscriber_vals)
else:
subscriber.sudo().write(address)
else:
lastname = kwargs.get("lastname").upper()
firstname = kwargs.get("firstname").title()
subscriber_vals["name"] = firstname + " " + lastname
subscriber_vals["lastname"] = lastname
subscriber_vals["firstname"] = firstname
subscriber_vals["email"] = kwargs.get("email")
subscriber_vals["out_inv_comm_type"] = 'bba'
subscriber_vals["out_inv_comm_algorithm"] = 'random'
subscriber_vals["customer"] = True
if gift:
receiver_vals = self.get_receiver(kwargs)
receiver_vals.update(self.get_address(kwargs))
subscriber = partner_obj.sudo().create(receiver_vals)
sponsor = partner_obj.sudo().create(subscriber_vals)
else:
subscriber_vals.update(self.get_address(kwargs))
subscriber = partner_obj.sudo().create(subscriber_vals)
values['subscriber'] = subscriber.id
user_values = {'partner_id': subscriber.id, 'login': subscriber.email}
if sponsor:
values['sponsor'] = sponsor.id
user_values['partner_id'] = sponsor.id
user_values['login'] = sponsor.email
values["subscription_template"] = int(kwargs.get("product_subscription_id"))
request.env['product.subscription.request'].sudo().create(values)
if not logged:
if "company" in kwargs and kwargs.get("company").strip() != '':
vat_number = ''
if "vat_number" in kwargs and kwargs.get("vat_number").strip() != '':
vat_number = kwargs.get("vat_number").strip()
company_vals = {
'name': kwargs.get("company"),
'email': subscriber.email,
'out_inv_comm_type': 'bba',
'out_inv_comm_algorithm': 'random',
'vat': vat_number,
}
try:
company = partner_obj.sudo().create(company_vals)
except ValidationError as ve:
values = self.fill_values(values)
values.update(kwargs)
values['error_msg'] = ve.name
return request.website.render(redirect, values)
subscriber.sudo().write({'parent_id': company.id})
# create user last to avoid creating a user when
# an error occurs
user_id = user_obj.sudo()._signup_create_user(user_values)
user = user_obj.browse(user_id)
user.sudo().with_context({'create_user': True}).action_reset_password()
return request.website.render('website_product_subscription.product_subscription_thanks',values)
| agpl-3.0 | 5,936,635,877,928,867,000 | 39.65534 | 120 | 0.53735 | false |
closeio/mongoengine | tests/queryset/queryset.py | 1 | 117581 | import sys
sys.path[0:0] = [""]
import unittest
import uuid
from nose.plugins.skip import SkipTest
from datetime import datetime, timedelta
import pymongo
from pymongo.errors import ConfigurationError
from pymongo.read_preferences import ReadPreference
from bson import ObjectId
from mongoengine import *
from mongoengine.connection import get_connection
from mongoengine.python_support import PY3
from mongoengine.context_managers import query_counter
from mongoengine.queryset import (QuerySet, QuerySetManager,
MultipleObjectsReturned, DoesNotExist,
queryset_manager)
from mongoengine.errors import InvalidQueryError
__all__ = ("QuerySetTest",)
class QuerySetTest(unittest.TestCase):
def setUp(self):
connect(db='mongoenginetest')
class Person(Document):
name = StringField()
age = IntField()
meta = {'allow_inheritance': True}
Person.drop_collection()
self.Person = Person
def test_initialisation(self):
"""Ensure that a QuerySet is correctly initialised by QuerySetManager.
"""
self.assertTrue(isinstance(self.Person.objects, QuerySet))
self.assertEqual(self.Person.objects._collection.name,
self.Person._get_collection_name())
self.assertTrue(isinstance(self.Person.objects._collection,
pymongo.collection.Collection))
def test_cannot_perform_joins_references(self):
class BlogPost(Document):
author = ReferenceField(self.Person)
author2 = GenericReferenceField()
def test_reference():
list(BlogPost.objects(author__name="test"))
self.assertRaises(InvalidQueryError, test_reference)
        def test_generic_reference():
            list(BlogPost.objects(author2__name="test"))
        self.assertRaises(InvalidQueryError, test_generic_reference)
def test_find(self):
"""Ensure that a query returns a valid set of results.
"""
self.Person(name="User A", age=20).save()
self.Person(name="User B", age=30).save()
# Find all people in the collection
people = self.Person.objects
self.assertEqual(people.count(), 2)
results = list(people)
self.assertTrue(isinstance(results[0], self.Person))
self.assertTrue(isinstance(results[0].id, (ObjectId, str, unicode)))
self.assertEqual(results[0].name, "User A")
self.assertEqual(results[0].age, 20)
self.assertEqual(results[1].name, "User B")
self.assertEqual(results[1].age, 30)
# Use a query to filter the people found to just person1
people = self.Person.objects(age=20)
self.assertEqual(people.count(), 1)
person = people.next()
self.assertEqual(person.name, "User A")
self.assertEqual(person.age, 20)
# Test limit
people = list(self.Person.objects.limit(1))
self.assertEqual(len(people), 1)
self.assertEqual(people[0].name, 'User A')
# Test skip
people = list(self.Person.objects.skip(1))
self.assertEqual(len(people), 1)
self.assertEqual(people[0].name, 'User B')
person3 = self.Person(name="User C", age=40)
person3.save()
# Test slice limit
people = list(self.Person.objects[:2])
self.assertEqual(len(people), 2)
self.assertEqual(people[0].name, 'User A')
self.assertEqual(people[1].name, 'User B')
# Test slice skip
people = list(self.Person.objects[1:])
self.assertEqual(len(people), 2)
self.assertEqual(people[0].name, 'User B')
self.assertEqual(people[1].name, 'User C')
# Test slice limit and skip
people = list(self.Person.objects[1:2])
self.assertEqual(len(people), 1)
self.assertEqual(people[0].name, 'User B')
# Test slice limit and skip cursor reset
qs = self.Person.objects[1:2]
# fetch then delete the cursor
qs._cursor
qs._cursor_obj = None
people = list(qs)
self.assertEqual(len(people), 1)
self.assertEqual(people[0].name, 'User B')
people = list(self.Person.objects[1:1])
self.assertEqual(len(people), 0)
# Test slice out of range
people = list(self.Person.objects[80000:80001])
self.assertEqual(len(people), 0)
# Test larger slice __repr__
self.Person.objects.delete()
for i in xrange(55):
self.Person(name='A%s' % i, age=i).save()
self.assertEqual(self.Person.objects.count(), 55)
self.assertEqual("Person object", "%s" % self.Person.objects[0])
self.assertEqual("[<Person: Person object>, <Person: Person object>]", "%s" % self.Person.objects[1:3])
self.assertEqual("[<Person: Person object>, <Person: Person object>]", "%s" % self.Person.objects[51:53])
def test_find_one(self):
"""Ensure that a query using find_one returns a valid result.
"""
person1 = self.Person(name="User A", age=20)
person1.save()
person2 = self.Person(name="User B", age=30)
person2.save()
# Retrieve the first person from the database
person = self.Person.objects.first()
self.assertTrue(isinstance(person, self.Person))
self.assertEqual(person.name, "User A")
self.assertEqual(person.age, 20)
# Use a query to filter the people found to just person2
person = self.Person.objects(age=30).first()
self.assertEqual(person.name, "User B")
person = self.Person.objects(age__lt=30).first()
self.assertEqual(person.name, "User A")
# Use array syntax
person = self.Person.objects[0]
self.assertEqual(person.name, "User A")
person = self.Person.objects[1]
self.assertEqual(person.name, "User B")
self.assertRaises(IndexError, self.Person.objects.__getitem__, 2)
# Find a document using just the object id
person = self.Person.objects.with_id(person1.id)
self.assertEqual(person.name, "User A")
self.assertRaises(InvalidQueryError, self.Person.objects(name="User A").with_id, person1.id)
def test_find_only_one(self):
"""Ensure that a query using ``get`` returns at most one result.
"""
# Try retrieving when no objects exists
self.assertRaises(DoesNotExist, self.Person.objects.get)
self.assertRaises(self.Person.DoesNotExist, self.Person.objects.get)
person1 = self.Person(name="User A", age=20)
person1.save()
person2 = self.Person(name="User B", age=30)
person2.save()
# Retrieve the first person from the database
self.assertRaises(MultipleObjectsReturned, self.Person.objects.get)
self.assertRaises(self.Person.MultipleObjectsReturned,
self.Person.objects.get)
# Use a query to filter the people found to just person2
person = self.Person.objects.get(age=30)
self.assertEqual(person.name, "User B")
person = self.Person.objects.get(age__lt=30)
self.assertEqual(person.name, "User A")
def test_find_array_position(self):
"""Ensure that query by array position works.
"""
class Comment(EmbeddedDocument):
name = StringField()
class Post(EmbeddedDocument):
comments = ListField(EmbeddedDocumentField(Comment))
class Blog(Document):
tags = ListField(StringField())
posts = ListField(EmbeddedDocumentField(Post))
Blog.drop_collection()
Blog.objects.create(tags=['a', 'b'])
self.assertEqual(Blog.objects(tags__0='a').count(), 1)
self.assertEqual(Blog.objects(tags__0='b').count(), 0)
self.assertEqual(Blog.objects(tags__1='a').count(), 0)
self.assertEqual(Blog.objects(tags__1='b').count(), 1)
Blog.drop_collection()
comment1 = Comment(name='testa')
comment2 = Comment(name='testb')
post1 = Post(comments=[comment1, comment2])
post2 = Post(comments=[comment2, comment2])
blog1 = Blog.objects.create(posts=[post1, post2])
blog2 = Blog.objects.create(posts=[post2, post1])
blog = Blog.objects(posts__0__comments__0__name='testa').get()
self.assertEqual(blog, blog1)
query = Blog.objects(posts__1__comments__1__name='testb')
self.assertEqual(query.count(), 2)
query = Blog.objects(posts__1__comments__1__name='testa')
self.assertEqual(query.count(), 0)
query = Blog.objects(posts__0__comments__1__name='testa')
self.assertEqual(query.count(), 0)
Blog.drop_collection()
def test_none(self):
class A(Document):
s = StringField()
A.drop_collection()
A().save()
self.assertEqual(list(A.objects.none()), [])
self.assertEqual(list(A.objects.none().all()), [])
self.assertEqual(A.objects.none().count(), 0)
def test_chaining(self):
class A(Document):
s = StringField()
class B(Document):
ref = ReferenceField(A)
boolfield = BooleanField(default=False)
A.drop_collection()
B.drop_collection()
a1 = A(s="test1").save()
a2 = A(s="test2").save()
B(ref=a1, boolfield=True).save()
# Works
q1 = B.objects.filter(ref__in=[a1, a2], ref=a1)._query
# Doesn't work
q2 = B.objects.filter(ref__in=[a1, a2])
q2 = q2.filter(ref=a1)._query
self.assertEqual(q1, q2)
a_objects = A.objects(s='test1')
query = B.objects(ref__in=a_objects)
query = query.filter(boolfield=True)
self.assertEqual(query.count(), 1)
def test_batch_size(self):
"""Test that batch_size works."""
class A(Document):
s = StringField()
A.drop_collection()
for i in range(100):
A.objects.create(s=str(i))
cnt = 0
for a in A.objects.batch_size(10):
cnt += 1
self.assertEqual(cnt, 100)
# test chaining
qs = A.objects.all()
qs = qs.limit(10).batch_size(20).skip(91)
cnt = 0
for a in qs:
cnt += 1
self.assertEqual(cnt, 9)
def test_update_write_concern(self):
"""Test that passing write_concern works"""
self.Person.drop_collection()
write_concern = {"fsync": True}
author, created = self.Person.objects.get_or_create(
name='Test User', write_concern=write_concern)
author.save(write_concern=write_concern)
result = self.Person.objects.update(
set__name='Ross', write_concern={"w": 1})
self.assertEqual(result, 1)
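# With an unacknowledged write (w=0) no result document is returned, so update() yields None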
result = self.Person.objects.update(
set__name='Ross', write_concern={"w": 0})
self.assertEqual(result, None)
result = self.Person.objects.update_one(
set__name='Test User', write_concern={"w": 1})
self.assertEqual(result, 1)
result = self.Person.objects.update_one(
set__name='Test User', write_concern={"w": 0})
self.assertEqual(result, None)
def test_update_update_has_a_value(self):
"""Test to ensure that update is passed a value to update to"""
self.Person.drop_collection()
author = self.Person(name='Test User')
author.save()
def update_raises():
self.Person.objects(pk=author.pk).update({})
def update_one_raises():
self.Person.objects(pk=author.pk).update_one({})
self.assertRaises(OperationError, update_raises)
self.assertRaises(OperationError, update_one_raises)
def test_update_array_position(self):
"""Ensure that updating by array position works.
Check update() and update_one() can take syntax like:
set__posts__1__comments__1__name="testc"
Check that it only works for ListFields.
"""
class Comment(EmbeddedDocument):
name = StringField()
class Post(EmbeddedDocument):
comments = ListField(EmbeddedDocumentField(Comment))
class Blog(Document):
tags = ListField(StringField())
posts = ListField(EmbeddedDocumentField(Post))
Blog.drop_collection()
comment1 = Comment(name='testa')
comment2 = Comment(name='testb')
post1 = Post(comments=[comment1, comment2])
post2 = Post(comments=[comment2, comment2])
Blog.objects.create(posts=[post1, post2])
Blog.objects.create(posts=[post2, post1])
# Update all of the first comments of second posts of all blogs
Blog.objects().update(set__posts__1__comments__0__name="testc")
testc_blogs = Blog.objects(posts__1__comments__0__name="testc")
self.assertEqual(testc_blogs.count(), 2)
Blog.drop_collection()
Blog.objects.create(posts=[post1, post2])
Blog.objects.create(posts=[post2, post1])
# Update only the first blog returned by the query
Blog.objects().update_one(
set__posts__1__comments__1__name="testc")
testc_blogs = Blog.objects(posts__1__comments__1__name="testc")
self.assertEqual(testc_blogs.count(), 1)
# Check that using this indexing syntax on a non-list fails
def non_list_indexing():
Blog.objects().update(set__posts__1__comments__0__name__1="asdf")
self.assertRaises(InvalidQueryError, non_list_indexing)
Blog.drop_collection()
def test_update_using_positional_operator(self):
"""Ensure that the list fields can be updated using the positional
operator."""
class Comment(EmbeddedDocument):
by = StringField()
votes = IntField()
class BlogPost(Document):
title = StringField()
comments = ListField(EmbeddedDocumentField(Comment))
BlogPost.drop_collection()
c1 = Comment(by="joe", votes=3)
c2 = Comment(by="jane", votes=7)
BlogPost(title="ABC", comments=[c1, c2]).save()
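# __S__ maps to MongoDB's positional operator ($), targeting the first array element matched by the query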
BlogPost.objects(comments__by="jane").update(inc__comments__S__votes=1)
post = BlogPost.objects.first()
self.assertEqual(post.comments[1].by, 'jane')
self.assertEqual(post.comments[1].votes, 8)
def test_update_using_positional_operator_matches_first(self):
# Currently the $ operator only applies to the first matched item in
# the query
class Simple(Document):
x = ListField()
Simple.drop_collection()
Simple(x=[1, 2, 3, 2]).save()
Simple.objects(x=2).update(inc__x__S=1)
simple = Simple.objects.first()
self.assertEqual(simple.x, [1, 3, 3, 2])
Simple.drop_collection()
# The update applies to every matching document; within each one, $ still targets the first matched element
Simple.drop_collection()
Simple(x=[1, 2, 3, 4]).save()
Simple(x=[2, 3, 4, 5]).save()
Simple(x=[3, 4, 5, 6]).save()
Simple(x=[4, 5, 6, 7]).save()
Simple.objects(x=3).update(set__x__S=0)
s = Simple.objects()
self.assertEqual(s[0].x, [1, 2, 0, 4])
self.assertEqual(s[1].x, [2, 0, 4, 5])
self.assertEqual(s[2].x, [0, 4, 5, 6])
self.assertEqual(s[3].x, [4, 5, 6, 7])
# Using "$unset" with an expression like this "array.$" will result in
# the array item becoming None, not being removed.
Simple.drop_collection()
Simple(x=[1, 2, 3, 4, 3, 2, 3, 4]).save()
Simple.objects(x=3).update(unset__x__S=1)
simple = Simple.objects.first()
self.assertEqual(simple.x, [1, 2, None, 4, 3, 2, 3, 4])
# Nested updates aren't supported yet.
def update_nested():
Simple.drop_collection()
Simple(x=[{'test': [1, 2, 3, 4]}]).save()
Simple.objects(x__test=2).update(set__x__S__test__S=3)
self.assertEqual(simple.x, [1, 2, 3, 4])
self.assertRaises(OperationError, update_nested)
Simple.drop_collection()
def test_update_using_positional_operator_embedded_document(self):
"""Ensure that the embedded documents can be updated using the positional
operator."""
class Vote(EmbeddedDocument):
score = IntField()
class Comment(EmbeddedDocument):
by = StringField()
votes = EmbeddedDocumentField(Vote)
class BlogPost(Document):
title = StringField()
comments = ListField(EmbeddedDocumentField(Comment))
BlogPost.drop_collection()
c1 = Comment(by="joe", votes=Vote(score=3))
c2 = Comment(by="jane", votes=Vote(score=7))
BlogPost(title="ABC", comments=[c1, c2]).save()
BlogPost.objects(comments__by="joe").update(set__comments__S__votes=Vote(score=4))
post = BlogPost.objects.first()
self.assertEqual(post.comments[0].by, 'joe')
self.assertEqual(post.comments[0].votes.score, 4)
def test_updates_can_have_match_operators(self):
class Post(Document):
title = StringField(required=True)
tags = ListField(StringField())
comments = ListField(EmbeddedDocumentField("Comment"))
class Comment(EmbeddedDocument):
content = StringField()
name = StringField(max_length=120)
vote = IntField()
Post.drop_collection()
comm1 = Comment(content="very funny indeed", name="John S", vote=1)
comm2 = Comment(content="kind of funny", name="Mark P", vote=0)
Post(title='Fun with MongoEngine', tags=['mongodb', 'mongoengine'],
comments=[comm1, comm2]).save()
Post.objects().update_one(pull__comments__vote__lt=1)
self.assertEqual(1, len(Post.objects.first().comments))
def test_mapfield_update(self):
"""Ensure that the MapField can be updated."""
class Member(EmbeddedDocument):
gender = StringField()
age = IntField()
class Club(Document):
members = MapField(EmbeddedDocumentField(Member))
Club.drop_collection()
club = Club()
club.members['John'] = Member(gender="M", age=13)
club.save()
Club.objects().update(
set__members={"John": Member(gender="F", age=14)})
club = Club.objects().first()
self.assertEqual(club.members['John'].gender, "F")
self.assertEqual(club.members['John'].age, 14)
def test_dictfield_update(self):
"""Ensure that the DictField can be updated."""
class Club(Document):
members = DictField()
club = Club()
club.members['John'] = dict(gender="M", age=13)
club.save()
Club.objects().update(
set__members={"John": dict(gender="F", age=14)})
club = Club.objects().first()
self.assertEqual(club.members['John']['gender'], "F")
self.assertEqual(club.members['John']['age'], 14)
def test_upsert(self):
self.Person.drop_collection()
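# upsert=True inserts a new document when the query matches nothing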
self.Person.objects(pk=ObjectId(), name="Bob", age=30).update(upsert=True)
bob = self.Person.objects.first()
self.assertEqual("Bob", bob.name)
self.assertEqual(30, bob.age)
def test_upsert_one(self):
self.Person.drop_collection()
self.Person.objects(name="Bob", age=30).update_one(upsert=True)
bob = self.Person.objects.first()
self.assertEqual("Bob", bob.name)
self.assertEqual(30, bob.age)
def test_set_on_insert(self):
self.Person.drop_collection()
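# set_on_insert__ fields are only applied when the upsert actually inserts a new document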
self.Person.objects(pk=ObjectId()).update(set__name='Bob', set_on_insert__age=30, upsert=True)
bob = self.Person.objects.first()
self.assertEqual("Bob", bob.name)
self.assertEqual(30, bob.age)
def test_get_or_create(self):
"""Ensure that ``get_or_create`` returns one result or creates a new
document.
"""
person1 = self.Person(name="User A", age=20)
person1.save()
person2 = self.Person(name="User B", age=30)
person2.save()
# Retrieve the first person from the database
self.assertRaises(MultipleObjectsReturned,
self.Person.objects.get_or_create)
self.assertRaises(self.Person.MultipleObjectsReturned,
self.Person.objects.get_or_create)
# Use a query to filter the people found to just person2
person, created = self.Person.objects.get_or_create(age=30)
self.assertEqual(person.name, "User B")
self.assertEqual(created, False)
person, created = self.Person.objects.get_or_create(age__lt=30)
self.assertEqual(person.name, "User A")
self.assertEqual(created, False)
# Try retrieving when no objects exists - new doc should be created
kwargs = dict(age=50, defaults={'name': 'User C'})
person, created = self.Person.objects.get_or_create(**kwargs)
self.assertEqual(created, True)
person = self.Person.objects.get(age=50)
self.assertEqual(person.name, "User C")
def test_bulk_insert(self):
"""Ensure that bulk insert works
"""
class Comment(EmbeddedDocument):
name = StringField()
class Post(EmbeddedDocument):
comments = ListField(EmbeddedDocumentField(Comment))
class Blog(Document):
title = StringField(unique=True)
tags = ListField(StringField())
posts = ListField(EmbeddedDocumentField(Post))
Blog.drop_collection()
# Recreates the collection
self.assertEqual(0, Blog.objects.count())
comment1 = Comment(name='testa')
comment2 = Comment(name='testb')
post1 = Post(comments=[comment1, comment2])
post2 = Post(comments=[comment2, comment2])
# Check bulk insert using load_bulk=False
blogs = [Blog(title="%s" % i, posts=[post1, post2])
for i in range(99)]
with query_counter() as q:
self.assertEqual(q, 0)
Blog.objects.insert(blogs, load_bulk=False)
self.assertEqual(q, 1) # 1 entry containing the list of inserts
self.assertEqual(Blog.objects.count(), len(blogs))
Blog.drop_collection()
Blog.ensure_indexes()
# Check bulk insert using load_bulk=True
blogs = [Blog(title="%s" % i, posts=[post1, post2])
for i in range(99)]
with query_counter() as q:
self.assertEqual(q, 0)
Blog.objects.insert(blogs)
self.assertEqual(q, 2) # 1 for insert 1 for fetch
Blog.drop_collection()
comment1 = Comment(name='testa')
comment2 = Comment(name='testb')
post1 = Post(comments=[comment1, comment2])
post2 = Post(comments=[comment2, comment2])
blog1 = Blog(title="code", posts=[post1, post2])
blog2 = Blog(title="mongodb", posts=[post2, post1])
blog1, blog2 = Blog.objects.insert([blog1, blog2])
self.assertEqual(blog1.title, "code")
self.assertEqual(blog2.title, "mongodb")
self.assertEqual(Blog.objects.count(), 2)
# test inserting an existing document (shouldn't be allowed)
with self.assertRaises(OperationError) as cm:
blog = Blog.objects.first()
Blog.objects.insert(blog)
self.assertEqual(
str(cm.exception),
'Some documents have ObjectIds, use doc.update() instead'
)
# test inserting a query set
with self.assertRaises(OperationError) as cm:
blogs_qs = Blog.objects
Blog.objects.insert(blogs_qs)
self.assertEqual(
str(cm.exception),
'Some documents have ObjectIds, use doc.update() instead'
)
# insert 1 new doc
new_post = Blog(title="code123", id=ObjectId())
Blog.objects.insert(new_post)
Blog.drop_collection()
blog1 = Blog(title="code", posts=[post1, post2])
blog1 = Blog.objects.insert(blog1)
self.assertEqual(blog1.title, "code")
self.assertEqual(Blog.objects.count(), 1)
Blog.drop_collection()
blog1 = Blog(title="code", posts=[post1, post2])
obj_id = Blog.objects.insert(blog1, load_bulk=False)
self.assertIsInstance(obj_id, ObjectId)
Blog.drop_collection()
Blog.ensure_indexes()
post3 = Post(comments=[comment1, comment1])
blog1 = Blog(title="foo", posts=[post1, post2])
blog2 = Blog(title="bar", posts=[post2, post3])
Blog.objects.insert([blog1, blog2])
with self.assertRaises(NotUniqueError):
Blog.objects.insert(Blog(title=blog2.title))
self.assertEqual(Blog.objects.count(), 2)
def test_get_changed_fields_query_count(self):
class Person(Document):
name = StringField()
owns = ListField(ReferenceField('Organization'))
projects = ListField(ReferenceField('Project'))
class Organization(Document):
name = StringField()
owner = ReferenceField('Person')
employees = ListField(ReferenceField('Person'))
class Project(Document):
name = StringField()
Person.drop_collection()
Organization.drop_collection()
Project.drop_collection()
r1 = Project(name="r1").save()
r2 = Project(name="r2").save()
r3 = Project(name="r3").save()
p1 = Person(name="p1", projects=[r1, r2]).save()
p2 = Person(name="p2", projects=[r2, r3]).save()
o1 = Organization(name="o1", employees=[p1]).save()
with query_counter() as q:
self.assertEqual(q, 0)
fresh_o1 = Organization.objects.get(id=o1.id)
self.assertEqual(1, q)
fresh_o1._get_changed_fields()
self.assertEqual(1, q)
with query_counter() as q:
self.assertEqual(q, 0)
fresh_o1 = Organization.objects.get(id=o1.id)
fresh_o1.save() # No changes, does nothing
self.assertEqual(q, 1)
with query_counter() as q:
self.assertEqual(q, 0)
fresh_o1 = Organization.objects.get(id=o1.id)
fresh_o1.save(cascade=False) # No changes, does nothing
self.assertEqual(q, 1)
with query_counter() as q:
self.assertEqual(q, 0)
fresh_o1 = Organization.objects.get(id=o1.id)
fresh_o1.employees.append(p2)
fresh_o1.save(cascade=False) # Saves
self.assertEqual(q, 2)
def test_timeout_and_cursor_args(self):
"""Ensures the cursor args can be set as expected
"""
p = self.Person.objects
self.assertEqual(p._cursor_args, {'no_cursor_timeout': False})
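# timeout(False) disables the server-side cursor timeout, i.e. no_cursor_timeout=True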
p = p.timeout(False)
self.assertEqual(p._cursor_args, {'no_cursor_timeout': True})
p = p.timeout(True)
self.assertEqual(p._cursor_args, {'no_cursor_timeout': False})
def test_repeated_iteration(self):
"""Ensure that QuerySet rewinds itself one iteration finishes.
"""
self.Person(name='Person 1').save()
self.Person(name='Person 2').save()
queryset = self.Person.objects
people1 = [person for person in queryset]
people2 = [person for person in queryset]
# Check that it still works even if iteration is interrupted.
for person in queryset:
break
people3 = [person for person in queryset]
self.assertEqual(people1, people2)
self.assertEqual(people1, people3)
def test_repr(self):
"""Test repr behavior isnt destructive"""
class Doc(Document):
number = IntField()
def __repr__(self):
return "<Doc: %s>" % self.number
Doc.drop_collection()
for i in xrange(1000):
Doc(number=i).save()
docs = Doc.objects.order_by('number')
self.assertEqual(docs.count(), 1000)
docs_string = "%s" % docs
self.assertTrue("Doc: 0" in docs_string)
self.assertEqual(docs.count(), 1000)
self.assertTrue('(remaining elements truncated)' in "%s" % docs)
# Limit and skip
docs = docs[1:4]
self.assertEqual('[<Doc: 1>, <Doc: 2>, <Doc: 3>]', "%s" % docs)
self.assertEqual(docs.count(), 3)
for doc in docs:
self.assertEqual('.. queryset mid-iteration ..', repr(docs))
def test_regex_query_shortcuts(self):
"""Ensure that contains, startswith, endswith, etc work.
"""
person = self.Person(name='Guido van Rossum')
person.save()
# Test contains
obj = self.Person.objects(name__contains='van').first()
self.assertEqual(obj, person)
obj = self.Person.objects(name__contains='Van').first()
self.assertEqual(obj, None)
# Test icontains
obj = self.Person.objects(name__icontains='Van').first()
self.assertEqual(obj, person)
# Test startswith
obj = self.Person.objects(name__startswith='Guido').first()
self.assertEqual(obj, person)
obj = self.Person.objects(name__startswith='guido').first()
self.assertEqual(obj, None)
# Test istartswith
obj = self.Person.objects(name__istartswith='guido').first()
self.assertEqual(obj, person)
# Test endswith
obj = self.Person.objects(name__endswith='Rossum').first()
self.assertEqual(obj, person)
obj = self.Person.objects(name__endswith='rossuM').first()
self.assertEqual(obj, None)
# Test iendswith
obj = self.Person.objects(name__iendswith='rossuM').first()
self.assertEqual(obj, person)
# Test exact
obj = self.Person.objects(name__exact='Guido van Rossum').first()
self.assertEqual(obj, person)
obj = self.Person.objects(name__exact='Guido van rossum').first()
self.assertEqual(obj, None)
obj = self.Person.objects(name__exact='Guido van Rossu').first()
self.assertEqual(obj, None)
# Test iexact
obj = self.Person.objects(name__iexact='gUIDO VAN rOSSUM').first()
self.assertEqual(obj, person)
obj = self.Person.objects(name__iexact='gUIDO VAN rOSSU').first()
self.assertEqual(obj, None)
# Test unsafe expressions
person = self.Person(name='Guido van Rossum [.\'Geek\']')
person.save()
obj = self.Person.objects(name__icontains='[.\'Geek').first()
self.assertEqual(obj, person)
def test_not(self):
"""Ensure that the __not operator works as expected.
"""
alice = self.Person(name='Alice', age=25)
alice.save()
obj = self.Person.objects(name__iexact='alice').first()
self.assertEqual(obj, alice)
obj = self.Person.objects(name__not__iexact='alice').first()
self.assertEqual(obj, None)
def test_filter_chaining(self):
"""Ensure filters can be chained together.
"""
class Blog(Document):
id = StringField(unique=True, primary_key=True)
class BlogPost(Document):
blog = ReferenceField(Blog)
title = StringField()
is_published = BooleanField()
published_date = DateTimeField()
@queryset_manager
def published(doc_cls, queryset):
return queryset(is_published=True)
Blog.drop_collection()
BlogPost.drop_collection()
blog_1 = Blog(id="1")
blog_2 = Blog(id="2")
blog_3 = Blog(id="3")
blog_1.save()
blog_2.save()
blog_3.save()
blog_post_1 = BlogPost(blog=blog_1, title="Blog Post #1",
is_published=True,
published_date=datetime(2010, 1, 5, 0, 0, 0))
blog_post_2 = BlogPost(blog=blog_2, title="Blog Post #2",
is_published=True,
published_date=datetime(2010, 1, 6, 0, 0, 0))
blog_post_3 = BlogPost(blog=blog_3, title="Blog Post #3",
is_published=True,
published_date=datetime(2010, 1, 7, 0, 0, 0))
blog_post_1.save()
blog_post_2.save()
blog_post_3.save()
# find all published blog posts before 2010-01-07
published_posts = BlogPost.published()
published_posts = published_posts.filter(
published_date__lt=datetime(2010, 1, 7, 0, 0, 0))
self.assertEqual(published_posts.count(), 2)
blog_posts = BlogPost.objects
blog_posts = blog_posts.filter(blog__in=[blog_1, blog_2])
blog_posts = blog_posts.filter(blog=blog_3)
self.assertEqual(blog_posts.count(), 0)
BlogPost.drop_collection()
Blog.drop_collection()
def assertSequence(self, qs, expected):
qs = list(qs)
expected = list(expected)
self.assertEqual(len(qs), len(expected))
for i in xrange(len(qs)):
self.assertEqual(qs[i], expected[i])
def test_ordering(self):
"""Ensure default ordering is applied and can be overridden.
"""
class BlogPost(Document):
title = StringField()
published_date = DateTimeField()
meta = {
'ordering': ['-published_date']
}
BlogPost.drop_collection()
blog_post_1 = BlogPost(title="Blog Post #1",
published_date=datetime(2010, 1, 5, 0, 0, 0))
blog_post_2 = BlogPost(title="Blog Post #2",
published_date=datetime(2010, 1, 6, 0, 0, 0))
blog_post_3 = BlogPost(title="Blog Post #3",
published_date=datetime(2010, 1, 7, 0, 0, 0))
blog_post_1.save()
blog_post_2.save()
blog_post_3.save()
# get the "first" BlogPost using default ordering
# from BlogPost.meta.ordering
expected = [blog_post_3, blog_post_2, blog_post_1]
self.assertSequence(BlogPost.objects.all(), expected)
# override default ordering, order BlogPosts by "published_date"
qs = BlogPost.objects.order_by("+published_date")
expected = [blog_post_1, blog_post_2, blog_post_3]
self.assertSequence(qs, expected)
def test_find_embedded(self):
"""Ensure that an embedded document is properly returned from a query.
"""
class User(EmbeddedDocument):
name = StringField()
class BlogPost(Document):
content = StringField()
author = EmbeddedDocumentField(User)
BlogPost.drop_collection()
post = BlogPost(content='Had a good coffee today...')
post.author = User(name='Test User')
post.save()
result = BlogPost.objects.first()
self.assertTrue(isinstance(result.author, User))
self.assertEqual(result.author.name, 'Test User')
BlogPost.drop_collection()
def test_find_dict_item(self):
"""Ensure that DictField items may be found.
"""
class BlogPost(Document):
info = DictField()
BlogPost.drop_collection()
post = BlogPost(info={'title': 'test'})
post.save()
post_obj = BlogPost.objects(info__title='test').first()
self.assertEqual(post_obj.id, post.id)
BlogPost.drop_collection()
def test_exec_js_query(self):
"""Ensure that queries are properly formed for use in exec_js.
"""
class BlogPost(Document):
hits = IntField()
published = BooleanField()
BlogPost.drop_collection()
post1 = BlogPost(hits=1, published=False)
post1.save()
post2 = BlogPost(hits=1, published=True)
post2.save()
post3 = BlogPost(hits=1, published=True)
post3.save()
js_func = """
function(hitsField) {
var count = 0;
db[collection].find(query).forEach(function(doc) {
count += doc[hitsField];
});
return count;
}
"""
# Ensure that normal queries work
c = BlogPost.objects(published=True).exec_js(js_func, 'hits')
self.assertEqual(c, 2)
c = BlogPost.objects(published=False).exec_js(js_func, 'hits')
self.assertEqual(c, 1)
BlogPost.drop_collection()
def test_exec_js_field_sub(self):
"""Ensure that field substitutions occur properly in exec_js functions.
"""
class Comment(EmbeddedDocument):
content = StringField(db_field='body')
class BlogPost(Document):
name = StringField(db_field='doc-name')
comments = ListField(EmbeddedDocumentField(Comment),
db_field='cmnts')
BlogPost.drop_collection()
comments1 = [Comment(content='cool'), Comment(content='yay')]
post1 = BlogPost(name='post1', comments=comments1)
post1.save()
comments2 = [Comment(content='nice stuff')]
post2 = BlogPost(name='post2', comments=comments2)
post2.save()
code = """
function getComments() {
var comments = [];
db[collection].find(query).forEach(function(doc) {
var docComments = doc[~comments];
for (var i = 0; i < docComments.length; i++) {
comments.push({
'document': doc[~name],
'comment': doc[~comments][i][~comments.content]
});
}
});
return comments;
}
"""
sub_code = BlogPost.objects._sub_js_fields(code)
code_chunks = ['doc["cmnts"];', 'doc["doc-name"],',
'doc["cmnts"][i]["body"]']
for chunk in code_chunks:
self.assertTrue(chunk in sub_code)
results = BlogPost.objects.exec_js(code)
expected_results = [
{u'comment': u'cool', u'document': u'post1'},
{u'comment': u'yay', u'document': u'post1'},
{u'comment': u'nice stuff', u'document': u'post2'},
]
self.assertEqual(results, expected_results)
# Test template style
code = "{{~comments.content}}"
sub_code = BlogPost.objects._sub_js_fields(code)
self.assertEqual("cmnts.body", sub_code)
BlogPost.drop_collection()
def test_delete(self):
"""Ensure that documents are properly deleted from the database.
"""
self.Person(name="User A", age=20).save()
self.Person(name="User B", age=30).save()
self.Person(name="User C", age=40).save()
self.assertEqual(self.Person.objects.count(), 3)
self.Person.objects(age__lt=30).delete()
self.assertEqual(self.Person.objects.count(), 2)
self.Person.objects.delete()
self.assertEqual(self.Person.objects.count(), 0)
def test_reverse_delete_rule_cascade(self):
"""Ensure cascading deletion of referring documents from the database.
"""
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
BlogPost.drop_collection()
me = self.Person(name='Test User')
me.save()
someoneelse = self.Person(name='Some-one Else')
someoneelse.save()
BlogPost(content='Watching TV', author=me).save()
BlogPost(content='Chilling out', author=me).save()
BlogPost(content='Pro Testing', author=someoneelse).save()
self.assertEqual(3, BlogPost.objects.count())
self.Person.objects(name='Test User').delete()
self.assertEqual(1, BlogPost.objects.count())
def test_reverse_delete_rule_cascade_self_referencing(self):
"""Ensure self-referencing CASCADE deletes do not result in infinite
loop
"""
class Category(Document):
name = StringField()
parent = ReferenceField('self', reverse_delete_rule=CASCADE)
Category.drop_collection()
num_children = 3
base = Category(name='Root')
base.save()
# Create a simple parent-child tree
for i in range(num_children):
child_name = 'Child-%i' % i
child = Category(name=child_name, parent=base)
child.save()
for i in range(num_children):
child_child_name = 'Child-Child-%i' % i
child_child = Category(name=child_child_name, parent=child)
child_child.save()
tree_size = 1 + num_children + (num_children * num_children)
self.assertEqual(tree_size, Category.objects.count())
self.assertEqual(num_children, Category.objects(parent=base).count())
# The delete should effectively wipe out the Category collection
# without resulting in infinite parent-child cascade recursion
base.delete()
self.assertEqual(0, Category.objects.count())
def test_reverse_delete_rule_nullify(self):
"""Ensure nullification of references to deleted documents.
"""
class Category(Document):
name = StringField()
class BlogPost(Document):
content = StringField()
category = ReferenceField(Category, reverse_delete_rule=NULLIFY)
BlogPost.drop_collection()
Category.drop_collection()
lameness = Category(name='Lameness')
lameness.save()
post = BlogPost(content='Watching TV', category=lameness)
post.save()
self.assertEqual(1, BlogPost.objects.count())
self.assertEqual('Lameness', BlogPost.objects.first().category.name)
Category.objects.delete()
self.assertEqual(1, BlogPost.objects.count())
self.assertEqual(None, BlogPost.objects.first().category)
def test_reverse_delete_rule_deny(self):
"""Ensure deletion gets denied on documents that still have references
to them.
"""
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=DENY)
BlogPost.drop_collection()
self.Person.drop_collection()
me = self.Person(name='Test User')
me.save()
post = BlogPost(content='Watching TV', author=me)
post.save()
self.assertRaises(OperationError, self.Person.objects.delete)
def test_reverse_delete_rule_pull(self):
"""Ensure pulling of references to deleted documents.
"""
class BlogPost(Document):
content = StringField()
authors = ListField(ReferenceField(self.Person,
reverse_delete_rule=PULL))
BlogPost.drop_collection()
self.Person.drop_collection()
me = self.Person(name='Test User')
me.save()
someoneelse = self.Person(name='Some-one Else')
someoneelse.save()
post = BlogPost(content='Watching TV', authors=[me, someoneelse])
post.save()
another = BlogPost(content='Chilling Out', authors=[someoneelse])
another.save()
someoneelse.delete()
post.reload()
another.reload()
self.assertEqual(post.authors, [me])
self.assertEqual(another.authors, [])
def test_delete_with_limits(self):
class Log(Document):
pass
Log.drop_collection()
for i in xrange(10):
Log().save()
Log.objects()[3:5].delete()
self.assertEqual(8, Log.objects.count())
def test_delete_with_limit_handles_delete_rules(self):
"""Ensure cascading deletion of referring documents from the database.
"""
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
BlogPost.drop_collection()
me = self.Person(name='Test User')
me.save()
someoneelse = self.Person(name='Some-one Else')
someoneelse.save()
BlogPost(content='Watching TV', author=me).save()
BlogPost(content='Chilling out', author=me).save()
BlogPost(content='Pro Testing', author=someoneelse).save()
self.assertEqual(3, BlogPost.objects.count())
self.Person.objects()[:1].delete()
self.assertEqual(1, BlogPost.objects.count())
def test_reference_field_find(self):
"""Ensure cascading deletion of referring documents from the database.
"""
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person)
BlogPost.drop_collection()
self.Person.drop_collection()
me = self.Person(name='Test User').save()
BlogPost(content="test 123", author=me).save()
self.assertEqual(1, BlogPost.objects(author=me).count())
self.assertEqual(1, BlogPost.objects(author=me.pk).count())
self.assertEqual(1, BlogPost.objects(author="%s" % me.pk).count())
self.assertEqual(1, BlogPost.objects(author__in=[me]).count())
self.assertEqual(1, BlogPost.objects(author__in=[me.pk]).count())
self.assertEqual(1, BlogPost.objects(author__in=["%s" % me.pk]).count())
def test_reference_field_find_dbref(self):
"""Ensure cascading deletion of referring documents from the database.
"""
class BlogPost(Document):
content = StringField()
author = ReferenceField(self.Person, dbref=True)
BlogPost.drop_collection()
self.Person.drop_collection()
me = self.Person(name='Test User').save()
BlogPost(content="test 123", author=me).save()
self.assertEqual(1, BlogPost.objects(author=me).count())
self.assertEqual(1, BlogPost.objects(author=me.pk).count())
self.assertEqual(1, BlogPost.objects(author="%s" % me.pk).count())
self.assertEqual(1, BlogPost.objects(author__in=[me]).count())
self.assertEqual(1, BlogPost.objects(author__in=[me.pk]).count())
self.assertEqual(1, BlogPost.objects(author__in=["%s" % me.pk]).count())
def test_update(self):
"""Ensure that atomic updates work properly.
"""
class BlogPost(Document):
title = StringField()
hits = IntField()
tags = ListField(StringField())
BlogPost.drop_collection()
post = BlogPost(name="Test Post", hits=5, tags=['test'])
post.save()
BlogPost.objects.update(set__hits=10)
post.reload()
self.assertEqual(post.hits, 10)
BlogPost.objects.update_one(inc__hits=1)
post.reload()
self.assertEqual(post.hits, 11)
BlogPost.objects.update_one(dec__hits=1)
post.reload()
self.assertEqual(post.hits, 10)
BlogPost.objects.update(push__tags='mongo')
post.reload()
self.assertTrue('mongo' in post.tags)
BlogPost.objects.update_one(push_all__tags=['db', 'nosql'])
post.reload()
self.assertTrue('db' in post.tags and 'nosql' in post.tags)
tags = post.tags[:-1]
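# pop__tags=1 maps to $pop with 1, removing the last element of the list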
BlogPost.objects.update(pop__tags=1)
post.reload()
self.assertEqual(post.tags, tags)
BlogPost.objects.update_one(add_to_set__tags='unique')
BlogPost.objects.update_one(add_to_set__tags='unique')
post.reload()
self.assertEqual(post.tags.count('unique'), 1)
self.assertNotEqual(post.hits, None)
BlogPost.objects.update_one(unset__hits=1)
post.reload()
self.assertEqual(post.hits, None)
BlogPost.drop_collection()
def test_update_push_and_pull_add_to_set(self):
"""Ensure that the 'pull' update operation works correctly.
"""
class BlogPost(Document):
slug = StringField()
tags = ListField(StringField())
BlogPost.drop_collection()
post = BlogPost(slug="test")
post.save()
BlogPost.objects.filter(id=post.id).update(push__tags="code")
post.reload()
self.assertEqual(post.tags, ["code"])
BlogPost.objects.filter(id=post.id).update(push_all__tags=["mongodb", "code"])
post.reload()
self.assertEqual(post.tags, ["code", "mongodb", "code"])
BlogPost.objects(slug="test").update(pull__tags="code")
post.reload()
self.assertEqual(post.tags, ["mongodb"])
BlogPost.objects(slug="test").update(pull_all__tags=["mongodb", "code"])
post.reload()
self.assertEqual(post.tags, [])
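# __raw__ passes the update document straight through to PyMongo; $addToSet with $each only adds values not already present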
BlogPost.objects(slug="test").update(__raw__={"$addToSet": {"tags": {"$each": ["code", "mongodb", "code"]}}})
post.reload()
self.assertEqual(post.tags, ["code", "mongodb"])
def test_add_to_set_each(self):
class Item(Document):
name = StringField(required=True)
description = StringField(max_length=50)
parents = ListField(ReferenceField('self'))
Item.drop_collection()
item = Item(name='test item').save()
parent_1 = Item(name='parent 1').save()
parent_2 = Item(name='parent 2').save()
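# add_to_set with a list behaves like $addToSet + $each, so the duplicate parent_1 is only added once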
item.update(add_to_set__parents=[parent_1, parent_2, parent_1])
item.reload()
self.assertEqual([parent_1, parent_2], item.parents)
def test_pull_nested(self):
class User(Document):
name = StringField()
class Collaborator(EmbeddedDocument):
user = StringField()
def __unicode__(self):
return '%s' % self.user
class Site(Document):
name = StringField(max_length=75, unique=True, required=True)
collaborators = ListField(EmbeddedDocumentField(Collaborator))
Site.drop_collection()
c = Collaborator(user='Esteban')
s = Site(name="test", collaborators=[c])
s.save()
Site.objects(id=s.id).update_one(pull__collaborators__user='Esteban')
self.assertEqual(Site.objects.first().collaborators, [])
def pull_all():
Site.objects(id=s.id).update_one(pull_all__collaborators__user=['Ross'])
self.assertRaises(InvalidQueryError, pull_all)
def test_update_one_pop_generic_reference(self):
class BlogTag(Document):
name = StringField(required=True)
class BlogPost(Document):
slug = StringField()
tags = ListField(ReferenceField(BlogTag), required=True)
BlogPost.drop_collection()
BlogTag.drop_collection()
tag_1 = BlogTag(name='code')
tag_1.save()
tag_2 = BlogTag(name='mongodb')
tag_2.save()
post = BlogPost(slug="test", tags=[tag_1])
post.save()
post = BlogPost(slug="test-2", tags=[tag_1, tag_2])
post.save()
self.assertEqual(len(post.tags), 2)
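# pop__tags=-1 maps to $pop with -1, removing the first element of the list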
BlogPost.objects(slug="test-2").update_one(pop__tags=-1)
post.reload()
self.assertEqual(len(post.tags), 1)
BlogPost.drop_collection()
BlogTag.drop_collection()
def test_editing_embedded_objects(self):
class BlogTag(EmbeddedDocument):
name = StringField(required=True)
class BlogPost(Document):
slug = StringField()
tags = ListField(EmbeddedDocumentField(BlogTag), required=True)
BlogPost.drop_collection()
tag_1 = BlogTag(name='code')
tag_2 = BlogTag(name='mongodb')
post = BlogPost(slug="test", tags=[tag_1])
post.save()
post = BlogPost(slug="test-2", tags=[tag_1, tag_2])
post.save()
self.assertEqual(len(post.tags), 2)
BlogPost.objects(slug="test-2").update_one(set__tags__0__name="python")
post.reload()
self.assertEqual(post.tags[0].name, 'python')
BlogPost.objects(slug="test-2").update_one(pop__tags=-1)
post.reload()
self.assertEqual(len(post.tags), 1)
BlogPost.drop_collection()
def test_set_list_embedded_documents(self):
class Author(EmbeddedDocument):
name = StringField()
class Message(Document):
title = StringField()
authors = ListField(EmbeddedDocumentField('Author'))
Message.drop_collection()
message = Message(title="hello", authors=[Author(name="Harry")])
message.save()
Message.objects(authors__name="Harry").update_one(
set__authors__S=Author(name="Ross"))
message = message.reload()
self.assertEqual(message.authors[0].name, "Ross")
Message.objects(authors__name="Ross").update_one(
set__authors=[Author(name="Harry"),
Author(name="Ross"),
Author(name="Adam")])
message = message.reload()
self.assertEqual(message.authors[0].name, "Harry")
self.assertEqual(message.authors[1].name, "Ross")
self.assertEqual(message.authors[2].name, "Adam")
def test_order_by(self):
"""Ensure that QuerySets may be ordered.
"""
self.Person(name="User B", age=40).save()
self.Person(name="User A", age=20).save()
self.Person(name="User C", age=30).save()
names = [p.name for p in self.Person.objects.order_by('-age')]
self.assertEqual(names, ['User B', 'User C', 'User A'])
names = [p.name for p in self.Person.objects.order_by('+age')]
self.assertEqual(names, ['User A', 'User C', 'User B'])
names = [p.name for p in self.Person.objects.order_by('age')]
self.assertEqual(names, ['User A', 'User C', 'User B'])
ages = [p.age for p in self.Person.objects.order_by('-name')]
self.assertEqual(ages, [30, 40, 20])
def test_order_by_optional(self):
class BlogPost(Document):
title = StringField()
published_date = DateTimeField(required=False)
BlogPost.drop_collection()
blog_post_3 = BlogPost(title="Blog Post #3",
published_date=datetime(2010, 1, 6, 0, 0, 0))
blog_post_2 = BlogPost(title="Blog Post #2",
published_date=datetime(2010, 1, 5, 0, 0, 0))
blog_post_4 = BlogPost(title="Blog Post #4",
published_date=datetime(2010, 1, 7, 0, 0, 0))
blog_post_1 = BlogPost(title="Blog Post #1", published_date=None)
blog_post_3.save()
blog_post_1.save()
blog_post_4.save()
blog_post_2.save()
expected = [blog_post_1, blog_post_2, blog_post_3, blog_post_4]
self.assertSequence(BlogPost.objects.order_by('published_date'),
expected)
self.assertSequence(BlogPost.objects.order_by('+published_date'),
expected)
expected.reverse()
self.assertSequence(BlogPost.objects.order_by('-published_date'),
expected)
def test_order_by_list(self):
class BlogPost(Document):
title = StringField()
published_date = DateTimeField(required=False)
BlogPost.drop_collection()
blog_post_1 = BlogPost(title="A",
published_date=datetime(2010, 1, 6, 0, 0, 0))
blog_post_2 = BlogPost(title="B",
published_date=datetime(2010, 1, 6, 0, 0, 0))
blog_post_3 = BlogPost(title="C",
published_date=datetime(2010, 1, 7, 0, 0, 0))
blog_post_2.save()
blog_post_3.save()
blog_post_1.save()
qs = BlogPost.objects.order_by('published_date', 'title')
expected = [blog_post_1, blog_post_2, blog_post_3]
self.assertSequence(qs, expected)
qs = BlogPost.objects.order_by('-published_date', '-title')
expected.reverse()
self.assertSequence(qs, expected)
def test_order_by_chaining(self):
"""Ensure that an order_by query chains properly and allows .only()
"""
self.Person(name="User B", age=40).save()
self.Person(name="User A", age=20).save()
self.Person(name="User C", age=30).save()
only_age = self.Person.objects.order_by('-age').only('age')
names = [p.name for p in only_age]
ages = [p.age for p in only_age]
# The .only('age') clause should mean that all names are None
self.assertEqual(names, [None, None, None])
self.assertEqual(ages, [40, 30, 20])
qs = self.Person.objects.all().order_by('-age')
qs = qs.limit(10)
ages = [p.age for p in qs]
self.assertEqual(ages, [40, 30, 20])
qs = self.Person.objects.all().limit(10)
qs = qs.order_by('-age')
ages = [p.age for p in qs]
self.assertEqual(ages, [40, 30, 20])
qs = self.Person.objects.all().skip(0)
qs = qs.order_by('-age')
ages = [p.age for p in qs]
self.assertEqual(ages, [40, 30, 20])
def test_confirm_order_by_reference_wont_work(self):
"""Ordering by reference is not possible. Use map / reduce.. or
denormalise"""
class Author(Document):
author = ReferenceField(self.Person)
Author.drop_collection()
person_a = self.Person(name="User A", age=20)
person_a.save()
person_b = self.Person(name="User B", age=40)
person_b.save()
person_c = self.Person(name="User C", age=30)
person_c.save()
Author(author=person_a).save()
Author(author=person_b).save()
Author(author=person_c).save()
names = [a.author.name for a in Author.objects.order_by('-author__age')]
self.assertEqual(names, ['User A', 'User B', 'User C'])
def test_map_reduce(self):
"""Ensure map/reduce is both mapping and reducing.
"""
class BlogPost(Document):
title = StringField()
tags = ListField(StringField(), db_field='post-tag-list')
BlogPost.drop_collection()
BlogPost(title="Post #1", tags=['music', 'film', 'print']).save()
BlogPost(title="Post #2", tags=['music', 'film']).save()
BlogPost(title="Post #3", tags=['film', 'photography']).save()
map_f = """
function() {
this[~tags].forEach(function(tag) {
emit(tag, 1);
});
}
"""
reduce_f = """
function(key, values) {
var total = 0;
for(var i=0; i<values.length; i++) {
total += values[i];
}
return total;
}
"""
# run a map/reduce operation spanning all posts
results = BlogPost.objects.map_reduce(map_f, reduce_f, "myresults")
results = list(results)
self.assertEqual(len(results), 4)
music = list(filter(lambda r: r.key == "music", results))[0]
self.assertEqual(music.value, 2)
film = list(filter(lambda r: r.key == "film", results))[0]
self.assertEqual(film.value, 3)
BlogPost.drop_collection()
def test_map_reduce_with_custom_object_ids(self):
"""Ensure that QuerySet.map_reduce works properly with custom
primary keys.
"""
class BlogPost(Document):
title = StringField(primary_key=True)
tags = ListField(StringField())
post1 = BlogPost(title="Post #1", tags=["mongodb", "mongoengine"])
post2 = BlogPost(title="Post #2", tags=["django", "mongodb"])
post3 = BlogPost(title="Post #3", tags=["hitchcock films"])
post1.save()
post2.save()
post3.save()
self.assertEqual(BlogPost._fields['title'].db_field, '_id')
self.assertEqual(BlogPost._meta['id_field'], 'title')
map_f = """
function() {
emit(this._id, 1);
}
"""
# reduce to a list of tag ids and counts
reduce_f = """
function(key, values) {
var total = 0;
for(var i=0; i<values.length; i++) {
total += values[i];
}
return total;
}
"""
results = BlogPost.objects.map_reduce(map_f, reduce_f, "myresults")
results = list(results)
self.assertEqual(results[0].object, post1)
self.assertEqual(results[1].object, post2)
self.assertEqual(results[2].object, post3)
BlogPost.drop_collection()
def test_map_reduce_finalize(self):
"""Ensure that map, reduce, and finalize run and introduce "scope"
by simulating "hotness" ranking with Reddit algorithm.
"""
from time import mktime
class Link(Document):
title = StringField(db_field='bpTitle')
up_votes = IntField()
down_votes = IntField()
submitted = DateTimeField(db_field='sTime')
Link.drop_collection()
now = datetime.utcnow()
# Note: Test data taken from a custom Reddit homepage on
# Fri, 12 Feb 2010 14:36:00 -0600. The expected "hotness" ranking
# happens to match the insertion order below, but the ranking itself is
# computed from votes and submission time, not from insertion order.
Link(title = "Google Buzz auto-followed a woman's abusive ex ...",
up_votes = 1079,
down_votes = 553,
submitted = now-timedelta(hours=4)).save()
Link(title = "We did it! Barbie is a computer engineer.",
up_votes = 481,
down_votes = 124,
submitted = now-timedelta(hours=2)).save()
Link(title = "This Is A Mosquito Getting Killed By A Laser",
up_votes = 1446,
down_votes = 530,
submitted=now-timedelta(hours=13)).save()
Link(title = "Arabic flashcards land physics student in jail.",
up_votes = 215,
down_votes = 105,
submitted = now-timedelta(hours=6)).save()
Link(title = "The Burger Lab: Presenting, the Flood Burger",
up_votes = 48,
down_votes = 17,
submitted = now-timedelta(hours=5)).save()
Link(title="How to see polarization with the naked eye",
up_votes = 74,
down_votes = 13,
submitted = now-timedelta(hours=10)).save()
map_f = """
function() {
emit(this[~id], {up_delta: this[~up_votes] - this[~down_votes],
sub_date: this[~submitted].getTime() / 1000})
}
"""
reduce_f = """
function(key, values) {
data = values[0];
x = data.up_delta;
// calculate time diff between reddit epoch and submission
sec_since_epoch = data.sub_date - reddit_epoch;
// calculate 'Y'
if(x > 0) {
y = 1;
} else if (x == 0) {
y = 0;
} else {
y = -1;
}
// calculate 'Z', the maximal value
if(Math.abs(x) >= 1) {
z = Math.abs(x);
} else {
z = 1;
}
return {x: x, y: y, z: z, t_s: sec_since_epoch};
}
"""
finalize_f = """
function(key, value) {
// f(sec_since_epoch,y,z) =
// log10(z) + ((y*sec_since_epoch) / 45000)
z_10 = Math.log(value.z) / Math.log(10);
weight = z_10 + ((value.y * value.t_s) / 45000);
return weight;
}
"""
# provide the reddit epoch (used for ranking) as a variable available
# to all phases of the map/reduce operation: map, reduce, and finalize.
reddit_epoch = mktime(datetime(2005, 12, 8, 7, 46, 43).timetuple())
scope = {'reddit_epoch': reddit_epoch}
# run a map/reduce operation across all links. ordering is set
# to "-value", which orders the "weight" value returned from
# "finalize_f" in descending order.
results = Link.objects.order_by("-value")
results = results.map_reduce(map_f,
reduce_f,
"myresults",
finalize_f=finalize_f,
scope=scope)
results = list(results)
# assert troublesome Buzz article is ranked 1st
self.assertTrue(results[0].object.title.startswith("Google Buzz"))
# assert laser vision is ranked last
self.assertTrue(results[-1].object.title.startswith("How to see"))
Link.drop_collection()
def test_item_frequencies(self):
"""Ensure that item frequencies are properly generated from lists.
"""
class BlogPost(Document):
hits = IntField()
tags = ListField(StringField(), db_field='blogTags')
BlogPost.drop_collection()
BlogPost(hits=1, tags=['music', 'film', 'actors', 'watch']).save()
BlogPost(hits=2, tags=['music', 'watch']).save()
BlogPost(hits=2, tags=['music', 'actors']).save()
def test_assertions(f):
f = dict((key, int(val)) for key, val in f.items())
self.assertEqual(set(['music', 'film', 'actors', 'watch']), set(f.keys()))
self.assertEqual(f['music'], 3)
self.assertEqual(f['actors'], 2)
self.assertEqual(f['watch'], 2)
self.assertEqual(f['film'], 1)
exec_js = BlogPost.objects.item_frequencies('tags')
map_reduce = BlogPost.objects.item_frequencies('tags', map_reduce=True)
test_assertions(exec_js)
test_assertions(map_reduce)
# Ensure query is taken into account
def test_assertions(f):
f = dict((key, int(val)) for key, val in f.items())
self.assertEqual(set(['music', 'actors', 'watch']), set(f.keys()))
self.assertEqual(f['music'], 2)
self.assertEqual(f['actors'], 1)
self.assertEqual(f['watch'], 1)
exec_js = BlogPost.objects(hits__gt=1).item_frequencies('tags')
map_reduce = BlogPost.objects(hits__gt=1).item_frequencies('tags', map_reduce=True)
test_assertions(exec_js)
test_assertions(map_reduce)
# Check that normalization works
def test_assertions(f):
self.assertAlmostEqual(f['music'], 3.0/8.0)
self.assertAlmostEqual(f['actors'], 2.0/8.0)
self.assertAlmostEqual(f['watch'], 2.0/8.0)
self.assertAlmostEqual(f['film'], 1.0/8.0)
exec_js = BlogPost.objects.item_frequencies('tags', normalize=True)
map_reduce = BlogPost.objects.item_frequencies('tags', normalize=True, map_reduce=True)
test_assertions(exec_js)
test_assertions(map_reduce)
# Check item_frequencies works for non-list fields
def test_assertions(f):
self.assertEqual(set([1, 2]), set(f.keys()))
self.assertEqual(f[1], 1)
self.assertEqual(f[2], 2)
exec_js = BlogPost.objects.item_frequencies('hits')
map_reduce = BlogPost.objects.item_frequencies('hits', map_reduce=True)
test_assertions(exec_js)
test_assertions(map_reduce)
BlogPost.drop_collection()
def test_item_frequencies_on_embedded(self):
"""Ensure that item frequencies are properly generated from lists.
"""
class Phone(EmbeddedDocument):
number = StringField()
class Person(Document):
name = StringField()
phone = EmbeddedDocumentField(Phone)
Person.drop_collection()
doc = Person(name="Guido")
doc.phone = Phone(number='62-3331-1656')
doc.save()
doc = Person(name="Marr")
doc.phone = Phone(number='62-3331-1656')
doc.save()
doc = Person(name="WP Junior")
doc.phone = Phone(number='62-3332-1656')
doc.save()
def test_assertions(f):
f = dict((key, int(val)) for key, val in f.items())
self.assertEqual(set(['62-3331-1656', '62-3332-1656']), set(f.keys()))
self.assertEqual(f['62-3331-1656'], 2)
self.assertEqual(f['62-3332-1656'], 1)
exec_js = Person.objects.item_frequencies('phone.number')
map_reduce = Person.objects.item_frequencies('phone.number', map_reduce=True)
test_assertions(exec_js)
test_assertions(map_reduce)
# Ensure query is taken into account
def test_assertions(f):
f = dict((key, int(val)) for key, val in f.items())
self.assertEqual(set(['62-3331-1656']), set(f.keys()))
self.assertEqual(f['62-3331-1656'], 2)
exec_js = Person.objects(phone__number='62-3331-1656').item_frequencies('phone.number')
map_reduce = Person.objects(phone__number='62-3331-1656').item_frequencies('phone.number', map_reduce=True)
test_assertions(exec_js)
test_assertions(map_reduce)
# Check that normalization works
def test_assertions(f):
self.assertEqual(f['62-3331-1656'], 2.0/3.0)
self.assertEqual(f['62-3332-1656'], 1.0/3.0)
exec_js = Person.objects.item_frequencies('phone.number', normalize=True)
map_reduce = Person.objects.item_frequencies('phone.number', normalize=True, map_reduce=True)
test_assertions(exec_js)
test_assertions(map_reduce)
def test_item_frequencies_null_values(self):
class Person(Document):
name = StringField()
city = StringField()
Person.drop_collection()
Person(name="Wilson Snr", city="CRB").save()
Person(name="Wilson Jr").save()
freq = Person.objects.item_frequencies('city')
self.assertEqual(freq, {'CRB': 1.0, None: 1.0})
freq = Person.objects.item_frequencies('city', normalize=True)
self.assertEqual(freq, {'CRB': 0.5, None: 0.5})
freq = Person.objects.item_frequencies('city', map_reduce=True)
self.assertEqual(freq, {'CRB': 1.0, None: 1.0})
freq = Person.objects.item_frequencies('city', normalize=True, map_reduce=True)
self.assertEqual(freq, {'CRB': 0.5, None: 0.5})
def test_item_frequencies_with_null_embedded(self):
class Data(EmbeddedDocument):
name = StringField()
class Extra(EmbeddedDocument):
tag = StringField()
class Person(Document):
data = EmbeddedDocumentField(Data, required=True)
extra = EmbeddedDocumentField(Extra)
Person.drop_collection()
p = Person()
p.data = Data(name="Wilson Jr")
p.save()
p = Person()
p.data = Data(name="Wesley")
p.extra = Extra(tag="friend")
p.save()
ot = Person.objects.item_frequencies('extra.tag', map_reduce=False)
self.assertEqual(ot, {None: 1.0, u'friend': 1.0})
ot = Person.objects.item_frequencies('extra.tag', map_reduce=True)
self.assertEqual(ot, {None: 1.0, u'friend': 1.0})
def test_item_frequencies_with_0_values(self):
class Test(Document):
val = IntField()
Test.drop_collection()
t = Test()
t.val = 0
t.save()
ot = Test.objects.item_frequencies('val', map_reduce=True)
self.assertEqual(ot, {0: 1})
ot = Test.objects.item_frequencies('val', map_reduce=False)
self.assertEqual(ot, {0: 1})
def test_item_frequencies_with_False_values(self):
class Test(Document):
val = BooleanField()
Test.drop_collection()
t = Test()
t.val = False
t.save()
ot = Test.objects.item_frequencies('val', map_reduce=True)
self.assertEqual(ot, {False: 1})
ot = Test.objects.item_frequencies('val', map_reduce=False)
self.assertEqual(ot, {False: 1})
def test_item_frequencies_normalize(self):
class Test(Document):
val = IntField()
Test.drop_collection()
for i in xrange(50):
Test(val=1).save()
for i in xrange(20):
Test(val=2).save()
freqs = Test.objects.item_frequencies('val', map_reduce=False, normalize=True)
self.assertEqual(freqs, {1: 50.0/70, 2: 20.0/70})
freqs = Test.objects.item_frequencies('val', map_reduce=True, normalize=True)
self.assertEqual(freqs, {1: 50.0/70, 2: 20.0/70})
def test_average(self):
"""Ensure that field can be averaged correctly."""
ages = [0, 23, 54, 12, 94, 27]
for i, age in enumerate(ages):
self.Person(name='test%s' % i, age=age).save()
self.Person(name='ageless person').save()
avg = float(sum(ages)) / len(ages)
self.assertAlmostEqual(int(self.Person.objects.average('age')), avg)
def test_average_over_zero(self):
self.Person(name='person', age=0).save()
self.assertEqual(int(self.Person.objects.average('age')), 0)
def test_sum(self):
"""Ensure that field can be summed over correctly."""
ages = [0, 23, 54, 12, 94, 27]
for i, age in enumerate(ages):
self.Person(name='test%s' % i, age=age).save()
self.Person(name='ageless person').save()
self.assertEqual(int(self.Person.objects.sum('age')), sum(ages))
def test_average_over_db_field(self):
class UserVisit(Document):
num_visits = IntField(db_field='visits')
UserVisit.drop_collection()
UserVisit.objects.create(num_visits=20)
UserVisit.objects.create(num_visits=10)
self.assertEqual(UserVisit.objects.average('num_visits'), 15)
def test_sum_over_db_field(self):
class UserVisit(Document):
num_visits = IntField(db_field='visits')
UserVisit.drop_collection()
UserVisit.objects.create(num_visits=10)
UserVisit.objects.create(num_visits=5)
self.assertEqual(UserVisit.objects.sum('num_visits'), 15)
def test_distinct(self):
"""Ensure that the QuerySet.distinct method works.
"""
self.Person(name='Mr Orange', age=20).save()
self.Person(name='Mr White', age=20).save()
self.Person(name='Mr Orange', age=30).save()
self.Person(name='Mr Pink', age=30).save()
self.assertEqual(set(self.Person.objects.distinct('name')),
set(['Mr Orange', 'Mr White', 'Mr Pink']))
self.assertEqual(set(self.Person.objects.distinct('age')),
set([20, 30]))
self.assertEqual(set(self.Person.objects(age=30).distinct('name')),
set(['Mr Orange', 'Mr Pink']))
def test_distinct_handles_references(self):
class Foo(Document):
bar = ReferenceField("Bar")
class Bar(Document):
text = StringField()
Bar.drop_collection()
Foo.drop_collection()
bar = Bar(text="hi")
bar.save()
foo = Foo(bar=bar)
foo.save()
self.assertEqual(Foo.objects.distinct("bar"), [bar])
def test_distinct_handles_references_to_alias(self):
register_connection('testdb', 'mongoenginetest2')
class Foo(Document):
bar = ReferenceField("Bar")
meta = {'db_alias': 'testdb'}
class Bar(Document):
text = StringField()
meta = {'db_alias': 'testdb'}
Bar.drop_collection()
Foo.drop_collection()
bar = Bar(text="hi")
bar.save()
foo = Foo(bar=bar)
foo.save()
self.assertEqual(Foo.objects.distinct("bar"), [bar])
def test_distinct_handles_db_field(self):
"""Ensure that distinct resolves field name to db_field as expected.
"""
class Product(Document):
product_id = IntField(db_field='pid')
Product.drop_collection()
Product(product_id=1).save()
Product(product_id=2).save()
Product(product_id=1).save()
self.assertEqual(set(Product.objects.distinct('product_id')),
set([1, 2]))
self.assertEqual(set(Product.objects.distinct('pid')),
set([1, 2]))
Product.drop_collection()
def test_custom_manager(self):
"""Ensure that custom QuerySetManager instances work as expected.
"""
class BlogPost(Document):
tags = ListField(StringField())
deleted = BooleanField(default=False)
date = DateTimeField(default=datetime.now)
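# Override the default manager so that deleted posts are filtered out of every query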
@queryset_manager
def objects(cls, qryset):
opts = {"deleted": False}
return qryset(**opts)
@queryset_manager
def music_posts(doc_cls, queryset, deleted=False):
return queryset(tags='music',
deleted=deleted).order_by('date')
BlogPost.drop_collection()
post1 = BlogPost(tags=['music', 'film']).save()
post2 = BlogPost(tags=['music']).save()
post3 = BlogPost(tags=['film', 'actors']).save()
post4 = BlogPost(tags=['film', 'actors', 'music'], deleted=True).save()
self.assertEqual([p.id for p in BlogPost.objects()],
[post1.id, post2.id, post3.id])
self.assertEqual([p.id for p in BlogPost.music_posts()],
[post1.id, post2.id])
self.assertEqual([p.id for p in BlogPost.music_posts(True)],
[post4.id])
BlogPost.drop_collection()
def test_custom_manager_overriding_objects_works(self):
class Foo(Document):
bar = StringField(default='bar')
active = BooleanField(default=False)
@queryset_manager
def objects(doc_cls, queryset):
return queryset(active=True)
@queryset_manager
def with_inactive(doc_cls, queryset):
return queryset(active=False)
Foo.drop_collection()
Foo(active=True).save()
Foo(active=False).save()
self.assertEqual(1, Foo.objects.count())
self.assertEqual(1, Foo.with_inactive.count())
Foo.with_inactive.first().delete()
self.assertEqual(0, Foo.with_inactive.count())
self.assertEqual(1, Foo.objects.count())
def test_inherit_objects(self):
class Foo(Document):
meta = {'allow_inheritance': True}
active = BooleanField(default=True)
@queryset_manager
def objects(klass, queryset):
return queryset(active=True)
class Bar(Foo):
pass
Bar.drop_collection()
Bar.objects.create(active=False)
self.assertEqual(0, Bar.objects.count())
def test_inherit_objects_override(self):
class Foo(Document):
meta = {'allow_inheritance': True}
active = BooleanField(default=True)
@queryset_manager
def objects(klass, queryset):
return queryset(active=True)
class Bar(Foo):
@queryset_manager
def objects(klass, queryset):
return queryset(active=False)
Bar.drop_collection()
Bar.objects.create(active=False)
self.assertEqual(0, Foo.objects.count())
self.assertEqual(1, Bar.objects.count())
def test_query_value_conversion(self):
"""Ensure that query values are properly converted when necessary.
"""
class BlogPost(Document):
author = ReferenceField(self.Person)
BlogPost.drop_collection()
person = self.Person(name='test', age=30)
person.save()
post = BlogPost(author=person)
post.save()
# Test that query may be performed by providing a document as a value
# while using a ReferenceField's name - the document should be
# converted to a DBRef, which is legal, unlike a Document object
post_obj = BlogPost.objects(author=person).first()
self.assertEqual(post.id, post_obj.id)
# Test that lists of values work when using the 'in', 'nin' and 'all' operators
post_obj = BlogPost.objects(author__in=[person]).first()
self.assertEqual(post.id, post_obj.id)
BlogPost.drop_collection()
def test_update_value_conversion(self):
"""Ensure that values used in updates are converted before use.
"""
class Group(Document):
members = ListField(ReferenceField(self.Person))
Group.drop_collection()
user1 = self.Person(name='user1')
user1.save()
user2 = self.Person(name='user2')
user2.save()
group = Group()
group.save()
Group.objects(id=group.id).update(set__members=[user1, user2])
group.reload()
self.assertTrue(len(group.members) == 2)
self.assertEqual(group.members[0].name, user1.name)
self.assertEqual(group.members[1].name, user2.name)
Group.drop_collection()
def test_dict_with_custom_baseclass(self):
"""Ensure DictField working with custom base clases.
"""
class Test(Document):
testdict = DictField()
Test.drop_collection()
t = Test(testdict={'f': 'Value'})
t.save()
self.assertEqual(Test.objects(testdict__f__startswith='Val').count(), 1)
self.assertEqual(Test.objects(testdict__f='Value').count(), 1)
Test.drop_collection()
class Test(Document):
testdict = DictField(basecls=StringField)
t = Test(testdict={'f': 'Value'})
t.save()
self.assertEqual(Test.objects(testdict__f='Value').count(), 1)
self.assertEqual(Test.objects(testdict__f__startswith='Val').count(), 1)
Test.drop_collection()
def test_bulk(self):
"""Ensure bulk querying by object id returns a proper dict.
"""
class BlogPost(Document):
title = StringField()
BlogPost.drop_collection()
post_1 = BlogPost(title="Post #1")
post_2 = BlogPost(title="Post #2")
post_3 = BlogPost(title="Post #3")
post_4 = BlogPost(title="Post #4")
post_5 = BlogPost(title="Post #5")
post_1.save()
post_2.save()
post_3.save()
post_4.save()
post_5.save()
ids = [post_1.id, post_2.id, post_5.id]
objects = BlogPost.objects.in_bulk(ids)
self.assertEqual(len(objects), 3)
self.assertTrue(post_1.id in objects)
self.assertTrue(post_2.id in objects)
self.assertTrue(post_5.id in objects)
self.assertTrue(objects[post_1.id].title == post_1.title)
self.assertTrue(objects[post_2.id].title == post_2.title)
self.assertTrue(objects[post_5.id].title == post_5.title)
BlogPost.drop_collection()
def tearDown(self):
self.Person.drop_collection()
def test_custom_querysets(self):
"""Ensure that custom QuerySet classes may be used.
"""
class CustomQuerySet(QuerySet):
def not_empty(self):
return self.count() > 0
class Post(Document):
meta = {'queryset_class': CustomQuerySet}
Post.drop_collection()
self.assertTrue(isinstance(Post.objects, CustomQuerySet))
self.assertFalse(Post.objects.not_empty())
Post().save()
self.assertTrue(Post.objects.not_empty())
Post.drop_collection()
def test_custom_querysets_set_manager_directly(self):
"""Ensure that custom QuerySet classes may be used.
"""
class CustomQuerySet(QuerySet):
def not_empty(self):
return self.count() > 0
class CustomQuerySetManager(QuerySetManager):
queryset_class = CustomQuerySet
class Post(Document):
objects = CustomQuerySetManager()
Post.drop_collection()
self.assertTrue(isinstance(Post.objects, CustomQuerySet))
self.assertFalse(Post.objects.not_empty())
Post().save()
self.assertTrue(Post.objects.not_empty())
Post.drop_collection()
def test_custom_querysets_managers_directly(self):
"""Ensure that custom QuerySet classes may be used.
"""
class CustomQuerySetManager(QuerySetManager):
@staticmethod
def get_queryset(doc_cls, queryset):
return queryset(is_published=True)
class Post(Document):
is_published = BooleanField(default=False)
published = CustomQuerySetManager()
Post.drop_collection()
Post().save()
Post(is_published=True).save()
self.assertEqual(Post.objects.count(), 2)
self.assertEqual(Post.published.count(), 1)
Post.drop_collection()
def test_custom_querysets_inherited(self):
"""Ensure that custom QuerySet classes may be used.
"""
class CustomQuerySet(QuerySet):
def not_empty(self):
return self.count() > 0
class Base(Document):
meta = {'abstract': True, 'queryset_class': CustomQuerySet}
class Post(Base):
pass
Post.drop_collection()
self.assertTrue(isinstance(Post.objects, CustomQuerySet))
self.assertFalse(Post.objects.not_empty())
Post().save()
self.assertTrue(Post.objects.not_empty())
Post.drop_collection()
def test_custom_querysets_inherited_direct(self):
"""Ensure that custom QuerySet classes may be used.
"""
class CustomQuerySet(QuerySet):
def not_empty(self):
return self.count() > 0
class CustomQuerySetManager(QuerySetManager):
queryset_class = CustomQuerySet
class Base(Document):
meta = {'abstract': True}
objects = CustomQuerySetManager()
class Post(Base):
pass
Post.drop_collection()
self.assertTrue(isinstance(Post.objects, CustomQuerySet))
self.assertFalse(Post.objects.not_empty())
Post().save()
self.assertTrue(Post.objects.not_empty())
Post.drop_collection()
def test_count_limit_and_skip(self):
class Post(Document):
title = StringField()
Post.drop_collection()
for i in xrange(10):
Post(title="Post %s" % i).save()
self.assertEqual(5, Post.objects.limit(5).skip(5).count())
self.assertEqual(10, Post.objects.limit(5).skip(5).count(with_limit_and_skip=False))
def test_call_after_limits_set(self):
"""Ensure that re-filtering after slicing works
"""
class Post(Document):
title = StringField()
Post.drop_collection()
Post(title="Post 1").save()
Post(title="Post 2").save()
posts = Post.objects.all()[0:1]
self.assertEqual(len(list(posts())), 1)
Post.drop_collection()
def test_order_then_filter(self):
"""Ensure that ordering still works after filtering.
"""
class Number(Document):
n = IntField()
Number.drop_collection()
n2 = Number.objects.create(n=2)
n1 = Number.objects.create(n=1)
self.assertEqual(list(Number.objects), [n2, n1])
self.assertEqual(list(Number.objects.order_by('n')), [n1, n2])
self.assertEqual(list(Number.objects.order_by('n').filter()), [n1, n2])
Number.drop_collection()
def test_clone(self):
"""Ensure that cloning clones complex querysets
"""
class Number(Document):
n = IntField()
Number.drop_collection()
for i in xrange(1, 101):
t = Number(n=i)
t.save()
test = Number.objects
test2 = test.clone()
self.assertFalse(test == test2)
self.assertEqual(test.count(), test2.count())
test = test.filter(n__gt=11)
test2 = test.clone()
self.assertFalse(test == test2)
self.assertEqual(test.count(), test2.count())
test = test.limit(10)
test2 = test.clone()
self.assertFalse(test == test2)
self.assertEqual(test.count(), test2.count())
Number.drop_collection()
def test_unset_reference(self):
class Comment(Document):
text = StringField()
class Post(Document):
comment = ReferenceField(Comment)
Comment.drop_collection()
Post.drop_collection()
comment = Comment.objects.create(text='test')
post = Post.objects.create(comment=comment)
self.assertEqual(post.comment, comment)
Post.objects.update(unset__comment=1)
post.reload()
self.assertEqual(post.comment, None)
Comment.drop_collection()
Post.drop_collection()
def test_order_works_with_custom_db_field_names(self):
class Number(Document):
n = IntField(db_field='number')
Number.drop_collection()
n2 = Number.objects.create(n=2)
n1 = Number.objects.create(n=1)
self.assertEqual(list(Number.objects), [n2,n1])
self.assertEqual(list(Number.objects.order_by('n')), [n1,n2])
Number.drop_collection()
def test_order_works_with_primary(self):
"""Ensure that order_by and primary work.
"""
class Number(Document):
n = IntField(primary_key=True)
Number.drop_collection()
Number(n=1).save()
Number(n=2).save()
Number(n=3).save()
numbers = [n.n for n in Number.objects.order_by('-n')]
self.assertEqual([3, 2, 1], numbers)
numbers = [n.n for n in Number.objects.order_by('+n')]
self.assertEqual([1, 2, 3], numbers)
Number.drop_collection()
def test_ensure_index(self):
"""Ensure that manual creation of indexes works.
"""
class Comment(Document):
message = StringField()
meta = {'allow_inheritance': True}
Comment.ensure_index('message')
info = Comment.objects._collection.index_information()
info = [(value['key'],
value.get('unique', False),
value.get('sparse', False))
for key, value in info.iteritems()]
self.assertTrue(([('_cls', 1), ('message', 1)], False, False) in info)
def test_where(self):
"""Ensure that where clauses work.
"""
class IntPair(Document):
fielda = IntField()
fieldb = IntField()
IntPair.objects._collection.delete_many({})
a = IntPair(fielda=1, fieldb=1)
b = IntPair(fielda=1, fieldb=2)
c = IntPair(fielda=2, fieldb=1)
a.save()
b.save()
c.save()
query = IntPair.objects.where('this[~fielda] >= this[~fieldb]')
self.assertEqual('this["fielda"] >= this["fieldb"]', query._where_clause)
results = list(query)
self.assertEqual(2, len(results))
self.assertTrue(a in results)
self.assertTrue(c in results)
query = IntPair.objects.where('this[~fielda] == this[~fieldb]')
results = list(query)
self.assertEqual(1, len(results))
self.assertTrue(a in results)
query = IntPair.objects.where('function() { return this[~fielda] >= this[~fieldb] }')
self.assertEqual('function() { return this["fielda"] >= this["fieldb"] }', query._where_clause)
results = list(query)
self.assertEqual(2, len(results))
self.assertTrue(a in results)
self.assertTrue(c in results)
def invalid_where():
list(IntPair.objects.where(fielda__gte=3))
self.assertRaises(TypeError, invalid_where)
def test_scalar(self):
class Organization(Document):
id = ObjectIdField('_id')
name = StringField()
class User(Document):
id = ObjectIdField('_id')
name = StringField()
organization = ObjectIdField()
User.drop_collection()
Organization.drop_collection()
whitehouse = Organization(name="White House")
whitehouse.save()
User(name="Bob Dole", organization=whitehouse.id).save()
# Efficient way to get all unique organization names for a given
# set of users (Pretend this has additional filtering.)
user_orgs = set(User.objects.scalar('organization'))
orgs = Organization.objects(id__in=user_orgs).scalar('name')
self.assertEqual(list(orgs), ['White House'])
# Efficient for generating listings, too.
orgs = Organization.objects.scalar('name').in_bulk(list(user_orgs))
user_map = User.objects.scalar('name', 'organization')
user_listing = [(user, orgs[org]) for user, org in user_map]
self.assertEqual([("Bob Dole", "White House")], user_listing)
def test_scalar_simple(self):
class TestDoc(Document):
x = IntField()
y = BooleanField()
TestDoc.drop_collection()
TestDoc(x=10, y=True).save()
TestDoc(x=20, y=False).save()
TestDoc(x=30, y=True).save()
plist = list(TestDoc.objects.scalar('x', 'y'))
self.assertEqual(len(plist), 3)
self.assertEqual(plist[0], (10, True))
self.assertEqual(plist[1], (20, False))
self.assertEqual(plist[2], (30, True))
class UserDoc(Document):
name = StringField()
age = IntField()
UserDoc.drop_collection()
UserDoc(name="Wilson Jr", age=19).save()
UserDoc(name="Wilson", age=43).save()
UserDoc(name="Eliana", age=37).save()
UserDoc(name="Tayza", age=15).save()
ulist = list(UserDoc.objects.scalar('name', 'age'))
self.assertEqual(ulist, [
(u'Wilson Jr', 19),
(u'Wilson', 43),
(u'Eliana', 37),
(u'Tayza', 15)])
ulist = list(UserDoc.objects.scalar('name').order_by('age'))
self.assertEqual(ulist, [
(u'Tayza'),
(u'Wilson Jr'),
(u'Eliana'),
(u'Wilson')])
def test_scalar_embedded(self):
class Profile(EmbeddedDocument):
name = StringField()
age = IntField()
class Locale(EmbeddedDocument):
city = StringField()
country = StringField()
class Person(Document):
profile = EmbeddedDocumentField(Profile)
locale = EmbeddedDocumentField(Locale)
Person.drop_collection()
Person(profile=Profile(name="Wilson Jr", age=19),
locale=Locale(city="Corumba-GO", country="Brazil")).save()
Person(profile=Profile(name="Gabriel Falcao", age=23),
locale=Locale(city="New York", country="USA")).save()
Person(profile=Profile(name="Lincoln de souza", age=28),
locale=Locale(city="Belo Horizonte", country="Brazil")).save()
Person(profile=Profile(name="Walter cruz", age=30),
locale=Locale(city="Brasilia", country="Brazil")).save()
self.assertEqual(
list(Person.objects.order_by('profile__age').scalar('profile__name')),
[u'Wilson Jr', u'Gabriel Falcao', u'Lincoln de souza', u'Walter cruz'])
ulist = list(Person.objects.order_by('locale.city')
.scalar('profile__name', 'profile__age', 'locale__city'))
self.assertEqual(ulist,
[(u'Lincoln de souza', 28, u'Belo Horizonte'),
(u'Walter cruz', 30, u'Brasilia'),
(u'Wilson Jr', 19, u'Corumba-GO'),
(u'Gabriel Falcao', 23, u'New York')])
def test_scalar_reference_field(self):
class State(Document):
name = StringField()
class Person(Document):
name = StringField()
state = ReferenceField(State)
State.drop_collection()
Person.drop_collection()
s1 = State(name="Goias")
s1.save()
Person(name="Wilson JR", state=s1).save()
plist = list(Person.objects.scalar('name', 'state'))
self.assertEqual(plist, [(u'Wilson JR', s1)])
def test_scalar_generic_reference_field(self):
class State(Document):
name = StringField()
class Person(Document):
name = StringField()
state = GenericReferenceField()
State.drop_collection()
Person.drop_collection()
s1 = State(name="Goias")
s1.save()
Person(name="Wilson JR", state=s1).save()
plist = list(Person.objects.scalar('name', 'state'))
self.assertEqual(plist, [(u'Wilson JR', s1)])
def test_scalar_db_field(self):
class TestDoc(Document):
x = IntField()
y = BooleanField()
TestDoc.drop_collection()
TestDoc(x=10, y=True).save()
TestDoc(x=20, y=False).save()
TestDoc(x=30, y=True).save()
plist = list(TestDoc.objects.scalar('x', 'y'))
self.assertEqual(len(plist), 3)
self.assertEqual(plist[0], (10, True))
self.assertEqual(plist[1], (20, False))
self.assertEqual(plist[2], (30, True))
def test_scalar_primary_key(self):
class SettingValue(Document):
key = StringField(primary_key=True)
value = StringField()
SettingValue.drop_collection()
s = SettingValue(key="test", value="test value")
s.save()
val = SettingValue.objects.scalar('key', 'value')
self.assertEqual(list(val), [('test', 'test value')])
def test_scalar_cursor_behaviour(self):
"""Ensure that a query returns a valid set of results.
"""
person1 = self.Person(name="User A", age=20)
person1.save()
person2 = self.Person(name="User B", age=30)
person2.save()
# Find all people in the collection
people = self.Person.objects.scalar('name')
self.assertEqual(people.count(), 2)
results = list(people)
self.assertEqual(results[0], "User A")
self.assertEqual(results[1], "User B")
# Use a query to filter the people found to just person1
people = self.Person.objects(age=20).scalar('name')
self.assertEqual(people.count(), 1)
person = people.next()
self.assertEqual(person, "User A")
# Test limit
people = list(self.Person.objects.limit(1).scalar('name'))
self.assertEqual(len(people), 1)
self.assertEqual(people[0], 'User A')
# Test skip
people = list(self.Person.objects.skip(1).scalar('name'))
self.assertEqual(len(people), 1)
self.assertEqual(people[0], 'User B')
person3 = self.Person(name="User C", age=40)
person3.save()
# Test slice limit
people = list(self.Person.objects[:2].scalar('name'))
self.assertEqual(len(people), 2)
self.assertEqual(people[0], 'User A')
self.assertEqual(people[1], 'User B')
# Test slice skip
people = list(self.Person.objects[1:].scalar('name'))
self.assertEqual(len(people), 2)
self.assertEqual(people[0], 'User B')
self.assertEqual(people[1], 'User C')
# Test slice limit and skip
people = list(self.Person.objects[1:2].scalar('name'))
self.assertEqual(len(people), 1)
self.assertEqual(people[0], 'User B')
people = list(self.Person.objects[1:1].scalar('name'))
self.assertEqual(len(people), 0)
# Test slice out of range
people = list(self.Person.objects.scalar('name')[80000:80001])
self.assertEqual(len(people), 0)
# Test larger slice __repr__
self.Person.objects.delete()
for i in xrange(55):
self.Person(name='A%s' % i, age=i).save()
self.assertEqual(self.Person.objects.scalar('name').count(), 55)
self.assertEqual("A0", "%s" % self.Person.objects.order_by('name').scalar('name').first())
self.assertEqual("A0", "%s" % self.Person.objects.scalar('name').order_by('name')[0])
if PY3:
self.assertEqual("['A1', 'A2']", "%s" % self.Person.objects.order_by('age').scalar('name')[1:3])
self.assertEqual("['A51', 'A52']", "%s" % self.Person.objects.order_by('age').scalar('name')[51:53])
else:
self.assertEqual("[u'A1', u'A2']", "%s" % self.Person.objects.order_by('age').scalar('name')[1:3])
self.assertEqual("[u'A51', u'A52']", "%s" % self.Person.objects.order_by('age').scalar('name')[51:53])
# with_id and in_bulk
person = self.Person.objects.order_by('name').first()
self.assertEqual("A0", "%s" % self.Person.objects.scalar('name').with_id(person.id))
pks = self.Person.objects.order_by('age').scalar('pk')[1:3]
if PY3:
self.assertEqual("['A1', 'A2']", "%s" % sorted(self.Person.objects.scalar('name').in_bulk(list(pks)).values()))
else:
self.assertEqual("[u'A1', u'A2']", "%s" % sorted(self.Person.objects.scalar('name').in_bulk(list(pks)).values()))
def test_elem_match(self):
class Foo(EmbeddedDocument):
shape = StringField()
color = StringField()
            thick = BooleanField()
meta = {'allow_inheritance': False}
class Bar(Document):
foo = ListField(EmbeddedDocumentField(Foo))
meta = {'allow_inheritance': False}
Bar.drop_collection()
        b1 = Bar(foo=[Foo(shape="square", color="purple", thick=False),
                      Foo(shape="circle", color="red", thick=True)])
        b1.save()
        b2 = Bar(foo=[Foo(shape="square", color="red", thick=True),
                      Foo(shape="circle", color="purple", thick=False)])
b2.save()
ak = list(Bar.objects(foo__match={'shape': "square", "color": "purple"}))
self.assertEqual([b1], ak)
def test_upsert_includes_cls(self):
"""Upserts should include _cls information for inheritable classes
"""
class Test(Document):
test = StringField()
Test.drop_collection()
Test.objects(test='foo').update_one(upsert=True, set__test='foo')
self.assertFalse('_cls' in Test._collection.find_one())
class Test(Document):
meta = {'allow_inheritance': True}
test = StringField()
Test.drop_collection()
Test.objects(test='foo').update_one(upsert=True, set__test='foo')
self.assertTrue('_cls' in Test._collection.find_one())
def test_clear_cls_query(self):
class Test(Document):
meta = {'allow_inheritance': True}
test = StringField()
Test.drop_collection()
Test.objects.create(test='foo')
tests = Test.objects.clear_cls_query().all()
self.assertEqual(tests.count(), 1)
self.assertEqual(tests._initial_query, {})
def test_read_preference(self):
class Bar(Document):
txt = StringField()
meta = {
'indexes': [ 'txt' ]
}
Bar.drop_collection()
bars = list(Bar.objects(read_preference=ReadPreference.PRIMARY))
self.assertEqual([], bars)
with self.assertRaises(TypeError) as cm:
Bar.objects(read_preference='Primary')
self.assertEqual(
str(cm.exception), "'Primary' is not a read preference."
)
# read_preference as a kwarg
bars = Bar.objects(read_preference=ReadPreference.SECONDARY_PREFERRED)
self.assertEqual(
bars._read_preference, ReadPreference.SECONDARY_PREFERRED
)
self.assertEqual(
bars._cursor.collection.read_preference,
ReadPreference.SECONDARY_PREFERRED
)
# read_preference as a query set method
bars = Bar.objects.read_preference(ReadPreference.SECONDARY)
self.assertEqual(
bars._read_preference,
ReadPreference.SECONDARY
)
self.assertEqual(
bars._cursor.collection.read_preference,
ReadPreference.SECONDARY
)
# read_preference after skip
bars = Bar.objects.skip(1) \
.read_preference(ReadPreference.SECONDARY_PREFERRED)
self.assertEqual(
bars._read_preference, ReadPreference.SECONDARY_PREFERRED
)
self.assertEqual(
bars._cursor.collection.read_preference,
ReadPreference.SECONDARY_PREFERRED
)
# read_preference after limit
bars = Bar.objects.limit(1) \
.read_preference(ReadPreference.SECONDARY)
self.assertEqual(
bars._read_preference, ReadPreference.SECONDARY
)
self.assertEqual(
bars._cursor.collection.read_preference,
ReadPreference.SECONDARY
)
# read_preference after order_by
bars = Bar.objects.order_by('txt') \
.read_preference(ReadPreference.SECONDARY_PREFERRED)
self.assertEqual(
bars._read_preference, ReadPreference.SECONDARY_PREFERRED
)
self.assertEqual(
bars._cursor.collection.read_preference,
ReadPreference.SECONDARY_PREFERRED
)
# read_preference after hint
bars = Bar.objects.hint([('txt', 1)]) \
.read_preference(ReadPreference.SECONDARY_PREFERRED)
self.assertEqual(
bars._read_preference, ReadPreference.SECONDARY_PREFERRED
)
self.assertEqual(
bars._cursor.collection.read_preference,
ReadPreference.SECONDARY_PREFERRED
)
def test_json_simple(self):
class Embedded(EmbeddedDocument):
string = StringField()
class Doc(Document):
string = StringField()
embedded_field = EmbeddedDocumentField(Embedded)
Doc.drop_collection()
Doc(string="Hi", embedded_field=Embedded(string="Hi")).save()
Doc(string="Bye", embedded_field=Embedded(string="Bye")).save()
Doc().save()
json_data = Doc.objects.to_json()
doc_objects = list(Doc.objects)
self.assertEqual(doc_objects, Doc.objects.from_json(json_data))
def test_json_complex(self):
if pymongo.version_tuple[0] <= 2 and pymongo.version_tuple[1] <= 3:
raise SkipTest("Need pymongo 2.4 as has a fix for DBRefs")
class EmbeddedDoc(EmbeddedDocument):
pass
class Simple(Document):
pass
class Doc(Document):
string_field = StringField(default='1')
int_field = IntField(default=1)
float_field = FloatField(default=1.1)
boolean_field = BooleanField(default=True)
datetime_field = DateTimeField(default=datetime.now)
embedded_document_field = EmbeddedDocumentField(
EmbeddedDoc, default=lambda: EmbeddedDoc())
list_field = ListField(default=lambda: [1, 2, 3])
dict_field = DictField(default=lambda: {"hello": "world"})
objectid_field = ObjectIdField(default=ObjectId)
reference_field = ReferenceField(Simple, default=lambda: Simple().save())
map_field = MapField(IntField(), default=lambda: {"simple": 1})
complex_datetime_field = ComplexDateTimeField(default=datetime.now)
url_field = URLField(default="http://mongoengine.org")
dynamic_field = DynamicField(default=1)
generic_reference_field = GenericReferenceField(default=lambda: Simple().save())
sorted_list_field = SortedListField(IntField(),
default=lambda: [1, 2, 3])
email_field = EmailField(default="[email protected]")
geo_point_field = GeoPointField(default=lambda: [1, 2])
sequence_field = SequenceField()
uuid_field = UUIDField(default=uuid.uuid4)
generic_embedded_document_field = GenericEmbeddedDocumentField(
default=lambda: EmbeddedDoc())
Simple.drop_collection()
Doc.drop_collection()
Doc().save()
json_data = Doc.objects.to_json()
doc_objects = list(Doc.objects)
self.assertEqual(doc_objects, Doc.objects.from_json(json_data))
def test_as_pymongo(self):
class User(Document):
id = ObjectIdField('_id')
name = StringField()
age = IntField()
User.drop_collection()
User(name="Bob Dole", age=89).save()
User(name="Barack Obama", age=51).save()
users = User.objects.only('name').as_pymongo()
results = list(users)
self.assertTrue(isinstance(results[0], dict))
self.assertTrue(isinstance(results[1], dict))
self.assertEqual(results[0]['name'], 'Bob Dole')
self.assertEqual(results[1]['name'], 'Barack Obama')
# Make sure _id is included in the results
users = User.objects.only('id', 'name').as_pymongo()
results = list(users)
self.assertTrue('_id' in results[0])
self.assertTrue('_id' in results[1])
# Test coerce_types
users = User.objects.only('name').as_pymongo(coerce_types=True)
results = list(users)
self.assertTrue(isinstance(results[0], dict))
self.assertTrue(isinstance(results[1], dict))
self.assertEqual(results[0]['name'], 'Bob Dole')
self.assertEqual(results[1]['name'], 'Barack Obama')
def test_as_pymongo_json_limit_fields(self):
class User(Document):
email = EmailField(unique=True, required=True)
password_hash = StringField(db_field='password_hash', required=True)
password_salt = StringField(db_field='password_salt', required=True)
User.drop_collection()
User(email="[email protected]", password_salt="SomeSalt", password_hash="SomeHash").save()
serialized_user = User.objects.exclude('password_salt', 'password_hash').as_pymongo()[0]
self.assertEqual(set(['_id', 'email']), set(serialized_user.keys()))
serialized_user = User.objects.exclude('id', 'password_salt', 'password_hash').to_json()
self.assertEqual('[{"email": "[email protected]"}]', serialized_user)
serialized_user = User.objects.exclude('password_salt').only('email').as_pymongo()[0]
self.assertEqual(set(['email']), set(serialized_user.keys()))
serialized_user = User.objects.exclude('password_salt').only('email').to_json()
self.assertEqual('[{"email": "[email protected]"}]', serialized_user)
@unittest.skip("not implemented")
def test_no_dereference(self):
class Organization(Document):
name = StringField()
class User(Document):
name = StringField()
organization = ReferenceField(Organization)
User.drop_collection()
Organization.drop_collection()
whitehouse = Organization(name="White House").save()
User(name="Bob Dole", organization=whitehouse).save()
qs = User.objects()
self.assertTrue(isinstance(qs.first().organization, Organization))
self.assertFalse(isinstance(qs.no_dereference().first().organization,
Organization))
self.assertTrue(isinstance(qs.first().organization, Organization))
def test_cached_queryset(self):
class Person(Document):
name = StringField()
Person.drop_collection()
for i in xrange(100):
Person(name="No: %s" % i).save()
with query_counter() as q:
self.assertEqual(q, 0)
people = Person.objects
[x for x in people]
self.assertEqual(100, len(people._result_cache))
self.assertEqual(None, people._len)
self.assertEqual(q, 1)
list(people)
self.assertEqual(100, people._len) # Caused by list calling len
self.assertEqual(q, 1)
people.count() # count is cached
self.assertEqual(q, 1)
def test_cache_not_cloned(self):
class User(Document):
name = StringField()
def __unicode__(self):
return self.name
User.drop_collection()
User(name="Alice").save()
User(name="Bob").save()
users = User.objects.all().order_by('name')
self.assertEqual("%s" % users, "[<User: Alice>, <User: Bob>]")
self.assertEqual(2, len(users._result_cache))
users = users.filter(name="Bob")
self.assertEqual("%s" % users, "[<User: Bob>]")
self.assertEqual(1, len(users._result_cache))
def test_nested_queryset_iterator(self):
# Try iterating the same queryset twice, nested.
names = ['Alice', 'Bob', 'Chuck', 'David', 'Eric', 'Francis', 'George']
class User(Document):
name = StringField()
def __unicode__(self):
return self.name
User.drop_collection()
for name in names:
User(name=name).save()
users = User.objects.all().order_by('name')
outer_count = 0
inner_count = 0
inner_total_count = 0
with query_counter() as q:
self.assertEqual(q, 0)
self.assertEqual(users.count(), 7)
for i, outer_user in enumerate(users):
self.assertEqual(outer_user.name, names[i])
outer_count += 1
inner_count = 0
# Calling len might disrupt the inner loop if there are bugs
self.assertEqual(users.count(), 7)
for j, inner_user in enumerate(users):
self.assertEqual(inner_user.name, names[j])
inner_count += 1
inner_total_count += 1
self.assertEqual(inner_count, 7) # inner loop should always be executed seven times
self.assertEqual(outer_count, 7) # outer loop should be executed seven times total
            self.assertEqual(inner_total_count, 7 * 7) # inner loop should be executed forty-nine times total
self.assertEqual(q, 2)
def test_no_sub_classes(self):
class A(Document):
x = IntField()
y = IntField()
meta = {'allow_inheritance': True}
class B(A):
z = IntField()
class C(B):
zz = IntField()
A.drop_collection()
A(x=10, y=20).save()
A(x=15, y=30).save()
B(x=20, y=40).save()
B(x=30, y=50).save()
C(x=40, y=60).save()
self.assertEqual(A.objects.no_sub_classes().count(), 2)
self.assertEqual(A.objects.count(), 5)
self.assertEqual(B.objects.no_sub_classes().count(), 2)
self.assertEqual(B.objects.count(), 3)
self.assertEqual(C.objects.no_sub_classes().count(), 1)
self.assertEqual(C.objects.count(), 1)
for obj in A.objects.no_sub_classes():
self.assertEqual(obj.__class__, A)
for obj in B.objects.no_sub_classes():
self.assertEqual(obj.__class__, B)
for obj in C.objects.no_sub_classes():
self.assertEqual(obj.__class__, C)
def test_query_reference_to_custom_pk_doc(self):
class A(Document):
id = StringField(unique=True, primary_key=True)
class B(Document):
a = ReferenceField(A)
A.drop_collection()
B.drop_collection()
a = A.objects.create(id='custom_id')
b = B.objects.create(a=a)
self.assertEqual(B.objects.count(), 1)
self.assertEqual(B.objects.get(a=a).a, a)
self.assertEqual(B.objects.get(a=a.id).a, a)
def test_cls_query_in_subclassed_docs(self):
class Animal(Document):
name = StringField()
meta = {
'allow_inheritance': True
}
class Dog(Animal):
pass
class Cat(Animal):
pass
self.assertEqual(Animal.objects(name='Charlie')._query, {
'name': 'Charlie',
'_cls': { '$in': ('Animal', 'Animal.Dog', 'Animal.Cat') }
})
self.assertEqual(Dog.objects(name='Charlie')._query, {
'name': 'Charlie',
'_cls': 'Animal.Dog'
})
self.assertEqual(Cat.objects(name='Charlie')._query, {
'name': 'Charlie',
'_cls': 'Animal.Cat'
})
if __name__ == '__main__':
unittest.main()
| mit | -8,190,873,866,847,155,000 | 32.575385 | 126 | 0.576139 | false |
cloudify-cosmo/cloudify-jboss-plugin | jboss/utils.py | 1 | 3159 | ###############################################################################
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
import errno
import shutil
import tempfile
import subprocess
from cloudify import ctx
class Utils:
""" Utility class of useful tools """
def __init__(self):
self.tempdir = tempfile.mkdtemp()
ctx.logger.info('Tempdir created: [{0}]'.format(self.tempdir))
def __del__(self):
if os.path.exists(self.tempdir):
shutil.rmtree(self.tempdir)
ctx.logger.info('Tempdir removed: [{0}]'.format(self.tempdir))
@staticmethod
def save_command_to_file(command, file_path):
"""
:param command: command to be put into file
        :param file_path: full path to command file
:return:
"""
with open(file_path, 'w+') as file:
file.write(command)
@staticmethod
def append_command_flags(flags_string, file_path):
"""
        :param flags_string: flags to be appended to the command file
        :param file_path: full path to command file
:return:
"""
with open(file_path, 'a+') as file:
file.write(' ' + flags_string)
@staticmethod
def append_command_to_file(command, file_path):
"""
:param command: command to be put into file
        :param file_path: full path to command file
:return:
"""
with open(file_path, 'a+') as file:
file.write('\n' + command)
@staticmethod
def system(*args, **kwargs):
"""
Run system command.
:param args: list of commandline arguments
:param kwargs:
:return:
"""
kwargs.setdefault('stdout', subprocess.PIPE)
process = subprocess.Popen(args, **kwargs)
out, err = process.communicate()
return out
@staticmethod
def delete_file(filepath):
"""
Delete file from current filepath.
:param filepath: filepath
:return:
"""
if os.path.exists(filepath):
os.remove(filepath)
ctx.logger.info('File removed: [{0}]'.format(filepath))
@staticmethod
def create_subdirs_recursively(path):
"""
Create all directories that are in path and no exist.
:param path: path to be created
:return:
"""
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
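# Minimal usage sketch (illustrative only: the command text, flag and file
# name below are assumptions, not values defined by this plugin, and Utils()
# expects to run inside a Cloudify operation where ``ctx`` is available):
#
#     utils = Utils()                                    # creates a tempdir
#     script = os.path.join(utils.tempdir, 'deploy.cli')
#     Utils.save_command_to_file('deploy /tmp/app.war', script)
#     Utils.append_command_flags('--force', script)
#     Utils.append_command_to_file('ls /deployment', script)
#     output = Utils.system('cat', script)               # run a shell command
#     Utils.delete_file(script)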
| apache-2.0 | -1,661,285,359,231,721,700 | 30.277228 | 79 | 0.563153 | false |
probml/pyprobml | scripts/linreg_contours_sse_plot.py | 1 | 1204 | # Plot error surface for linear regression model.
# Based on https://github.com/probml/pmtk3/blob/master/demos/contoursSSEdemo.m
import numpy as np
import matplotlib.pyplot as plt
import os
figdir = os.path.join(os.environ["PYPROBML"], "figures")
def save_fig(fname): plt.savefig(os.path.join(figdir, fname))
from mpl_toolkits.mplot3d import axes3d, Axes3D
np.random.seed(0)
N = 21
x = np.linspace(0.0, 20, N)
X0 = x.reshape(N,1)
X = np.c_[np.ones((N,1)), X0]
w = np.array([-1.5, 1/9.])
y = w[0]*x + w[1]*np.square(x)
y = y + np.random.normal(0, 1, N) * 2
w = np.linalg.lstsq(X, y, rcond=None)[0]
W0, W1 = np.meshgrid(np.linspace(-8,0,100), np.linspace(-0.5,1.5,100))
SS = np.array([sum((w0*X[:,0] + w1*X[:,1] - y)**2) for w0, w1 in zip(np.ravel(W0), np.ravel(W1))])
SS = SS.reshape(W0.shape)
plt.figure()
plt.contourf(W0, W1, SS)
plt.colorbar()
save_fig('linregHeatmapSSE.pdf')
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(W0, W1, SS)
save_fig('linregSurfSSE.pdf')
plt.show()
fig,ax = plt.subplots()
CS = plt.contour(W0, W1, SS, levels=np.linspace(0,2000,10), cmap='jet')
plt.plot(w[0], w[1],'x')
save_fig('linregContoursSSE.pdf')
plt.show()
| mit | 2,998,280,091,559,601,000 | 26.386364 | 98 | 0.665282 | false |
HAZARDU5/sgdialer | src/nz/co/hazardmedia/sgdialer/controllers/SoundController.py | 1 | 5593 | __author__ = 'Michael Andrew [email protected]'
import random
import pygame
from pygame import mixer
from pygame.mixer import Sound
from nz.co.hazardmedia.sgdialer.config.Config import Config
from nz.co.hazardmedia.sgdialer.models.SoundModel import SoundModel
from nz.co.hazardmedia.sgdialer.events.EventType import EventType
class SoundController(object):
sounds = {}
def __init__(self):
pygame.mixer.pre_init(44100, -16, 2, 2048)
pygame.mixer.init()
"""self.channel1 = Channel(0)
self.channel2 = Channel(1)
self.channel3 = Channel(2)
self.channel4 = Channel(3)
self.channel5 = Channel(4)
self.channel6 = Channel(5)
self.channel7 = Channel(6)
self.channel8 = Channel(7)"""
if mixer.get_init():
print "Mixer initialized."
print "SoundController initialized."
def preload_sounds(self, files):
"""
Preload Sounds
:param files: Dictionary of file objects in the format {"file_name": "name", "delay": 1000, "delay_min": 1000,
"delay_max": 1000} where the key is the ID of the file as a string
"""
for key in files:
path = Config.sound_path + '/' + files[key]["file_name"]
print "Preloading sound from path: " + path
#if delay key does not exist set its default value
if not "delay" in files[key]:
files[key]["delay"] = 0
if not "delay_min" in files[key]:
files[key]["delay_min"] = 0
if not "delay_max" in files[key]:
files[key]["delay_max"] = 0
self.sounds[key] = SoundModel(Sound(path), path, files[key]["delay"], files[key]["delay_min"],
files[key]["delay_max"])
def play(self, name, queue_sounds=False, play_next_queued_sound=False, loop_forever=False, callback=None):
if not mixer.get_init():
print "Mixer not initialized! Cannot play sound."
#channel = mixer.find_channel()
#channel.play(self.sounds[id])
sound_item = self.sounds[name]
if queue_sounds:
if mixer.music.get_busy():
mixer.music.queue(sound_item.path)
print "Queued sound: " + name
if play_next_queued_sound:
mixer.music.play()
if callback:
print "Channel playback end callback defined"
self.channel_playback_ended_listener(mixer.music, callback)
else:
mixer.music.load(sound_item.path)
if loop_forever:
mixer.music.play(-1)
else:
mixer.music.play()
print "Playing sound: " + name
if callback:
print "Channel playback end callback defined"
self.channel_playback_ended_listener(mixer.music, callback)
else:
if loop_forever:
loops = -1
else:
loops = 0
if sound_item.delay == sound_item.delay_min == sound_item.delay_max == 0:
sound_item.sound.play(loops)
elif sound_item.delay > 0:
#pygame.time.wait(sound_item.delay)
self.play_after_delay(sound_item.sound, sound_item.delay, loops)
elif sound_item.delay_min == sound_item.delay_max:
self.play_after_delay(sound_item.sound, sound_item.delay_min, loops)
#pygame.time.wait(sound_item.delay_min)
elif sound_item.delay_min > 0 and sound_item.delay_max > 0:
rand = random.randrange(sound_item.delay_min, sound_item.delay_max, 250)
#pygame.time.wait(rand)
self.play_after_delay(sound_item.sound, rand, loops)
print "Playing sound: " + name
def play_after_delay(self, sound, delay=1000, loops=0):
pygame.time.set_timer(EventType.SOUND_PLAY_AFTER_DELAY, delay)
got_event = False
while not got_event:
for event in pygame.event.get():
if event.type == EventType.SOUND_PLAY_AFTER_DELAY:
sound.play(loops)
got_event = True
break
def stop(self, name):
print "Stopping sound: "+name
sound_item = self.sounds[name]
sound_item.sound.stop()
def play_when_idle(self, name, loop_forever=False):
mixer.music.set_endevent(EventType.SOUND_PLAYBACK_ENDED)
got_event = False
while not got_event:
for event in pygame.event.get():
if event.type == EventType.SOUND_PLAYBACK_ENDED:
print("Sound playback ended")
mixer.music.set_endevent()
self.play(name, False, False, loop_forever)
got_event = True
break
def channel_playback_ended_listener(self, channel, callback):
channel.set_endevent(EventType.SOUND_PLAYBACK_ENDED_CHANNEL)
print "Listening for channel playback end"
got_event = False
while not got_event:
for event in pygame.event.get():
if event.type == EventType.SOUND_PLAYBACK_ENDED_CHANNEL:
print("Sound playback ended for channel: "+str(channel))
channel.set_endevent()
callback()
got_event = True
break | mit | -3,581,254,749,395,123,000 | 32.698795 | 118 | 0.550867 | false |
openstack/monasca-ui | monitoring/config/local_settings.py | 1 | 4631 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
# Service group names (global across all projects):
MONITORING_SERVICES_GROUPS = [
{'name': _('OpenStack Services'), 'groupBy': 'service'},
{'name': _('Servers'), 'groupBy': 'hostname'}
]
# Services being monitored
MONITORING_SERVICES = getattr(
settings,
'MONITORING_SERVICES_GROUPS',
MONITORING_SERVICES_GROUPS
)
#
# Per project service groups. If in this form,
# '*' will be applied to all projects not explicitly listed.
#
# Note the above form (flat) is supported for backward compatibility.
#
# MONITORING_SERVICES_GROUPS = [
# {'admin': [
# {'name': _('OpenStack Services'), 'groupBy': 'service'},
# {'name': _('Servers'), 'groupBy': 'hostname'}]},
# {'*': [
# {'name': _('Services'), 'groupBy': 'service'},
# {'name': _('Instances'), 'groupBy': 'hostname'}]},
# ]
MONITORING_SERVICE_VERSION = getattr(
settings, 'MONITORING_SERVICE_VERSION', '2_0'
)
MONITORING_SERVICE_TYPE = getattr(
settings, 'MONITORING_SERVICE_TYPE', 'monitoring'
)
MONITORING_ENDPOINT_TYPE = getattr(
# NOTE(trebskit) # will default to OPENSTACK_ENDPOINT_TYPE
settings, 'MONITORING_ENDPOINT_TYPE', None
)
# Grafana button titles/file names (global across all projects):
GRAFANA_LINKS = []
DASHBOARDS = getattr(settings, 'GRAFANA_LINKS', GRAFANA_LINKS)
#
# Horizon will link to the grafana home page when using Grafana2.
# For any Grafana version additional links to specific dashboards can be
# created in two formats.
# Flat:
# GRAFANA_LINKS = [ {'title': _('Dashboard'), 'path': 'openstack', 'raw': False} ]
#
# Per project: '*' will be applied to all projects not explicitly listed.
# GRAFANA_LINKS = [
# {'admin': [
# {'title': _('Dashboard'), 'path': 'openstack', 'raw': False}]},
# {'*': [
# {'title': _('OpenStack Dashboard'), 'path': 'project', 'raw': False}]}
# ]
#
# If GRAFANA_URL is specified, the dashboard file name/raw URL must be
# specified through the 'path' attribute as shown above.
#
# Flat:
# GRAFANA_LINKS = [ {'title': _('Dashboard'), 'fileName': 'openstack.json', 'raw': False} ]
#
# GRAFANA_LINKS = [
# {'admin': [
# {'fileName': _('Dashboard'), 'fileName': 'openstack.json', 'raw': False}]},
# {'*': [
# {'title': _('OpenStack Dashboard'), 'fileName': 'project.json': False}]}
# ]
#
# If GRAFANA_URL is unspecified the dashboard file name must be specified
# through the fileName attribute.
#
# Both with and without GRAFANA_URL, the links have an optional 'raw' attribute
# which defaults to False if unspecified. If it is False, the value of 'path'
# (or 'fileName', respectively) is interpreted as a dashboard name and a link
# to the dashboard based on the dashboard's name will be generated. If it is
# True, the value of 'path' or 'fileName' will be treated as a URL to be used
# verbatim.
GRAFANA_URL = getattr(settings, 'GRAFANA_URL', None)
# If GRAFANA_URL is specified, an additional link will be shown that points to
# Grafana's list of dashboards. If you do not wish this, set SHOW_GRAFANA_HOME
# to False (by default this setting is True and the link will thus be shown).
SHOW_GRAFANA_HOME = getattr(settings, 'SHOW_GRAFANA_HOME', True)
ENABLE_LOG_MANAGEMENT_BUTTON = getattr(settings, 'ENABLE_LOG_MANAGEMENT_BUTTON', True)
ENABLE_EVENT_MANAGEMENT_BUTTON = getattr(settings, 'ENABLE_EVENT_MANAGEMENT_BUTTON', False)
KIBANA_POLICY_RULE = getattr(settings, 'KIBANA_POLICY_RULE',
'monitoring:kibana_access')
KIBANA_POLICY_SCOPE = getattr(settings, 'KIBANA_POLICY_SCOPE',
'monitoring')
KIBANA_HOST = getattr(settings, 'KIBANA_HOST', 'http://192.168.10.6:5601/')
OPENSTACK_SSL_NO_VERIFY = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
OPENSTACK_SSL_CACERT = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
POLICY_FILES = getattr(settings, 'POLICY_FILES', {})
POLICY_FILES.update({'monitoring': 'monitoring_policy.json',}) # noqa
setattr(settings, 'POLICY_FILES', POLICY_FILES)
| apache-2.0 | 3,803,075,964,564,306,000 | 37.272727 | 91 | 0.684086 | false |
guywithprtzl/SoundScrape | setup.py | 1 | 1273 | import os
from setuptools import setup
# Set external files
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
with open(os.path.join(os.path.dirname(__file__), 'requirements.txt')) as f:
required = f.read().splitlines()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='soundscrape',
version='0.17.0',
packages=['soundscrape'],
install_requires=required,
include_package_data=True,
license='MIT License',
description='Scrape an artist from SoundCloud',
long_description=README,
url='https://github.com/Miserlou/SoundScrape',
author='Rich Jones',
author_email='[email protected]',
entry_points={
'console_scripts': [
'soundscrape = soundscrape.soundscrape:main',
]
},
classifiers=[
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| mit | -4,324,277,316,226,223,000 | 31.641026 | 78 | 0.629222 | false |
shbhrsaha/rondo | rondo/rondo.py | 1 | 3666 | """
Record and replay mouse and keyboard actions in VirtualBox sessions
Usage:
python rondo.py [--r record_log_file] [--p replay_log_file] virtual_machine_name
"""
import time
import datetime
import logging
import argparse
import virtualbox
logging.basicConfig(level=logging.INFO)
f = None
last_record_log = None
def record_keyboard(event):
"""
Save a keyboard action.
"""
global last_record_log
global f
logging.info("Keyboard %s" % event.scancodes)
now = datetime.datetime.now()
diff = now - last_record_log
last_record_log = now
scan_codes = [str(x) for x in event.scancodes]
f.write("%s K %s \n" % (diff.total_seconds(), " ".join(scan_codes)))
def record_mouse(event):
"""
Save a mouse action
"""
global last_record_log
global f
logging.info("Mouse %s %s %s %s %s %s" % (event.mode, event.buttons, event.x, event.y, event.z, event.w))
now = datetime.datetime.now()
diff = now - last_record_log
last_record_log = now
f.write("%s M %s %s %s %s %s \n" % (diff.total_seconds(), event.buttons, event.x, event.y, event.z, event.w))
def main():
global last_record_log
global f
parser = argparse.ArgumentParser(
description='Record and replay mouse and keyboard actions in VirtualBox sessions')
parser.add_argument('vm_name', help='Name of virtual machine')
parser.add_argument('--r', dest='record',
default=False, help='Record a session to a log file')
parser.add_argument('--p', dest='replay',
default=False, help='Replay a session from a log file')
args = parser.parse_args()
if args.record and args.replay:
raise Exception("Cannot simultaneously record and replay.")
logging.info("Connecting to virtual machine")
try:
vbox = virtualbox.VirtualBox()
vm = vbox.find_machine(args.vm_name)
session = vm.create_session()
except:
raise Exception("Could not find virtual machine %s. Please make sure it exists." % args.vm_name)
if args.record:
try:
logging.info("Registering to receive keyboard and mouse events")
session.console.keyboard.register_on_guest_keyboard(record_keyboard)
session.console.mouse.register_on_guest_mouse(record_mouse)
except:
raise Exception("Could not register with virtual machine %s. Please make sure it exists." % args.vm_name)
f = open(args.record,"w")
last_record_log = datetime.datetime.now()
logging.info("Recording... Press <ENTER> to stop.")
stop = raw_input()
f.close()
elif args.replay:
try:
f = open(args.replay,"r")
except:
raise Exception("Could not find log file %s." % args.replay)
for line in f.readlines():
line = line.replace("\n","")
line_split = line.strip().split(" ")
time_delta = float(line_split[0])
event_type = line_split[1]
options = line_split[2:]
time.sleep(time_delta)
if event_type == "M":
logging.info("Executing mouse %s" % line)
session.console.mouse.put_mouse_event(2*int(options[1]), 2*int(options[2]), int(options[3]), int(options[4]), int(options[0]))
if event_type == "K":
logging.info("Executing keyboard %s" % line)
session.console.keyboard.put_scancodes([int(x) for x in options])
else:
raise Exception("You must specify either --r to record or --p to replay.")
if __name__ == "__main__":
    main()
| mit | 83,888,517,469,684,080 | 32.642202 | 142 | 0.603382 | false |
appop/bitcoin | contrib/seeds/makeseeds.py | 1 | 5719 | #!/usr/bin/env python3
# Copyright (c) 2013-2017 The nealcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
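# Typical invocation (illustrative; the file names are assumptions, since the
# script simply reads the seeder's output from stdin and writes the filtered
# seed list to stdout):
#   python3 makeseeds.py < seeds_main.txt > nodes_main.txt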
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
"130.211.129.106", "178.63.107.226",
"83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6",
"54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211",
"54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214",
"54.94.195.96", "54.94.200.247"
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/Satoshi:0.12.(0|1|99)/|/Satoshi:0.13.(0|1|2|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
# Skip entries with valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple nealcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| mit | -6,261,724,423,219,290,000 | 32.25 | 186 | 0.561287 | false |
mamrhein/identifiers | src/identifiers/finance.py | 1 | 6972 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# Name: finance
# Purpose: International identifiers for tradable financial assets
#
# Author: Michael Amrhein ([email protected])
#
# Copyright: (c) 2016 Michael Amrhein
# License: This program is part of a larger application. For license
# details please read the file LICENSE.TXT provided together
# with the application.
# ---------------------------------------------------------------------------
# $Source$
# $Revision$
"""International identifiers for tradable financial assets"""
# standard library imports
from string import ascii_uppercase, digits
from typing import Tuple
# third-party imports
from iso3166 import countries
# local imports
from .identifier import Identifier
from .luhn import luhn
from .micutils import get_mic_record
_ALPHABET = digits + ascii_uppercase
class MIC(Identifier):
"""Market Identifier Code
A unique identification code used to identify securities trading
exchanges, regulated and non-regulated trading markets.
Each MIC is a four alpha character code, defined in ISO 10383.
"""
__slots__ = ()
# noinspection PyMissingConstructor
def __init__(self, mic: str) -> None:
"""
Args:
mic (str): string representation of the MIC
Returns:
:class:`MIC` instance
Raises:
TypeError: given `mic` is not an instance of str
ValueError: given `mic` not found in the registry
"""
if not isinstance(mic, str):
raise TypeError("Argument must be instance of 'str'.")
mic = mic.strip()
try:
get_mic_record(mic)
except KeyError:
raise ValueError(f"Unknown MIC: '{mic}'.")
self._id = mic
def __str__(self) -> str:
"""str(self)"""
return self._id
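# Usage sketch (comment only; "XNAS" is the published ISO 10383 code for the
# Nasdaq stock market and is used here purely as sample input, it is not
# defined by this module):
#
#     mic = MIC("XNAS")   # raises ValueError for codes missing from the registry
#     str(mic)            # -> 'XNAS'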
class ISIN(Identifier):
"""International Securities Identification Number
An International Securities Identification Number uniquely identifies a
tradable financial asset, a.k.a security.
As defined in ISO 6166, each ISIN consists of a two-letter ISO 3166-1
Country Code for the issuing country, followed by nine alpha-numeric
characters (the National Securities Identifying Number, or NSIN, which
identifies the security), and one numerical check digit, calculated by the
Luhn algorithm.
"""
__slots__ = ()
@staticmethod
def calc_check_digit(country_code: str, nsin: str) -> str:
"""Calculate ISIN check digit."""
return str(luhn(country_code + nsin))
@property
def country_code(self) -> str:
"""Return the ISIN's Country Code."""
return self._id[:2]
@property
def check_digit(self) -> str:
"""Return the ISIN's check digits."""
return self._id[-1]
@property
def nsin(self) -> str:
"""Return the ISIN's National Securities Identifying Number."""
return self._id[2:-1]
def elements(self) -> Tuple[str, str, str]:
"""Return the ISIN's Country Code, National Securities Identifying
Number and check digit as tuple."""
return self.country_code, self.nsin, self.check_digit
# noinspection PyMissingConstructor
def __init__(self, *args: str) -> None:
"""Instances of :class:`ISIN` can be created in two ways, by providing
a Unicode string representation of an ISIN or by providing a country
code and a national securities identifying number.
**1. Form**
Args:
isin (str): string representation of an ISIN
Returns:
instance of :class:`ISIN`
Raises:
TypeError: given `isin` is not a `Unicode string`
ValueError: given `isin` contains an unknown country code
ValueError: given `isin` contains a wrong check digit
ValueError: given `isin` must be 12 characters long
ValueError: given `isin` contains invalid character(s)
**2. Form**
Args:
country_code (str): 2-character country code
according to ISO 3166
nsin (str): national securities identifying
number
Returns:
instance of :class:`ISIN`
Raises:
TypeError: invalid number of arguments
TypeError: given `country_code` is not a `Unicode string`
ValueError: given `country_code` contains an invalid or unknown
country code
TypeError: given `nsin` is not a `Unicode string`
ValueError: length of given `nsin` not valid
ValueError: given `nsin` contains invalid character(s)
"""
n_args = len(args)
if n_args == 1:
arg0 = args[0]
if not isinstance(arg0, str):
raise TypeError("Argument must be instance of 'str'.")
arg0 = arg0.strip()
if len(arg0) != 12:
raise ValueError('Invalid ISIN format: '
'given string must be 12 characters long.')
country_code = arg0[:2]
try:
countries.get(country_code)
except KeyError:
raise ValueError(f"Unknown country code: '{country_code}'.")
nsin = arg0[2:-1]
check_digit = self.__class__.calc_check_digit(country_code, nsin)
if check_digit != arg0[-1]:
raise ValueError("Wrong check digit; should be "
f"'{check_digit}'.")
self._id = arg0
elif n_args == 2:
arg0 = args[0]
if not isinstance(arg0, str):
raise TypeError("Country code must be instance of 'str'.")
if len(arg0) != 2:
raise ValueError("Country code must be a 2-character string.")
country_code = arg0
try:
countries.get(country_code)
except KeyError:
raise ValueError(f"Unknown country code: '{country_code}'.")
arg1 = args[1]
if isinstance(arg1, str):
len_nsin = len(arg1)
if len_nsin == 9:
nsin = arg1
elif 6 <= len_nsin < 9:
nsin = arg1.rjust(9, '0')
else:
raise ValueError("Given NSIN must contain between 6 and 9"
" characters.")
else:
raise TypeError("Given nsin must be instance of 'str'.")
check_digit = self.__class__.calc_check_digit(country_code, nsin)
self._id = ''.join((country_code, nsin, check_digit))
else:
raise TypeError('Invalid number of arguments.')
def __str__(self) -> str:
"""str(self)"""
return self._id
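# A small usage sketch (added for illustration; the NSIN below is the widely
# published Apple Inc. identifier and serves purely as sample input, it is
# not defined anywhere in this package):
if __name__ == "__main__":      # pragma: no cover
    # Build an ISIN from a country code and NSIN; the Luhn check digit is
    # appended automatically by calc_check_digit().
    example = ISIN("US", "037833100")
    print(example)
    # Parsing the string form back re-validates the check digit and lets us
    # take the identifier apart into (country code, NSIN, check digit).
    print(ISIN(str(example)).elements())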
| bsd-2-clause | 1,852,831,554,744,964,400 | 32.681159 | 78 | 0.562966 | false |
vesellov/bitdust.devel | blockchain/pybc/token.py | 1 | 13459 | #!/usr/bin/env python2.7
# token.py: a digital token implemented on top of pybc.coin
#
#------------------------------------------------------------------------------
from __future__ import absolute_import
import logging
#------------------------------------------------------------------------------
from . import json_coin
from . import transactions
from . import util
#------------------------------------------------------------------------------
def pack_token(token, payloads):
"""
"""
return dict(
t=token,
p=payloads,
)
def unpack_token(json_data):
"""
"""
try:
return json_data['t'], json_data['p']
except (TypeError, KeyError):
return None, None
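# Round-trip sketch (illustrative values only): pack_token() builds the small
# {'t': token_id, 'p': payloads} dict that gets stored as the JSON data of a
# transaction input/output, and unpack_token() takes it apart again,
# returning (None, None) when the JSON data carries no token record.
#
#     packed = pack_token('token-123', ['payload-a'])
#     unpack_token(packed)             # -> ('token-123', ['payload-a'])
#     unpack_token({'other': 'json'})  # -> (None, None)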
class TokenRecord(object):
"""
"""
def __init__(self, block_hash=None, transaction_hash=None):
"""
"""
self.block_hash = block_hash
self.transaction_hash = transaction_hash
self.token_id = None
self.amount = None
self.address = None
self.prev_transaction_hash = None
self.prev_output_index = None
self.prev_amount = None
self.prev_address = None
self.input_payloads = []
self.output_payloads = []
def __str__(self):
return "'{}' with {} addressed from {} to {}".format(
self.token_id,
self.value(),
util.bytes2string(self.prev_address or '') or '<own balance>',
util.bytes2string(self.address or '') or '<own balance>',
)
def add_output(self, output_tuple):
"""
"""
self.amount, self.address, _ = output_tuple
output_token_id, self.output_payloads = unpack_token(output_tuple[2])
if output_token_id is None:
raise ValueError('output json data does not contain token record')
if self.token_id is None:
self.token_id = output_token_id
if output_token_id != self.token_id:
raise ValueError('output token ID does not match to input')
def add_input(self, input_tuple):
"""
"""
input_token_id, self.input_payloads = unpack_token(input_tuple[4])
if input_token_id is None:
raise ValueError('input json data does not contain token record')
if self.token_id is None:
self.token_id = input_token_id
if input_token_id != self.token_id:
raise ValueError('input token ID does not match to output')
self.prev_transaction_hash, self.prev_output_index, self.prev_amount, self.prev_address, _ = input_tuple
def value(self):
"""
"""
return self.amount or self.prev_amount
class TokenProfile(object):
"""
"""
def __init__(self, token_records=[]):
self.token_id = None
self.records = []
for t_record in token_records:
self.add_record(t_record)
logging.info('\n{}'.format(str(self)))
def __str__(self):
"""
"""
lines = []
lines.append("---TokenProfile {}---".format(self.token_id))
lines.append("{} records".format(len(self.records)))
for i, record in enumerate(self.records):
lines.append("\t{}: {}".format(i, str(record)))
lines.append('Owner: {}'.format(util.bytes2string(self.owner().address)))
lines.append('Creator: {}'.format(util.bytes2string(self.creator().address)))
return "\n".join(lines)
def add_record(self, token_record):
if token_record in self.records:
raise ValueError('duplicated token record')
if self.token_id is None:
self.token_id = token_record.token_id
if self.token_id != token_record.token_id:
raise ValueError('invalid token ID, not matching with first record')
if not self.records:
self.records.append(token_record)
return
if token_record.prev_address is None:
# this is "create new token" record
self.records.insert(0, token_record)
return
if token_record.address is None:
# this is "delete existing token" record
self.records.append(token_record)
return
for i, existing_record in enumerate(self.records):
if existing_record.prev_address is None:
if existing_record.address == token_record.prev_address:
# put after the first record
self.records.insert(i + 1, token_record)
return
if existing_record.address is None:
if existing_record.prev_address == token_record.address:
# put before the last record
self.records.insert(i, token_record)
return
if existing_record.address == token_record.prev_address:
# put after matched record in the middle
self.records.insert(i, token_record)
return
if existing_record.prev_address == token_record.address:
# put before matched record in the middle
self.records.insert(i, token_record)
return
# BAD CASE: put it just before the last record
self.records.insert(-1, token_record)
def creator(self):
"""
"""
return self.records[0]
def owner(self):
"""
"""
return self.records[-1]
class TokenBlockchain(json_coin.JsonCoinBlockchain):
"""
"""
def iterate_records(self, include_inputs=True):
with self.lock:
for block in self.longest_chain():
for transaction_bytes in transactions.unpack_transactions(block.payload):
json_transaction = json_coin.JsonTransaction.from_bytes(transaction_bytes)
token_records = dict()
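                    # Maps token_id -> [input_tuple, output_tuple] collected from this transaction.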
if include_inputs:
for tr_input in json_transaction.inputs:
token_id, _ = unpack_token(tr_input[4])
if not token_id:
continue
if token_id in token_records:
raise ValueError('duplicated token ID in transaction inputs')
token_records[token_id] = [tr_input, None, ]
for tr_output in json_transaction.outputs:
token_id, _ = unpack_token(tr_output[2])
if not token_id:
continue
if token_id not in token_records:
token_records[token_id] = [None, None, ]
token_records[token_id][1] = tr_output
for token_id, input_output in token_records.items():
tr_input, tr_output = input_output
token_record = TokenRecord(
block_hash=block.block_hash(),
transaction_hash=json_transaction.transaction_hash(),
)
if tr_input is not None:
try:
token_record.add_input(tr_input)
except ValueError:
logging.exception('Failed to add an input to the token record: {}'.format(tr_input))
continue
if tr_output is not None:
try:
token_record.add_output(tr_output)
except ValueError:
logging.exception('Failed to add an output to the token record: {}'.format(tr_output))
continue
yield token_record
def iterate_records_by_address(self, address, include_inputs=True):
"""
"""
with self.lock:
for token_record in self.iterate_records(include_inputs=include_inputs):
if token_record.address == address:
yield token_record
def iterate_records_by_token(self, token_id, include_inputs=True):
"""
"""
with self.lock:
for token_record in self.iterate_records(include_inputs=include_inputs):
if token_record.token_id == token_id:
yield token_record
def get_records_by_token(self, token):
"""
"""
with self.lock:
return [t for t in self.iterate_records_by_token(token, include_inputs=True)]
def is_records_for_address(self, address):
"""
"""
with self.lock:
for _ in self.iterate_records_by_address(address, include_inputs=False):
return True
return False
def is_records_for_token(self, token):
"""
"""
with self.lock:
for _ in self.iterate_records_by_token(token, include_inputs=False):
return True
return False
def get_token_profile(self, token):
"""
"""
with self.lock:
try:
return TokenProfile(self.get_records_by_token(token))
except ValueError:
return None
def get_token_profiles_by_owner(self, address):
"""
"""
with self.lock:
result = []
related_token_ids = set()
token_records_by_id = dict()
for token_record in self.iterate_records(include_inputs=True):
if token_record.token_id not in token_records_by_id:
token_records_by_id[token_record.token_id] = []
token_records_by_id[token_record.token_id].append(token_record)
if token_record.address == address:
related_token_ids.add(token_record.token_id)
for token_id in related_token_ids:
result.append(TokenProfile(token_records_by_id[token_id][:]))
logging.info('{} tokens was found'.format(len(result)))
return result
class TokenWallet(json_coin.JsonWallet):
"""
"""
def tokens_list(self):
return self.blockchain.get_token_profiles_by_owner(self.get_address())
def token_create(self, token, value, address=None, fee=1, payload=None, auth_data=None):
"""
"""
with self.lock:
if self.blockchain.is_records_for_token(token):
raise Exception('found existing token, but all tokens must be unique')
return self.make_simple_transaction(
value,
address or self.get_address(),
fee=fee,
json_data=pack_token(token, [payload, ]),
auth_data=auth_data,
spendable_filter=self._skip_all_tokens,
)
def token_delete(self, token, address=None, fee=1, auth_data=None):
"""
"""
with self.lock:
token_profile = self.blockchain.get_token_profile(token)
if not token_profile:
                raise Exception('this token does not exist')
if token_profile.owner().address != self.get_address():
                raise Exception('this token does not belong to you')
return self.make_simple_transaction(
token_profile.owner().amount,
address or self.get_address(),
fee=fee,
json_data=None,
auth_data=auth_data,
spendable_filter=lambda tr_input: self._skip_tokens_except_one(token, tr_input),
)
def token_transfer(self, token, new_address, new_value=None, fee=1, payload=None, payload_history=True, auth_data=None):
"""
"""
with self.lock:
token_profile = self.blockchain.get_token_profile(token)
if not token_profile:
                raise Exception('this token does not exist')
if token_profile.owner().address != self.get_address():
                raise Exception('this token does not belong to you')
payloads = token_profile.owner().output_payloads
if payload:
if payload_history:
payloads += [payload, ]
else:
payloads = [payload, ]
new_value = new_value or token_profile.owner().amount
return self.make_simple_transaction(
new_value,
new_address,
fee=fee,
json_data=pack_token(token, payloads),
auth_data=auth_data,
spendable_filter=lambda tr_input: self._skip_tokens_except_one(token, tr_input),
)
def _skip_all_tokens(self, tr_input):
"""
Filter input tuple and return bool result.
If input does not contain a token, then those coins are spendable.
"""
token_id, _ = unpack_token(tr_input[4])
return token_id is None
def _skip_tokens_except_one(self, spendable_token, tr_input):
"""
Filter input tuple and return bool result.
If input does not contain a token or we want to "delete/sell" this token,
then those coins are spendable.
"""
token_id, _ = unpack_token(tr_input[4])
return token_id is None or token_id == spendable_token
| agpl-3.0 | -8,504,476,359,857,393,000 | 36.80618 | 124 | 0.526042 | false |
aurex-linux/virt-manager | virtManager/manager.py | 1 | 40214 | #
# Copyright (C) 2006-2008, 2013 Red Hat, Inc.
# Copyright (C) 2006 Daniel P. Berrange <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
import logging
# pylint: disable=E0611
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GdkPixbuf
# pylint: enable=E0611
from virtinst import util
from virtManager import vmmenu
from virtManager import uiutil
from virtManager.connection import vmmConnection
from virtManager.baseclass import vmmGObjectUI
from virtManager.graphwidgets import CellRendererSparkline
import libvirt
# Number of data points for performance graphs
GRAPH_LEN = 40
# fields in the tree model data set
(ROW_HANDLE,
ROW_SORT_KEY,
ROW_MARKUP,
ROW_STATUS_ICON,
ROW_HINT,
ROW_IS_CONN,
ROW_IS_CONN_CONNECTED,
ROW_IS_VM,
ROW_IS_VM_RUNNING,
ROW_COLOR,
ROW_INSPECTION_OS_ICON) = range(11)
# Columns in the tree view
(COL_NAME,
COL_GUEST_CPU,
COL_HOST_CPU,
COL_MEM,
COL_DISK,
COL_NETWORK) = range(6)
def _style_get_prop(widget, propname):
value = GObject.Value()
value.init(GObject.TYPE_INT)
widget.style_get_property(propname, value)
return value.get_int()
def _get_inspection_icon_pixbuf(vm, w, h):
# libguestfs gives us the PNG data as a string.
png_data = vm.inspection.icon
if png_data is None:
return None
try:
pb = GdkPixbuf.PixbufLoader()
pb.set_size(w, h)
pb.write(png_data)
pb.close()
return pb.get_pixbuf()
except:
logging.exception("Error loading inspection icon data")
vm.inspection.icon = None
return None
class vmmManager(vmmGObjectUI):
__gsignals__ = {
"action-show-connect": (GObject.SignalFlags.RUN_FIRST, None, []),
"action-show-domain": (GObject.SignalFlags.RUN_FIRST, None, [str, str]),
"action-show-about": (GObject.SignalFlags.RUN_FIRST, None, []),
"action-show-host": (GObject.SignalFlags.RUN_FIRST, None, [str]),
"action-show-preferences": (GObject.SignalFlags.RUN_FIRST, None, []),
"action-show-create": (GObject.SignalFlags.RUN_FIRST, None, [str]),
"action-suspend-domain": (GObject.SignalFlags.RUN_FIRST, None, [str, str]),
"action-resume-domain": (GObject.SignalFlags.RUN_FIRST, None, [str, str]),
"action-run-domain": (GObject.SignalFlags.RUN_FIRST, None, [str, str]),
"action-shutdown-domain": (GObject.SignalFlags.RUN_FIRST, None, [str, str]),
"action-reset-domain": (GObject.SignalFlags.RUN_FIRST, None, [str, str]),
"action-reboot-domain": (GObject.SignalFlags.RUN_FIRST, None, [str, str]),
"action-destroy-domain": (GObject.SignalFlags.RUN_FIRST, None, [str, str]),
"action-save-domain": (GObject.SignalFlags.RUN_FIRST, None, [str, str]),
"action-migrate-domain": (GObject.SignalFlags.RUN_FIRST, None, [str, str]),
"action-delete-domain": (GObject.SignalFlags.RUN_FIRST, None, [str, str]),
"action-clone-domain": (GObject.SignalFlags.RUN_FIRST, None, [str, str]),
"action-exit-app": (GObject.SignalFlags.RUN_FIRST, None, []),
"manager-closed": (GObject.SignalFlags.RUN_FIRST, None, []),
"manager-opened": (GObject.SignalFlags.RUN_FIRST, None, []),
"remove-conn": (GObject.SignalFlags.RUN_FIRST, None, [str]),
"add-default-conn": (GObject.SignalFlags.RUN_FIRST, None, []),
}
def __init__(self):
vmmGObjectUI.__init__(self, "manager.ui", "vmm-manager")
self.ignore_pause = False
# Mapping of VM UUID -> tree model rows to
# allow O(1) access instead of O(n)
self.rows = {}
w, h = self.config.get_manager_window_size()
self.topwin.set_default_size(w or 550, h or 550)
self.prev_position = None
self.vmmenu = vmmenu.VMActionMenu(self, self.current_vm)
self.connmenu = Gtk.Menu()
self.connmenu_items = {}
self.builder.connect_signals({
"on_menu_view_guest_cpu_usage_activate":
self.toggle_stats_visible_guest_cpu,
"on_menu_view_host_cpu_usage_activate":
self.toggle_stats_visible_host_cpu,
"on_menu_view_memory_usage_activate":
self.toggle_stats_visible_memory_usage,
"on_menu_view_disk_io_activate" :
self.toggle_stats_visible_disk,
"on_menu_view_network_traffic_activate":
self.toggle_stats_visible_network,
"on_vm_manager_delete_event": self.close,
"on_vmm_manager_configure_event": self.window_resized,
"on_menu_file_add_connection_activate": self.new_conn,
"on_menu_new_vm_activate": self.new_vm,
"on_menu_file_quit_activate": self.exit_app,
"on_menu_file_close_activate": self.close,
"on_vmm_close_clicked": self.close,
"on_vm_open_clicked": self.show_vm,
"on_vm_run_clicked": self.start_vm,
"on_vm_new_clicked": self.new_vm,
"on_vm_shutdown_clicked": self.poweroff_vm,
"on_vm_pause_clicked": self.pause_vm_button,
"on_menu_edit_details_activate": self.show_vm,
"on_menu_edit_delete_activate": self.do_delete,
"on_menu_host_details_activate": self.show_host,
"on_vm_list_row_activated": self.show_vm,
"on_vm_list_button_press_event": self.popup_vm_menu_button,
"on_vm_list_key_press_event": self.popup_vm_menu_key,
"on_menu_edit_preferences_activate": self.show_preferences,
"on_menu_help_about_activate": self.show_about,
})
# There seem to be ref counting issues with calling
# list.get_column, so avoid it
self.diskcol = None
self.netcol = None
self.memcol = None
self.guestcpucol = None
self.hostcpucol = None
self.spacer_txt = None
self.init_vmlist()
self.init_stats()
self.init_toolbar()
self.init_context_menus()
self.update_current_selection()
self.widget("vm-list").get_selection().connect(
"changed", self.update_current_selection)
self.max_disk_rate = 10.0
self.max_net_rate = 10.0
# Initialize stat polling columns based on global polling
# preferences (we want signal handlers for this)
self.enable_polling(COL_GUEST_CPU)
self.enable_polling(COL_DISK)
self.enable_polling(COL_NETWORK)
self.enable_polling(COL_MEM)
# Select first list entry
vmlist = self.widget("vm-list")
if len(vmlist.get_model()) != 0:
vmlist.get_selection().select_iter(
vmlist.get_model().get_iter_first())
# Queue up the default connection detector
self.idle_emit("add-default-conn")
##################
# Common methods #
##################
def show(self):
vis = self.is_visible()
self.topwin.present()
if vis:
return
logging.debug("Showing manager")
if self.prev_position:
self.topwin.move(*self.prev_position)
self.prev_position = None
self.emit("manager-opened")
def close(self, src_ignore=None, src2_ignore=None):
if not self.is_visible():
return
logging.debug("Closing manager")
self.prev_position = self.topwin.get_position()
self.topwin.hide()
self.emit("manager-closed")
return 1
def _cleanup(self):
self.rows = None
self.diskcol = None
self.guestcpucol = None
self.memcol = None
self.hostcpucol = None
self.netcol = None
self.vmmenu.destroy() # pylint: disable=E1101
self.vmmenu = None
self.connmenu.destroy()
self.connmenu = None
self.connmenu_items = None
def is_visible(self):
return bool(self.topwin.get_visible())
def set_startup_error(self, msg):
self.widget("vm-notebook").set_current_page(1)
self.widget("startup-error-label").set_text(msg)
################
# Init methods #
################
def init_stats(self):
self.add_gconf_handle(
self.config.on_vmlist_guest_cpu_usage_visible_changed(
self.toggle_guest_cpu_usage_visible_widget))
self.add_gconf_handle(
self.config.on_vmlist_host_cpu_usage_visible_changed(
self.toggle_host_cpu_usage_visible_widget))
self.add_gconf_handle(
self.config.on_vmlist_memory_usage_visible_changed(
self.toggle_memory_usage_visible_widget))
self.add_gconf_handle(
self.config.on_vmlist_disk_io_visible_changed(
self.toggle_disk_io_visible_widget))
self.add_gconf_handle(
self.config.on_vmlist_network_traffic_visible_changed(
self.toggle_network_traffic_visible_widget))
# Register callbacks with the global stats enable/disable values
# that disable the associated vmlist widgets if reporting is disabled
self.add_gconf_handle(
self.config.on_stats_enable_cpu_poll_changed(
self.enable_polling, COL_GUEST_CPU))
self.add_gconf_handle(
self.config.on_stats_enable_disk_poll_changed(
self.enable_polling, COL_DISK))
self.add_gconf_handle(
self.config.on_stats_enable_net_poll_changed(
self.enable_polling, COL_NETWORK))
self.add_gconf_handle(
self.config.on_stats_enable_memory_poll_changed(
self.enable_polling, COL_MEM))
self.toggle_guest_cpu_usage_visible_widget()
self.toggle_host_cpu_usage_visible_widget()
self.toggle_memory_usage_visible_widget()
self.toggle_disk_io_visible_widget()
self.toggle_network_traffic_visible_widget()
def init_toolbar(self):
self.widget("vm-new").set_icon_name("vm_new")
self.widget("vm-open").set_icon_name("icon_console")
menu = vmmenu.VMShutdownMenu(self, self.current_vm)
self.widget("vm-shutdown").set_icon_name("system-shutdown")
self.widget("vm-shutdown").set_menu(menu)
tool = self.widget("vm-toolbar")
tool.set_property("icon-size", Gtk.IconSize.LARGE_TOOLBAR)
for c in tool.get_children():
c.set_homogeneous(False)
def init_context_menus(self):
def add_to_menu(idx, text, icon, cb):
if text[0:3] == 'gtk':
item = Gtk.ImageMenuItem.new_from_stock(text, None)
else:
item = Gtk.ImageMenuItem.new_with_mnemonic(text)
if icon:
item.set_image(icon)
if cb:
item.connect("activate", cb)
self.connmenu.add(item)
self.connmenu_items[idx] = item
# Build connection context menu
add_to_menu("create", Gtk.STOCK_NEW, None, self.new_vm)
add_to_menu("connect", Gtk.STOCK_CONNECT, None, self.open_conn)
add_to_menu("disconnect", Gtk.STOCK_DISCONNECT, None,
self.close_conn)
self.connmenu.add(Gtk.SeparatorMenuItem())
add_to_menu("delete", Gtk.STOCK_DELETE, None, self.do_delete)
self.connmenu.add(Gtk.SeparatorMenuItem())
add_to_menu("details", _("D_etails"), None, self.show_host)
self.connmenu.show_all()
def init_vmlist(self):
vmlist = self.widget("vm-list")
self.widget("vm-notebook").set_show_tabs(False)
rowtypes = []
rowtypes.insert(ROW_HANDLE, object) # backing object
rowtypes.insert(ROW_SORT_KEY, str) # object name
rowtypes.insert(ROW_MARKUP, str) # row markup text
rowtypes.insert(ROW_STATUS_ICON, str) # status icon name
rowtypes.insert(ROW_HINT, str) # row tooltip
rowtypes.insert(ROW_IS_CONN, bool) # if object is a connection
rowtypes.insert(ROW_IS_CONN_CONNECTED, bool) # if conn is connected
rowtypes.insert(ROW_IS_VM, bool) # if row is VM
rowtypes.insert(ROW_IS_VM_RUNNING, bool) # if VM is running
rowtypes.insert(ROW_COLOR, str) # row markup color string
rowtypes.insert(ROW_INSPECTION_OS_ICON, GdkPixbuf.Pixbuf) # OS icon
model = Gtk.TreeStore(*rowtypes)
vmlist.set_model(model)
vmlist.set_tooltip_column(ROW_HINT)
vmlist.set_headers_visible(True)
vmlist.set_level_indentation(
-(_style_get_prop(vmlist, "expander-size") + 3))
nameCol = Gtk.TreeViewColumn(_("Name"))
nameCol.set_expand(True)
nameCol.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
nameCol.set_spacing(6)
nameCol.set_sort_column_id(COL_NAME)
vmlist.append_column(nameCol)
status_icon = Gtk.CellRendererPixbuf()
status_icon.set_property("stock-size", Gtk.IconSize.DND)
nameCol.pack_start(status_icon, False)
nameCol.add_attribute(status_icon, 'icon-name', ROW_STATUS_ICON)
nameCol.add_attribute(status_icon, 'visible', ROW_IS_VM)
inspection_os_icon = Gtk.CellRendererPixbuf()
nameCol.pack_start(inspection_os_icon, False)
nameCol.add_attribute(inspection_os_icon, 'pixbuf',
ROW_INSPECTION_OS_ICON)
nameCol.add_attribute(inspection_os_icon, 'visible', ROW_IS_VM)
name_txt = Gtk.CellRendererText()
nameCol.pack_start(name_txt, True)
nameCol.add_attribute(name_txt, 'markup', ROW_MARKUP)
nameCol.add_attribute(name_txt, 'foreground', ROW_COLOR)
self.spacer_txt = Gtk.CellRendererText()
self.spacer_txt.set_property("ypad", 4)
self.spacer_txt.set_property("visible", False)
nameCol.pack_end(self.spacer_txt, False)
def make_stats_column(title, colnum):
col = Gtk.TreeViewColumn(title)
col.set_min_width(140)
txt = Gtk.CellRendererText()
txt.set_property("ypad", 4)
col.pack_start(txt, True)
col.add_attribute(txt, 'visible', ROW_IS_CONN)
img = CellRendererSparkline()
img.set_property("xpad", 6)
img.set_property("ypad", 12)
img.set_property("reversed", True)
col.pack_start(img, True)
col.add_attribute(img, 'visible', ROW_IS_VM)
col.set_sort_column_id(colnum)
vmlist.append_column(col)
return col
self.guestcpucol = make_stats_column(_("CPU usage"), COL_GUEST_CPU)
self.hostcpucol = make_stats_column(_("Host CPU usage"), COL_HOST_CPU)
self.memcol = make_stats_column(_("Memory usage"), COL_MEM)
self.diskcol = make_stats_column(_("Disk I/O"), COL_DISK)
self.netcol = make_stats_column(_("Network I/O"), COL_NETWORK)
model.set_sort_func(COL_NAME, self.vmlist_name_sorter)
model.set_sort_func(COL_GUEST_CPU, self.vmlist_guest_cpu_usage_sorter)
model.set_sort_func(COL_HOST_CPU, self.vmlist_host_cpu_usage_sorter)
model.set_sort_func(COL_MEM, self.vmlist_memory_usage_sorter)
model.set_sort_func(COL_DISK, self.vmlist_disk_io_sorter)
model.set_sort_func(COL_NETWORK, self.vmlist_network_usage_sorter)
model.set_sort_column_id(COL_NAME, Gtk.SortType.ASCENDING)
##################
# Helper methods #
##################
def current_row(self):
return uiutil.get_list_selection(self.widget("vm-list"))
def current_vm(self):
row = self.current_row()
if not row or row[ROW_IS_CONN]:
return None
return row[ROW_HANDLE]
def current_conn(self):
row = self.current_row()
if not row:
return None
handle = row[ROW_HANDLE]
if row[ROW_IS_CONN]:
return handle
else:
return handle.conn
def current_vmuuid(self):
vm = self.current_vm()
if vm is None:
return None
return vm.get_uuid()
def current_conn_uri(self, default_selection=False):
vmlist = self.widget("vm-list")
model = vmlist.get_model()
conn = self.current_conn()
if conn is None and default_selection:
# Nothing selected, use first connection row
for row in model:
if row[ROW_IS_CONN]:
conn = row[ROW_HANDLE]
break
if conn:
return conn.get_uri()
return None
####################
# Action listeners #
####################
def window_resized(self, ignore, event):
# Sometimes dimensions change when window isn't visible
if not self.is_visible():
return
self.config.set_manager_window_size(event.width, event.height)
def exit_app(self, src_ignore=None, src2_ignore=None):
self.emit("action-exit-app")
def new_conn(self, src_ignore=None):
self.emit("action-show-connect")
def new_vm(self, src_ignore=None):
self.emit("action-show-create", self.current_conn_uri())
def show_about(self, src_ignore):
self.emit("action-show-about")
def show_preferences(self, src_ignore):
self.emit("action-show-preferences")
def show_host(self, src_ignore):
uri = self.current_conn_uri(default_selection=True)
self.emit("action-show-host", uri)
def show_vm(self, ignore, ignore2=None, ignore3=None):
conn = self.current_conn()
vm = self.current_vm()
if conn is None:
return
if vm:
self.emit("action-show-domain", conn.get_uri(), vm.get_uuid())
else:
if not self.open_conn():
self.emit("action-show-host", conn.get_uri())
def do_delete(self, ignore=None):
conn = self.current_conn()
vm = self.current_vm()
if vm is None:
self._do_delete_conn(conn)
else:
self.emit("action-delete-domain", conn.get_uri(), vm.get_uuid())
def _do_delete_conn(self, conn):
if conn is None:
return
result = self.err.yes_no(_("This will remove the connection:\n\n%s\n\n"
"Are you sure?") % conn.get_uri())
if not result:
return
self.emit("remove-conn", conn.get_uri())
def set_pause_state(self, state):
src = self.widget("vm-pause")
try:
self.ignore_pause = True
src.set_active(state)
finally:
self.ignore_pause = False
def pause_vm_button(self, src):
if self.ignore_pause:
return
do_pause = src.get_active()
# Set button state back to original value: just let the status
# update function fix things for us
self.set_pause_state(not do_pause)
if do_pause:
self.pause_vm(None)
else:
self.resume_vm(None)
def start_vm(self, ignore):
vm = self.current_vm()
if vm is not None:
self.emit("action-run-domain",
vm.conn.get_uri(), vm.get_uuid())
def poweroff_vm(self, ignore):
vm = self.current_vm()
if vm is not None:
self.emit("action-shutdown-domain",
vm.conn.get_uri(), vm.get_uuid())
def pause_vm(self, ignore):
vm = self.current_vm()
if vm is not None:
self.emit("action-suspend-domain",
vm.conn.get_uri(), vm.get_uuid())
def resume_vm(self, ignore):
vm = self.current_vm()
if vm is not None:
self.emit("action-resume-domain",
vm.conn.get_uri(), vm.get_uuid())
def close_conn(self, ignore):
conn = self.current_conn()
if conn.get_state() != vmmConnection.STATE_DISCONNECTED:
conn.close()
def open_conn(self, ignore=None):
conn = self.current_conn()
if conn.get_state() == vmmConnection.STATE_DISCONNECTED:
conn.open()
return True
####################################
# VM add/remove management methods #
####################################
def vm_row_key(self, vm):
return vm.get_uuid() + ":" + vm.conn.get_uri()
def vm_added(self, conn, vmuuid):
vm = conn.get_vm(vmuuid)
if self.vm_row_key(vm) in self.rows:
return
vm.connect("config-changed", self.vm_config_changed)
vm.connect("status-changed", self.vm_status_changed)
vm.connect("resources-sampled", self.vm_row_updated)
vm.connect("inspection-changed", self.vm_inspection_changed)
vmlist = self.widget("vm-list")
model = vmlist.get_model()
self._append_vm(model, vm, conn)
def vm_removed(self, conn, vmuuid):
vmlist = self.widget("vm-list")
model = vmlist.get_model()
parent = self.rows[conn.get_uri()].iter
for row in range(model.iter_n_children(parent)):
vm = model[model.iter_nth_child(parent, row)][ROW_HANDLE]
if vm.get_uuid() == vmuuid:
model.remove(model.iter_nth_child(parent, row))
del self.rows[self.vm_row_key(vm)]
break
def _build_conn_hint(self, conn):
hint = conn.get_uri()
if conn.state == conn.STATE_DISCONNECTED:
hint += " (%s)" % _("Double click to connect")
return hint
def _build_conn_markup(self, conn, name):
name = util.xml_escape(name)
text = name
if conn.state == conn.STATE_DISCONNECTED:
text += " - " + _("Not Connected")
elif conn.state == conn.STATE_CONNECTING:
text += " - " + _("Connecting...")
markup = "<span size='smaller'>%s</span>" % text
return markup
def _build_conn_color(self, conn):
color = "#000000"
if conn.state == conn.STATE_DISCONNECTED:
color = "#5b5b5b"
return color
def _build_vm_markup(self, name, status):
domtext = ("<span size='smaller' weight='bold'>%s</span>" %
util.xml_escape(name))
statetext = "<span size='smaller'>%s</span>" % status
return domtext + "\n" + statetext
def _build_row(self, conn, vm):
if conn:
name = conn.get_pretty_desc_inactive(False)
markup = self._build_conn_markup(conn, name)
status = ("<span size='smaller'>%s</span>" %
conn.get_state_text())
status_icon = None
hint = self._build_conn_hint(conn)
color = self._build_conn_color(conn)
os_icon = None
else:
name = vm.get_name_or_title()
status = vm.run_status()
markup = self._build_vm_markup(name, status)
status_icon = vm.run_status_icon_name()
hint = vm.get_description()
color = None
os_icon = _get_inspection_icon_pixbuf(vm, 16, 16)
row = []
row.insert(ROW_HANDLE, conn or vm)
row.insert(ROW_SORT_KEY, name)
row.insert(ROW_MARKUP, markup)
row.insert(ROW_STATUS_ICON, status_icon)
row.insert(ROW_HINT, util.xml_escape(hint))
row.insert(ROW_IS_CONN, bool(conn))
row.insert(ROW_IS_CONN_CONNECTED,
bool(conn) and conn.state != conn.STATE_DISCONNECTED)
row.insert(ROW_IS_VM, bool(vm))
row.insert(ROW_IS_VM_RUNNING, bool(vm) and vm.is_active())
row.insert(ROW_COLOR, color)
row.insert(ROW_INSPECTION_OS_ICON, os_icon)
return row
def _append_vm(self, model, vm, conn):
row_key = self.vm_row_key(vm)
if row_key in self.rows:
return
row = self._build_row(None, vm)
parent = self.rows[conn.get_uri()].iter
_iter = model.append(parent, row)
path = model.get_path(_iter)
self.rows[row_key] = model[path]
# Expand a connection when adding a vm to it
self.widget("vm-list").expand_row(model.get_path(parent), False)
def _append_conn(self, model, conn):
row = self._build_row(conn, None)
_iter = model.append(None, row)
path = model.get_path(_iter)
self.rows[conn.get_uri()] = model[path]
return _iter
def add_conn(self, engine_ignore, conn):
# Make sure error page isn't showing
self.widget("vm-notebook").set_current_page(0)
if conn.get_uri() in self.rows:
return
conn.connect("vm-added", self.vm_added)
conn.connect("vm-removed", self.vm_removed)
conn.connect("resources-sampled", self.conn_row_updated)
conn.connect("state-changed", self.conn_state_changed)
# add the connection to the treeModel
vmlist = self.widget("vm-list")
row = self._append_conn(vmlist.get_model(), conn)
vmlist.get_selection().select_iter(row)
# Try to make sure that 2 row descriptions don't collide
connrows = []
descs = []
for row in self.rows.values():
if row[ROW_IS_CONN]:
connrows.append(row)
for row in connrows:
descs.append(row[ROW_SORT_KEY])
for row in connrows:
conn = row[ROW_HANDLE]
name = row[ROW_SORT_KEY]
if descs.count(name) <= 1:
continue
newname = conn.get_pretty_desc_inactive(False, True)
self.conn_state_changed(conn, newname=newname)
def remove_conn(self, engine_ignore, uri):
model = self.widget("vm-list").get_model()
parent = self.rows[uri].iter
if parent is None:
return
child = model.iter_children(parent)
while child is not None:
del self.rows[self.vm_row_key(model[child][ROW_HANDLE])]
model.remove(child)
child = model.iter_children(parent)
model.remove(parent)
del self.rows[uri]
#############################
# State/UI updating methods #
#############################
def vm_row_updated(self, vm):
row = self.rows.get(self.vm_row_key(vm), None)
if row is None:
return
self.widget("vm-list").get_model().row_changed(row.path, row.iter)
def vm_config_changed(self, vm):
row = self.rows.get(self.vm_row_key(vm), None)
if row is None:
return
try:
name = vm.get_name_or_title()
status = vm.run_status()
row[ROW_SORT_KEY] = name
row[ROW_STATUS_ICON] = vm.run_status_icon_name()
row[ROW_IS_VM_RUNNING] = vm.is_active()
row[ROW_MARKUP] = self._build_vm_markup(name, status)
desc = vm.get_description()
if not uiutil.can_set_row_none:
desc = desc or ""
row[ROW_HINT] = util.xml_escape(desc)
except libvirt.libvirtError, e:
if util.exception_is_libvirt_error(e, "VIR_ERR_NO_DOMAIN"):
return
raise
self.vm_row_updated(vm)
def vm_status_changed(self, vm, oldstatus, newstatus):
ignore = newstatus
ignore = oldstatus
parent = self.rows[vm.conn.get_uri()].iter
vmlist = self.widget("vm-list")
model = vmlist.get_model()
missing = True
for row in range(model.iter_n_children(parent)):
_iter = model.iter_nth_child(parent, row)
if model[_iter][ROW_HANDLE] == vm:
missing = False
break
if missing:
self._append_vm(model, vm, vm.conn)
# Update run/shutdown/pause button states
self.update_current_selection()
self.vm_config_changed(vm)
def vm_inspection_changed(self, vm):
row = self.rows.get(self.vm_row_key(vm), None)
if row is None:
return
new_icon = _get_inspection_icon_pixbuf(vm, 16, 16)
if not uiutil.can_set_row_none:
new_icon = new_icon or ""
row[ROW_INSPECTION_OS_ICON] = new_icon
self.vm_row_updated(vm)
def conn_state_changed(self, conn, newname=None):
row = self.rows[conn.get_uri()]
if newname:
row[ROW_SORT_KEY] = newname
row[ROW_MARKUP] = self._build_conn_markup(conn, row[ROW_SORT_KEY])
row[ROW_IS_CONN_CONNECTED] = conn.state != conn.STATE_DISCONNECTED
row[ROW_COLOR] = self._build_conn_color(conn)
row[ROW_HINT] = self._build_conn_hint(conn)
if conn.get_state() in [vmmConnection.STATE_DISCONNECTED,
vmmConnection.STATE_CONNECTING]:
# Connection went inactive, delete any VM child nodes
parent = row.iter
if parent is not None:
model = self.widget("vm-list").get_model()
child = model.iter_children(parent)
while child is not None:
vm = model[child][ROW_HANDLE]
del self.rows[self.vm_row_key(vm)]
model.remove(child)
child = model.iter_children(parent)
self.conn_row_updated(conn)
self.update_current_selection()
def conn_row_updated(self, conn):
row = self.rows[conn.get_uri()]
self.max_disk_rate = max(self.max_disk_rate, conn.disk_io_max_rate())
self.max_net_rate = max(self.max_net_rate,
conn.network_traffic_max_rate())
self.widget("vm-list").get_model().row_changed(row.path, row.iter)
def change_run_text(self, can_restore):
if can_restore:
text = _("_Restore")
else:
text = _("_Run")
strip_text = text.replace("_", "")
self.vmmenu.change_run_text(text)
self.widget("vm-run").set_label(strip_text)
def update_current_selection(self, ignore=None):
vm = self.current_vm()
show_open = bool(vm)
show_details = bool(vm)
host_details = bool(len(self.rows))
show_run = bool(vm and vm.is_runable())
is_paused = bool(vm and vm.is_paused())
if is_paused:
show_pause = bool(vm and vm.is_unpauseable())
else:
show_pause = bool(vm and vm.is_pauseable())
show_shutdown = bool(vm and vm.is_stoppable())
if vm and vm.managedsave_supported:
self.change_run_text(vm.hasSavedImage())
self.widget("vm-open").set_sensitive(show_open)
self.widget("vm-run").set_sensitive(show_run)
self.widget("vm-shutdown").set_sensitive(show_shutdown)
self.widget("vm-shutdown").get_menu().update_widget_states(vm)
self.set_pause_state(is_paused)
self.widget("vm-pause").set_sensitive(show_pause)
self.widget("menu_edit_details").set_sensitive(show_details)
self.widget("menu_host_details").set_sensitive(host_details)
def popup_vm_menu_key(self, widget_ignore, event):
if Gdk.keyval_name(event.keyval) != "Menu":
return False
model, treeiter = self.widget("vm-list").get_selection().get_selected()
self.popup_vm_menu(model, treeiter, event)
return True
def popup_vm_menu_button(self, widget, event):
if event.button != 3:
return False
tup = widget.get_path_at_pos(int(event.x), int(event.y))
if tup is None:
return False
path = tup[0]
model = widget.get_model()
_iter = model.get_iter(path)
self.popup_vm_menu(model, _iter, event)
return False
def popup_vm_menu(self, model, _iter, event):
if model.iter_parent(_iter) is not None:
# Popup the vm menu
vm = model[_iter][ROW_HANDLE]
self.vmmenu.update_widget_states(vm)
self.vmmenu.popup( # pylint: disable=E1101
None, None, None, None, 0, event.time)
else:
# Pop up connection menu
conn = model[_iter][ROW_HANDLE]
disconn = (conn.get_state() == vmmConnection.STATE_DISCONNECTED)
conning = (conn.get_state() == vmmConnection.STATE_CONNECTING)
self.connmenu_items["create"].set_sensitive(not disconn)
self.connmenu_items["disconnect"].set_sensitive(not (disconn or
conning))
self.connmenu_items["connect"].set_sensitive(disconn)
self.connmenu_items["delete"].set_sensitive(disconn)
self.connmenu.popup(None, None, None, None, 0, event.time)
#################
# Stats methods #
#################
def vmlist_name_sorter(self, model, iter1, iter2, ignore):
return cmp(model[iter1][ROW_SORT_KEY], model[iter2][ROW_SORT_KEY])
def vmlist_guest_cpu_usage_sorter(self, model, iter1, iter2, ignore):
obj1 = model[iter1][ROW_HANDLE]
obj2 = model[iter2][ROW_HANDLE]
return cmp(obj1.guest_cpu_time_percentage(),
obj2.guest_cpu_time_percentage())
def vmlist_host_cpu_usage_sorter(self, model, iter1, iter2, ignore):
obj1 = model[iter1][ROW_HANDLE]
obj2 = model[iter2][ROW_HANDLE]
return cmp(obj1.host_cpu_time_percentage(),
obj2.host_cpu_time_percentage())
def vmlist_memory_usage_sorter(self, model, iter1, iter2, ignore):
obj1 = model[iter1][ROW_HANDLE]
obj2 = model[iter2][ROW_HANDLE]
return cmp(obj1.stats_memory(),
obj2.stats_memory())
def vmlist_disk_io_sorter(self, model, iter1, iter2, ignore):
obj1 = model[iter1][ROW_HANDLE]
obj2 = model[iter2][ROW_HANDLE]
return cmp(obj1.disk_io_rate(), obj2.disk_io_rate())
def vmlist_network_usage_sorter(self, model, iter1, iter2, ignore):
obj1 = model[iter1][ROW_HANDLE]
obj2 = model[iter2][ROW_HANDLE]
return cmp(obj1.network_traffic_rate(), obj2.network_traffic_rate())
def enable_polling(self, column):
if column == COL_GUEST_CPU:
widgn = ["menu_view_stats_guest_cpu", "menu_view_stats_host_cpu"]
do_enable = self.config.get_stats_enable_cpu_poll()
if column == COL_DISK:
widgn = "menu_view_stats_disk"
do_enable = self.config.get_stats_enable_disk_poll()
elif column == COL_NETWORK:
widgn = "menu_view_stats_network"
do_enable = self.config.get_stats_enable_net_poll()
elif column == COL_MEM:
widgn = "menu_view_stats_memory"
do_enable = self.config.get_stats_enable_memory_poll()
for w in util.listify(widgn):
widget = self.widget(w)
tool_text = ""
if do_enable:
widget.set_sensitive(True)
else:
if widget.get_active():
widget.set_active(False)
widget.set_sensitive(False)
tool_text = _("Disabled in preferences dialog.")
widget.set_tooltip_text(tool_text)
def _toggle_graph_helper(self, do_show, col, datafunc, menu):
img = -1
for child in col.get_cells():
if isinstance(child, CellRendererSparkline):
img = child
datafunc = do_show and datafunc or None
col.set_cell_data_func(img, datafunc, None)
col.set_visible(do_show)
self.widget(menu).set_active(do_show)
any_visible = any([c.get_visible() for c in
[self.netcol, self.diskcol, self.memcol,
self.guestcpucol, self.hostcpucol]])
self.spacer_txt.set_property("visible", not any_visible)
def toggle_network_traffic_visible_widget(self):
self._toggle_graph_helper(
self.config.is_vmlist_network_traffic_visible(), self.netcol,
self.network_traffic_img, "menu_view_stats_network")
def toggle_disk_io_visible_widget(self):
self._toggle_graph_helper(
self.config.is_vmlist_disk_io_visible(), self.diskcol,
self.disk_io_img, "menu_view_stats_disk")
def toggle_memory_usage_visible_widget(self):
self._toggle_graph_helper(
self.config.is_vmlist_memory_usage_visible(), self.memcol,
self.memory_usage_img, "menu_view_stats_memory")
def toggle_guest_cpu_usage_visible_widget(self):
self._toggle_graph_helper(
self.config.is_vmlist_guest_cpu_usage_visible(), self.guestcpucol,
self.guest_cpu_usage_img, "menu_view_stats_guest_cpu")
def toggle_host_cpu_usage_visible_widget(self):
self._toggle_graph_helper(
self.config.is_vmlist_host_cpu_usage_visible(), self.hostcpucol,
self.host_cpu_usage_img, "menu_view_stats_host_cpu")
def toggle_stats_visible(self, src, stats_id):
visible = src.get_active()
set_stats = {
COL_GUEST_CPU: self.config.set_vmlist_guest_cpu_usage_visible,
COL_HOST_CPU: self.config.set_vmlist_host_cpu_usage_visible,
COL_MEM: self.config.set_vmlist_memory_usage_visible,
COL_DISK: self.config.set_vmlist_disk_io_visible,
COL_NETWORK: self.config.set_vmlist_network_traffic_visible,
}
set_stats[stats_id](visible)
def toggle_stats_visible_guest_cpu(self, src):
self.toggle_stats_visible(src, COL_GUEST_CPU)
def toggle_stats_visible_host_cpu(self, src):
self.toggle_stats_visible(src, COL_HOST_CPU)
def toggle_stats_visible_memory_usage(self, src):
self.toggle_stats_visible(src, COL_MEM)
def toggle_stats_visible_disk(self, src):
self.toggle_stats_visible(src, COL_DISK)
def toggle_stats_visible_network(self, src):
self.toggle_stats_visible(src, COL_NETWORK)
def guest_cpu_usage_img(self, column_ignore, cell, model, _iter, data):
obj = model[_iter][ROW_HANDLE]
if obj is None or not hasattr(obj, "conn"):
return
data = obj.guest_cpu_time_vector_limit(GRAPH_LEN)
cell.set_property('data_array', data)
def host_cpu_usage_img(self, column_ignore, cell, model, _iter, data):
obj = model[_iter][ROW_HANDLE]
if obj is None or not hasattr(obj, "conn"):
return
data = obj.host_cpu_time_vector_limit(GRAPH_LEN)
cell.set_property('data_array', data)
def memory_usage_img(self, column_ignore, cell, model, _iter, data):
obj = model[_iter][ROW_HANDLE]
if obj is None or not hasattr(obj, "conn"):
return
data = obj.memory_usage_vector_limit(GRAPH_LEN)
cell.set_property('data_array', data)
def disk_io_img(self, column_ignore, cell, model, _iter, data):
obj = model[_iter][ROW_HANDLE]
if obj is None or not hasattr(obj, "conn"):
return
data = obj.disk_io_vector_limit(GRAPH_LEN, self.max_disk_rate)
cell.set_property('data_array', data)
def network_traffic_img(self, column_ignore, cell, model, _iter, data):
obj = model[_iter][ROW_HANDLE]
if obj is None or not hasattr(obj, "conn"):
return
data = obj.network_traffic_vector_limit(GRAPH_LEN, self.max_net_rate)
cell.set_property('data_array', data)
| gpl-2.0 | 6,353,689,664,977,136,000 | 35.098743 | 84 | 0.584946 | false |
Suwings/Suwings.github.io | mine/parallel time/Oftenscript/EVE 爬虫/People.py | 1 | 1818 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import pymysql
global DB
try:
DB = pymysql.connect("localhost", "root", "toortoor", "EVE_coll")
except:
print("数据库连接失败,程序停止.")
exit(0)
def insert_now_data(id, now_time, players, typeof):
""" 插入数据 1 代表国服 """
cursor = DB.cursor()
    # SQL insert statement
if typeof == 1:
sql = "insert into eve_players_sr values(%d,'%s',%d);" % (
id, now_time, players)
else:
sql = "insert into eve_players_tq values(%d,'%s',%d);" % (
id, now_time, players)
try:
cursor.execute(sql)
        # Commit the transaction to the database
DB.commit()
except Exception as err:
        # Roll back if an error occurs
DB.rollback()
print(err)
# Main logic begins
import requests
import time
import json
callback_f = 'jQuery112305552559905082075_1539584725440'
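# The market endpoint answers with JSONP, so the callback wrapper and stray
# punctuation are stripped below before the body is parsed as plain JSON.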
while True:
try:
res = requests.get(
"https://www.ceve-market.org/serverStatus?callback="+callback_f)
res_data = res.text
res_data = res_data.replace(callback_f, '')
res_data = res_data.replace('(', '')
res_data = res_data.replace(')', '')
res_data = res_data.replace(';', '')
res_obj = json.loads(res_data)
sr_player_count = res_obj['sr']
tq_player_count = res_obj['tq']
now_time = time.strftime("%Y/%m/%d %H:%M:00")
id_time = int(time.time())
print("["+str(now_time)+"] 欧服:" + str(tq_player_count) + " | 国服:" + str(sr_player_count) +
"\n")
insert_now_data(id_time, now_time, sr_player_count, 1)
insert_now_data(id_time, now_time, tq_player_count, 2)
time.sleep(60)
except Exception as err:
print("错误:")
print(err)
| mit | 7,974,478,577,590,120,000 | 23.169014 | 98 | 0.544872 | false |
mpatacchiola/deepgaze | examples/ex_cnn_head_pose_estimation_images/ex_cnn_head_pose_estimation_images.py | 1 | 1832 | #!/usr/bin/env python
#The MIT License (MIT)
#Copyright (c) 2016 Massimiliano Patacchiola
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
#CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import tensorflow as tf
import cv2
from deepgaze.head_pose_estimation import CnnHeadPoseEstimator
sess = tf.Session() #Launch the graph in a session.
my_head_pose_estimator = CnnHeadPoseEstimator(sess) #Head pose estimation object
# Load the weights from the configuration folders
my_head_pose_estimator.load_roll_variables(os.path.realpath("../../etc/tensorflow/head_pose/roll/cnn_cccdd_30k.tf"))
my_head_pose_estimator.load_pitch_variables(os.path.realpath("../../etc/tensorflow/head_pose/pitch/cnn_cccdd_30k.tf"))
my_head_pose_estimator.load_yaw_variables(os.path.realpath("../../etc/tensorflow/head_pose/yaw/cnn_cccdd_30k.tf"))
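# Process the sample images 1.jpg ... 8.jpg and print the estimated angles
# (each estimator returns an array, hence the [0,0,0] indexing below).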
for i in range(1,9):
file_name = str(i) + ".jpg"
print("Processing image ..... " + file_name)
image = cv2.imread(file_name) #Read the image with OpenCV
# Get the angles for roll, pitch and yaw
roll = my_head_pose_estimator.return_roll(image) # Evaluate the roll angle using a CNN
pitch = my_head_pose_estimator.return_pitch(image) # Evaluate the pitch angle using a CNN
yaw = my_head_pose_estimator.return_yaw(image) # Evaluate the yaw angle using a CNN
print("Estimated [roll, pitch, yaw] ..... [" + str(roll[0,0,0]) + "," + str(pitch[0,0,0]) + "," + str(yaw[0,0,0]) + "]")
print("")
| mit | 4,694,986,519,245,700,000 | 54.515152 | 141 | 0.728166 | false |
Maigard/coopshop | models.py | 1 | 15696 | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.core.exceptions import ValidationError
from django.contrib.localflavor.us.models import PhoneNumberField,USStateField
from sorl.thumbnail import ImageField
from decimal import *
import datetime
import stripe
TWOPLACES=Decimal(10) ** -2
class CycleException(Exception):
pass
class Cycle(models.Model):
date = models.DateField()
delivery = models.DateField()
class Meta:
ordering = ["date"]
@classmethod
def getCurrentCycle(cls):
try:
return cls.objects.filter(date__gte = datetime.date.today()).order_by("date")[0]
except IndexError:
raise CycleException()
def __unicode__(self):
return str(self.date)
class Producer(models.Model):
name = models.CharField(max_length=128)
contact = models.ForeignKey(User)
#Product types
about = models.TextField(help_text="Html and <a href='http://en.wikipedia.org/wiki/Markdown'>markdown</a> are allowed")
address = models.CharField(max_length=128)
city = models.CharField(max_length=64)
zip = models.CharField(max_length=10)
state = USStateField()
phone = PhoneNumberField()
email = models.EmailField()
website = models.URLField()
active = models.BooleanField(default=True)
image = ImageField(upload_to="producers")
markup = models.DecimalField(max_digits=10,decimal_places=3, blank=True, null=True)
leadTime = models.IntegerField(default=0)
@models.permalink
def get_absolute_url(self):
return ('coopshop.views.producer', [str(self.id)])
def __unicode__(self):
return self.name
class Category(models.Model):
name = models.CharField(max_length=64)
active = models.BooleanField(default=True)
image = ImageField(upload_to="categories")
@models.permalink
def get_absolute_url(self):
return ('coopshop.views.category', [str(self.id)])
class Meta:
verbose_name_plural = "Categories"
ordering = ["name"]
def __unicode__(self):
return self.name
class ProductCycle(models.Model):
product = models.ForeignKey('Product')
cycle = models.ForeignKey('Cycle')
quantity = models.IntegerField()
class Unit(models.Model):
name = models.CharField(max_length=32, unique=True)
def __unicode__(self):
return self.name
class Product(models.Model):
name = models.CharField(max_length=64)
size = models.DecimalField(max_digits=10,decimal_places=3, blank=True, null=True)
#unit = models.CharField(max_length=32, blank=True, null=True, choices = (
# ("pound", "pound"),
# ("gallon", "gallon"),
# ("dozen", "dozen"),
# ("half dozen", "half dozen"),
# ("each", "each"),
# ("bundles", "bundles"),
# ("box", "box"),
# ("carton", "carton"),
# ("bag", "bag"),
# ("ounces", "ounces"),
# ("liters", "liters"),
# ("","")))
unit = models.ForeignKey(Unit)
description = models.TextField(help_text="Html and <a href='http://en.wikipedia.org/wiki/Markdown'>markdown</a> are allowed")
image = ImageField(upload_to="products", blank=True, null=True, help_text="If an image is not provided, the category image will be used in its place")
category = models.ForeignKey(Category)
producer = models.ForeignKey(Producer)
membershipPayment = models.BooleanField(verbose_name="Membership Payment", help_text="If selected, the item price is applied toward a membership")
membershipExtension = models.IntegerField(blank=True, null=True, verbose_name="Membership Extension", help_text="If this item is a membership Item, the number of days this item extends the user's membership")
taxable = models.BooleanField()
active = models.BooleanField(default=True)
wholesalePrice = models.DecimalField(verbose_name="Wholesale Price", max_digits=10,decimal_places=2, help_text="Wholesale price the Coop pays to the producer")
markup = models.DecimalField(max_digits=10,decimal_places=3, blank=True, null=True, help_text="Markup to apply to the wholesale price. If this isn't set, the producer markup is used")
minimumPrice = models.DecimalField(verbose_name="Minimum Price", max_digits=10,decimal_places=2, blank=True, null=True, help_text="Minimum price that the product will be sold for")
leadTime = models.IntegerField(verbose_name="Lead Time", blank=True, null=True, help_text="Number of days before the end of the cycle that the product will become unavailable")
unlimitedQuantity = models.BooleanField(verbose_name="Unlimited Quantity", help_text="Item doesn't run out of stock")
cycles = models.ManyToManyField(Cycle, through=ProductCycle)
class Meta:
ordering = ["name"]
def get_image(self):
if self.image:
return self.image
else:
return self.category.image
def get_leadTime(self):
leadTime = self.leadTime
if leadTime == None:
leadTime = self.producer.leadTime
return leadTime
def get_orderByDate(self):
cycle = Cycle.getCurrentCycle()
return cycle.date - datetime.timedelta(self.get_leadTime())
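    # Selling price is wholesale * (1 + markup); markup precedence is product-level,
    # then producer-level, then the global "markup" setting, floored at minimumPrice.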
def get_price(self):
markup = self.markup
if markup == None:
markup = self.producer.markup
if markup == None:
markup = Decimal(Setting.objects.get(key = "markup").value)
price = (self.wholesalePrice * (markup + 1)).quantize(TWOPLACES)
        if self.minimumPrice is not None and price < self.minimumPrice:
return self.minimumPrice
else:
return price
def get_quantity(self, date = None): #todo get quantity at a future date
if date == None:
cycle = Cycle.getCurrentCycle()
if cycle.date - datetime.timedelta(self.get_leadTime()) < datetime.date.today():
return 0
else:
try:
return ProductCycle.objects.get(cycle = cycle, product = self).quantity
except:
return 0
def get_remaining(self):
startingQuantity = self.get_quantity()
numOrdered = OrderItem.objects.filter(order__cycle = Cycle.getCurrentCycle(), product = self).aggregate(models.Sum("quantity"))["quantity__sum"]
try:
return startingQuantity - numOrdered
except TypeError:
return startingQuantity
@models.permalink
def get_absolute_url(self):
return ('coopshop.views.product', [str(self.id)])
def __unicode__(self):
return "%s (%s)" % (self.name, self.producer.name)
class ChargeError(Exception):
pass
class Order(models.Model):
date = models.DateTimeField(auto_now_add=True)
customer = models.ForeignKey(User)
products = models.ManyToManyField(Product, through="OrderItem")
subtotal = models.DecimalField(max_digits=10,decimal_places=2,blank=True)
tax = models.DecimalField(max_digits=10,decimal_places=2,blank=True)
total = models.DecimalField(max_digits=10,decimal_places=2,blank=True)
cycle = models.ForeignKey(Cycle)
paid = models.BooleanField(default=False)
delivered = models.BooleanField(default=False)
paymentId = models.CharField(max_length=32,default=False, null=True)
nonmemberFee = models.DecimalField(verbose_name="Nonmember Fee", max_digits=10,decimal_places=2,blank=True)
deliveryFee = models.DecimalField(verbose_name="Delivery Fee", max_digits=10,decimal_places=2,blank=True)
processingFee = models.DecimalField(verbose_name="Processing Fee", max_digits=10,decimal_places=2,blank=True,default=0)
def charge(self):
if self.paid == True:
raise ValidationError("Can't charge and order more than once")
self.update_totals()
stripe.api_key = Setting.objects.get(key="Stripe Secret Key").value
profile = self.customer.get_profile()
customer = profile.stripeId
try:
charge = stripe.Charge.create( amount=int(self.total*100),
currency="usd",
customer=customer)
        except Exception, e:
            raise ChargeError(e)
if charge:
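            # Membership items in a paid order add to the buyer's balance and extend their expiry date.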
memberItems = OrderItem.objects.filter(order = self.id, product__membershipPayment = True)
if len(memberItems) > 0:
profile = self.customer.get_profile()
profile.membershipBalance += sum([orderItem.price * orderItem.quantity for orderItem in memberItems])
try:
profile.membershipExpires += datetime.timedelta(days=sum([orderItem.product.membershipExtension * orderItem.quantity for orderItem in memberItems]))
except TypeError:
profile.membershipExpires = datetime.date.today() + datetime.timedelta(days=int(sum([orderItem.product.membershipExtension * orderItem.quantity for orderItem in memberItems])))
profile.save()
self.paid = True
self.paymentId = charge["id"]
self.processingFee = Decimal(charge["fee"]/100.0).quantize(TWOPLACES)
self.save()
def fullRefund(self):
if not self.paid or self.paymentId == None:
raise ValidationError("Can't refund an order that hasn't been paid")
stripe.api_key = Setting.objects.get(key="Stripe Secret Key").value
try:
charge = stripe.Charge.retrieve(self.paymentId)
charge.refund()
self.paid = False
memberItems = OrderItem.objects.filter(order = self.id, product__membershipPayment = True)
if len(memberItems) > 0:
profile = self.customer.get_profile()
profile.membershipBalance -= sum([orderItem.price * orderItem.quantity for orderItem in memberItems])
profile.membershipExpires -= datetime.timedelta(days=sum([orderItem.product.membershipExtension * orderItem.quantity for orderItem in memberItems]))
profile.save()
except Exception, e:
raise ChargeError(e)
self.save()
def refundDifference(self):
if not self.paid or self.paymentId == None:
raise ValidationError("Can't refund an order that hasn't been paid")
stripe.api_key = Setting.objects.get(key="Stripe Secret Key").value
try:
charge = stripe.Charge.retrieve(self.paymentId)
refundAmount = charge["amount"] - charge["amount_refunded"] - int(self.total * 100)
if refundAmount > 0:
charge.refund(amount = refundAmount)
except Exception, e:
raise ChargeError(e)
def update_totals(self):
if not self.customer.get_profile().is_member() and OrderItem.objects.filter(order = self.id, product__membershipPayment = True).count() > 0:
self.nonmemberFee = Decimal(Setting.objects.get(key="Nonmember Fee").value).quantize(TWOPLACES)
else:
self.nonmemberFee = 0
self.deliveryFee = 0
self.subtotal = sum([(product.price * product.quantity).quantize(TWOPLACES) for product in OrderItem.objects.filter(order = self.id)])
tax = Decimal(Setting.objects.get(key = "tax").value)
self.tax = sum([(product.price * product.quantity * tax).quantize(TWOPLACES) for product in OrderItem.objects.filter(order = self.id, product__taxable = True)])
self.total = self.subtotal + self.tax + self.deliveryFee + self.nonmemberFee
def save(self):
self.update_totals()
if self.paid == True:
dbtotal = Order.objects.get(id=self.id).total
if self.total < dbtotal:
self.refundDifference()
elif self.total > dbtotal:
raise ValidationError("Can not add to an already charged order. Create a new order")
super(Order, self).save()
def delete(self):
if self.paid == True:
self.fullRefund()
super(Order, self).delete()
@models.permalink
def get_absolute_url(self):
return ('coopshop.views.order', [str(self.id)])
class OrderItem(models.Model):
product = models.ForeignKey(Product)
order = models.ForeignKey(Order)
quantity = models.DecimalField(max_digits=10,decimal_places=2)
wholesalePrice = models.DecimalField(max_digits=10,decimal_places=2,blank=True)
price = models.DecimalField(max_digits=10,decimal_places=2,blank=True)
def save(self):
if self.wholesalePrice == None:
self.wholesalePrice = self.product.wholesalePrice
if self.price == None:
self.price = self.product.get_price()
if self.order.paid == True:
dbtotal = OrderItem.objects.get(id=self.id).price
if self.price > dbtotal:
raise ValidationError("Can not add to an already charged order. Create a new order")
super(OrderItem, self).save()
self.order.save()
def delete(self):
order = self.order
super(OrderItem, self).delete()
order.save()
class UserProfile(models.Model):
user = models.ForeignKey(User, unique=True)
phone = PhoneNumberField()
address1 = models.CharField(max_length=128)
address2 = models.CharField(max_length=128, null=True, blank=True)
city = models.CharField(max_length=128)
state = USStateField()
zip = models.CharField(max_length=10)
membershipExpires = models.DateField(verbose_name="Membership Expires", null=True, blank=True, help_text="When this user's membership expires")
lifetimeMember = models.BooleanField(default=False, help_text="If set, this user will always be a member")
membershipBalance = models.DecimalField(verbose_name="Membership Balance", max_digits=10, decimal_places=2, default=0, help_text="The amount this user has contributed to the Co-op")
producer = models.ForeignKey(Producer, null=True, blank=True)
stripeId = models.CharField(max_length=32, null=True, blank=True)
def is_member(self):
if self.lifetimeMember:
return True
else:
try:
return self.membershipExpires >= datetime.date.today()
except TypeError:
return False
def update_card(self, stripeToken):
stripe.api_key = Setting.objects.get(key="Stripe Secret Key").value
if self.stripeId:
try:
                customer = stripe.Customer.retrieve(self.stripeId)
customer.card = stripeToken
customer.save()
except:
self.create_charge_account(stripeToken)
else:
self.create_charge_account(stripeToken)
def get_card(self):
stripe.api_key = Setting.objects.get(key="Stripe Secret Key").value
if self.stripeId:
try:
                customer = stripe.Customer.retrieve(self.stripeId)
return customer.card
except:
return None
def create_charge_account(self, stripeToken):
stripe.api_key = Setting.objects.get(key="Stripe Secret Key").value
customer = stripe.Customer.create( card=stripeToken,
description = self.user.email,
email = self.user.email)
self.stripeId = customer["id"]
self.save()
def get_card(self):
stripe.api_key = Setting.objects.get(key="Stripe Secret Key").value
if self.stripeId:
try:
return stripe.Customer.retrieve(self.stripeId)["active_card"]
except:
return None
else:
return None
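    # save() below promotes the user to lifetime membership once contributions exceed the configured cost.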
def save(self):
if self.membershipBalance > Decimal(Setting.objects.get(key = "Lifetime Membership Cost").value):
self.lifetimeMember = True
super(UserProfile, self).save()
class Message(models.Model):
user = models.ForeignKey(User)
date = models.DateField(default=datetime.date.today)
text = models.TextField()
class Meta:
ordering = ["date"]
class Setting(models.Model):
key = models.CharField(max_length=128)
value = models.TextField()
def __unicode__(self):
return self.key
class AboutPage(models.Model):
title = models.CharField(max_length=128)
slug = models.SlugField()
content = models.TextField(help_text="Page contents. Html and <a target='_blank' href='http://en.wikipedia.org/wiki/Markdown'>markdown</a> are allowed<br/>To insert an image, attach it to the page and put a reference to it in the page with the following format: ![Alt text][Name] where Alt text is a simple description of the image and Name is the name of the image")
defaultPage = models.BooleanField()
def __unicode__(self):
return self.title
@models.permalink
def get_absolute_url(self):
return ('coopshop.views.about', [self.title])
def save(self):
if self.defaultPage:
try:
temp = AboutPage.objects.get(defaultPage=True)
if self != temp:
temp.defaultPage = False
temp.save()
            except AboutPage.DoesNotExist:
pass
super(AboutPage, self).save()
class AboutImage(models.Model):
page = models.ForeignKey(AboutPage)
name = models.CharField(max_length=32)
image = ImageField(upload_to="about")
| bsd-2-clause | 8,777,920,392,437,705,000 | 36.460621 | 370 | 0.716998 | false |
ctuning/ck | ck/repo/module/experiment.tune.opencl.clblast/module.py | 2 | 31659 | #
# Collective Knowledge: CK-powered CLBlast crowd-tuning
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, [email protected], http://fursin.net
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
line='================================================================'
ck_url='http://cknowledge.org/repo/web.php?native_action=show&native_module_uoa=program.optimization&scenario=6c5af99f945739bd'
ck_url1='http://cknowledge.org/repo/web.php?wcid=experiment.bench.dnn:'
ffstat='ck-stat-flat-characteristics.json'
ffmin='ck-stat-flat-min.json'
form_name='wa_web_form'
onchange='document.'+form_name+'.submit();'
hextra='<i><center>\n'
hextra+=' [ <a href="https://en.wikipedia.org/wiki/Collective_Knowledge_(software)">CK intro</a>, \n'
hextra+='<a href="https://arxiv.org/abs/1506.06256">universal workload crowd-tuning</a>; \n'
hextra+='<a href="https://www.researchgate.net/publication/304010295_Collective_Knowledge_Towards_RD_Sustainability">vision</a> and \n'
hextra+='<a href="https://www.youtube.com/watch?v=Q94yWxXUMP0">YouTube lecture</a> ] \n'
hextra+='</center></i>\n'
hextra+='<br>\n'
selector=[{'name':'Routine (CK wrapper)', 'key':'program_uoa'},
{'name':'GPGPU', 'key':'gpgpu_name'},
{'name':'CPU', 'key':'cpu_name'},
{'name':'Platform', 'key':'plat_name', 'new_line':'yes'},
{'name':'OS', 'key':'os_name'}]
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# crowdsource these experiments
def crowdsource(i):
"""
Input: {
(local) - if 'yes', local crowd-benchmarking, instead of public
(user) - force different user ID/email for demos
(choices) - force different choices to program pipeline
(repetitions) - statistical repetitions (default=1), for now statistical analysis is not used (TBD)
(no_compile) - if 'yes', skip program compilation (for Android)
(m) - dataset dimension M
(n) - dataset dimension N
(k) - dataset dimension K
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import copy
import os
import random
# Setting output
o=i.get('out','')
oo=''
if o=='con': oo='con'
quiet=i.get('quiet','')
duoa=i.get('data_uoa','')
if duoa=='':
duoa='clblast-tune-*'
er=i.get('exchange_repo','')
if er=='': er=ck.cfg['default_exchange_repo_uoa']
esr=i.get('exchange_subrepo','')
if esr=='': esr=ck.cfg['default_exchange_subrepo_uoa']
if i.get('local','')=='yes':
er='local'
esr=''
la=i.get('local_autotuning','')
repetitions=i.get('repetitions','')
if repetitions=='': repetitions=3
repetitions=int(repetitions)
record='no'
# Check if any input has . and convert to dict
for k in list(i.keys()):
if k.find('.')>0:
v=i[k]
kk='##'+k.replace('.','#')
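            # e.g. an input key like 'env.CK_CLBLAST_MSIZE' becomes the flat CK
            # key '##env#CK_CLBLAST_MSIZE' (dots replaced by '#', '##' prefix).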
del(i[k])
r=ck.set_by_flat_key({'dict':i, 'key':kk, 'value':v})
if r['return']>0: return r
choices=i.get('choices',{})
env=i.get('env',{})
if 'env' not in choices: choices['env']={}
r=ck.merge_dicts({'dict1':choices['env'], 'dict2':copy.deepcopy(env)})
env={}
xchoices=copy.deepcopy(choices)
# Get user
user=''
mcfg={}
ii={'action':'load',
'module_uoa':'module',
'data_uoa':cfg['module_deps']['program.optimization']}
r=ck.access(ii)
if r['return']==0:
mcfg=r['dict']
dcfg={}
ii={'action':'load',
'module_uoa':mcfg['module_deps']['cfg'],
'data_uoa':mcfg['cfg_uoa']}
r=ck.access(ii)
if r['return']>0 and r['return']!=16: return r
if r['return']!=16:
dcfg=r['dict']
user=dcfg.get('user_email','')
# Initialize local environment for program optimization ***********************************************************
pi=i.get('platform_info',{})
if len(pi)==0:
ii=copy.deepcopy(i)
ii['action']='initialize'
ii['module_uoa']=cfg['module_deps']['program.optimization']
ii['exchange_repo']=er
ii['exchange_subrepo']=esr
ii['skip_welcome']='yes'
ii['skip_log_wait']='yes'
ii['crowdtuning_type']='clblast-crowd-tuning'
r=ck.access(ii)
if r['return']>0: return r
pi=r['platform_info']
user=r.get('user','')
hos=pi['host_os_uoa']
hosd=pi['host_os_dict']
tos=pi['os_uoa']
tosd=pi['os_dict']
tbits=tosd.get('bits','')
remote=tosd.get('remote','')
tdid=pi['device_id']
features=pi.get('features',{})
fplat=features.get('platform',{})
fos=features.get('os',{})
fcpu=features.get('cpu',{})
fgpu=features.get('gpu',{})
plat_name=fplat.get('name','')
plat_uid=features.get('platform_uid','')
os_name=fos.get('name','')
os_uid=features.get('os_uid','')
cpu_name=fcpu.get('name','')
if cpu_name=='':
#cpu_name='unknown-'+fcpu.get('cpu_abi','')
# Likely CPU with multiple cores (such as big-little)
cpu_unique=features.get('cpu_unique',[])
for x in cpu_unique:
if cpu_name!='':
cpu_name+=' ; '
y=x.get('ck_arch_real_name','')
if y=='': y=x.get('ck_cpu_name','')
cpu_name+=y
cpu_uid=features.get('cpu_uid','')
gpu_name=fgpu.get('name','')
gpgpu_name=''
sn=fos.get('serial_number','')
gpgpu_uid=''
r=ck.access({'action':'detect',
'module_uoa':cfg['module_deps']['platform.gpgpu'],
'host_os':hos,
'target_os':tos,
'device_id':tdid,
'type':'opencl',
'share':'yes',
'exchange_repo':er,
'exchange_subrepo':esr,
'select':'yes',
'out':'con'})
if r['return']>0: return r
gfeat=r.get('features',{})
gpgpus=gfeat.get('gpgpu',[])
cp_id=r['choices']['compute_platform_id']
cd_id=r['choices']['compute_device_id']
if len(gpgpus)>0:
gpgpu_name=gpgpus[0].get('gpgpu',{}).get('name','')
gpgpu_uid=gpgpus[0].get('gpgpu_uoa','')
# Check if need square
square=random.randint(0,1)
dim=0
if square==1: dim=random.randrange(64,513,64)
# Check input (later add ML-based exploration)
dm=i.get('m','').strip()
if dm=='':
dm=512
# if square==1: dm=dim
# else: dm=random.randrange(64,1025,64)
dm=int(dm)
env['CK_CLBLAST_MSIZE']=dm
dn=i.get('n','').strip()
if dn=='':
dn=512
# if square==1: dn=dim
# else: dn=random.randrange(64,1025,64)
dn=int(dn)
env['CK_CLBLAST_NSIZE']=dn
    dk=i.get('k','').strip()
if dk=='':
dk=512
# if square==1: dk=dim
# else: dk=random.randrange(64,1025,64)
dk=int(dk)
env['CK_CLBLAST_KSIZE']=dk
clblast_iters=2 # In fact we rely on CK statistical repetitions with different run-time context ...
if i.get('clblast_iterations','')!='':
clblast_iters=i['clblast_iterations']
env['CK_CLBLAST_ITERATIONS']=clblast_iters
# Prepare CK pipeline for a given workload
ii={'action':'pipeline',
'module_uoa':cfg['module_deps']['program'],
'data_uoa':duoa,
'host_os':hos,
'target_os':tos,
'device_id':tdid,
'skip_target':'yes',
'prepare':'yes',
'no_clean':i.get('no_compile',''),
'no_compile':i.get('no_compile',''),
'compute_platform_id':cp_id,
'compute_device_id':cd_id,
'env':env,
'choices':choices,
# 'dependencies':deps,
# 'cmd_key':run_cmd,
'no_state_check':'yes',
'no_compiler_description':'yes',
'skip_info_collection':'yes',
'skip_calibration':'yes',
'cpu_freq':'max',
'gpu_freq':'max',
'env_speed':'yes',
'energy':'no',
'skip_print_timers':'yes',
'generate_rnd_tmp_dir':'yes',
'out':oo}
rr=ck.access(ii)
if rr['return']>0: return rr
fail=rr.get('fail','')
if fail=='yes':
return {'return':10, 'error':'pipeline failed ('+rr.get('fail_reason','')+')'}
ready=rr.get('ready','')
if ready!='yes':
return {'return':11, 'error':'couldn\'t prepare universal CK program workflow'}
state=rr['state']
tmp_dir=state.get('tmp_dir','')
if tmp_dir=='': tmp_dir='tmp' # usually when no_compile
deps=rr['dependencies'] # resolved deps
ydeps=deps
if i.get('no_compile','')=='yes':
pdeps=os.path.join(pp,tmp_dir,'tmp-deps.json')
if os.path.isfile(pdeps):
qdeps=copy.deepcopy(deps) # need to keep current selected model for run-time
rz=ck.load_json_file({'json_file':pdeps})
if rz['return']>0: return rz
deps=rz['dict']
deps.update(qdeps)
# Check saved deps (if from bin package)
xk=deps['lib-clblast']
pbin=xk.get('cus',{}).get('path_bin','')
if pbin!='':
rx=ck.access({'action':'find_config_file',
'module_uoa':cfg['module_deps']['soft'],
'full_path':pbin,
'filename':'ck-install-saved.json'})
if rx['return']>0: return rx
if rx['found']=='yes':
if o=='con':
ck.out('')
ck.out('Found saved config file for CK binary distribution - reusing deps ...')
ck.out('')
ydeps=copy.deepcopy(deps)
dname=deps['lib-clblast']['dict']['data_name']
ydeps['lib-clblast']['dict']=copy.deepcopy(rx['dict'])
ydeps['lib-clblast']['dict']['data_name']=dname
# Clean pipeline
if 'ready' in rr: del(rr['ready'])
if 'fail' in rr: del(rr['fail'])
if 'return' in rr: del(rr['return'])
duoa=rr.get('choices',{}).get('data_uoa','')
# Prepare high-level experiment meta
meta={'cpu_name':cpu_name,
'os_name':os_name,
'plat_name':plat_name,
'gpu_name':gpu_name,
'gpgpu_name':gpgpu_name,
'program_uoa':duoa}
# Process deps
xdeps={}
xblas=''
for k in ydeps:
dp=ydeps[k]
dpd=dp.get('dict',{})
ptags=dpd.get('tags',[])
puoa=dpd.get('package_uoa','')
if puoa=='':
puoa=dp.get('cus',{}).get('used_package_uid','')
dname=dpd.get('data_name','')
xdeps[k]={'name':dp.get('name',''), 'data_name':dname, 'ver':dp.get('ver',''), 'package_uoa':puoa, 'package_tags':ptags}
# versions of engine sub deps
dvers={}
mdep=ydeps.get('lib-clblast',{})
mdeps=mdep.get('dict',{}).get('deps',{})
for k in mdeps:
dvers[k]=mdeps[k].get('ver','')
# Checking engine name
d_engine=xdeps.get('lib-clblast',{})
d_engine_name=d_engine.get('data_name','')
d_engine_package_uoa=d_engine.get('package_uoa','')
d_engine_ver=d_engine.get('ver','')
meta['xversions']=dvers
meta['xdeps']=xdeps
meta['choices']=xchoices
meta['dataset_m']=dm
meta['dataset_n']=dn
meta['dataset_k']=dk
meta['clblast_engine_name']=d_engine_name
meta['clblast_engine_package_uoa']=d_engine_package_uoa
mmeta=copy.deepcopy(meta)
# Extra meta which is not used to search similar case ...
mmeta['platform_uid']=plat_uid
mmeta['os_uid']=os_uid
mmeta['cpu_uid']=cpu_uid
mmeta['gpgpu_uid']=gpgpu_uid
mmeta['user']=user
# Check if already exists (to aggregate stats)
aggregated_stats={}
rduid=''
found=False
if o=='con':
ck.out('')
        ck.out('Checking if results already exist in a public repo (to aggregate statistics) ...')
record_module_uoa=cfg['record_module_uoa']
# Find remote entry
ii={'action':'search',
'module_uoa':record_module_uoa,
'repo_uoa':er,
'remote_repo_uoa':esr,
'search_dict':{'meta':meta}}
rx=ck.access(ii)
if rx['return']>0: return rx
lst=rx['lst']
best_gflops=-1
if len(lst)==1:
rduid=lst[0]['data_uid']
found=True
if o=='con':
ck.out('')
ck.out('Results found. Pre-loading aggregated stats from '+rduid+' ...')
# Load stats
rx=ck.access({'action':'load',
'module_uoa':record_module_uoa,
'data_uoa':rduid,
'repo_uoa':er,
'remote_repo_uoa':esr})
if rx['return']==0:
drx=rx['dict']
if drx.get('best_gflops','')!='':
best_gflops=drx['best_gflops']
else:
ck.out('')
ck.out('WARNING: couldn\'t load data ('+rx['error']+')')
else:
rx=ck.gen_uid({})
if rx['return']>0: return rx
rduid=rx['data_uid']
# Run CK pipeline *****************************************************
pipeline=copy.deepcopy(rr)
if len(choices)>0:
r=ck.merge_dicts({'dict1':pipeline['choices'], 'dict2':xchoices})
if r['return']>0: return r
ii={'action':'autotune',
'module_uoa':cfg['module_deps']['pipeline'],
'data_uoa':cfg['module_deps']['program'],
'host_os':hos,
'target_os':tos,
'device_id':tdid,
'iterations':1,
'repetitions':repetitions,
'collect_all':'yes',
'process_multi_keys':['##characteristics#run#statistics*'],
'tmp_dir':tmp_dir,
'pipeline':pipeline,
'stat_flat_dict':aggregated_stats,
"features_keys_to_process":["##choices#*"],
"record_params": {
"search_point_by_features":"yes"
},
'out':oo}
rrr=ck.access(ii)
if rrr['return']>0: return rrr
##characteristics#run#statistics
ls=rrr.get('last_iteration_output',{})
state=ls.get('state',{})
xchoices=copy.deepcopy(ls.get('choices',{}))
lsaf=rrr.get('last_stat_analysis',{}).get('dict_flat',{})
# Check if has good result
al=rrr.get('all',[])
best_params={}
best_time=0
for q in al:
qq=q.get('characteristics_list',[])
for q1 in qq:
bc=q1.get('run',{}).get('statistics',{}).get('best_configuration',{})
gf=bc.get('GFLOPS','')
if gf=='': gf='0.0'
gf=float(gf)
if gf>best_gflops+0.5:
best_gflops=gf
best_params=bc.get('parameters',{})
best_time=bc.get('time','')
if len(best_params)==0:
ck.out('')
        ck.out('WARNING: no better solution was found by CLBlast ...')
else:
ddd={'meta':mmeta}
ddd['choices']=xchoices
ddd['best_parameters']=best_params
ddd['best_gflops']=best_gflops
ddd['best_time']=best_time
features=ls.get('features',{})
deps=ls.get('dependencies',{})
fail=ls.get('fail','')
fail_reason=ls.get('fail_reason','')
# Save pipeline
ddd['state']={'fail':fail, 'fail_reason':fail_reason}
ddd['user']=user
if o=='con':
ck.out('')
ck.out('Saving results to the remote public repo ('+rduid+') ...')
# Update meta
rx=ck.access({'action':'add',
'module_uoa':record_module_uoa,
'data_uoa':rduid,
'repo_uoa':er,
'remote_repo_uoa':esr,
'dict':ddd,
'sort_keys':'yes'})
if rx['return']>0: return rx
# Check host URL prefix and default module/action
url=ck_url+'&highlight_uid='+rduid+'#'+rduid
ck.out('')
r=ck.inp({'text':'Would you like to open a browser to see results "'+url+'" (y/N)? '})
if r['return']>0: return r
x=r['string'].strip().lower()
if x=='y' or x=='yes':
import webbrowser
webbrowser.open(url)
return {'return':0}
##############################################################################
# show results
def show(i):
"""
Input: {
(crowd_module_uoa) - if rendered from experiment crowdsourcing
(crowd_key) - add extra name to Web keys to avoid overlapping with original crowdsourcing HTML
(crowd_on_change) - reuse onchange doc from original crowdsourcing HTML
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import os
st=''
cmuoa=i.get('crowd_module_uoa','')
ckey=i.get('crowd_key','')
conc=i.get('crowd_on_change','')
if conc=='':
conc=onchange
hi_uid=i.get('highlight_uid','')
h=''
h+='<center>\n'
h+='\n\n<script language="JavaScript">function copyToClipboard (text) {window.prompt ("Copy to clipboard: Ctrl+C, Enter", text);}</script>\n\n'
h+=hextra
# Check host URL prefix and default module/action
rx=ck.access({'action':'form_url_prefix',
'module_uoa':'wfe',
'host':i.get('host',''),
'port':i.get('port',''),
'template':i.get('template','')})
if rx['return']>0: return rx
url0=rx['url']
template=rx['template']
url=url0
action=i.get('action','')
muoa=i.get('module_uoa','')
st=''
url+='action=index&module_uoa=wfe&native_action='+action+'&'+'native_module_uoa='+muoa
url1=url
# List entries
ii={'action':'search',
'module_uoa':work['self_module_uid'],
'add_meta':'yes'}
if cmuoa!='':
ii['module_uoa']=cmuoa
r=ck.access(ii)
if r['return']>0: return r
lst=r['lst']
# Check unique entries
choices={}
wchoices={}
for q in lst:
d=q['meta']
meta=d.get('meta',{})
for kk in selector:
kx=kk['key']
k=ckey+kx
if k not in choices:
choices[k]=[]
wchoices[k]=[{'name':'','value':''}]
kflat=kk.get('flat_key','')
if kflat=='': kflat='##'+kx
rx=ck.get_by_flat_key({'dict':meta, 'key':kflat})
if rx['return']>0: return rx
v=rx['value']
if v==None: v=''
if v!='':
if v not in choices[k]:
choices[k].append(v)
wchoices[k].append({'name':v, 'value':v})
# Prepare query div ***************************************************************
if cmuoa=='':
# Start form + URL (even when viewing entry)
r=ck.access({'action':'start_form',
'module_uoa':cfg['module_deps']['wfe'],
'url':url1,
'name':form_name})
if r['return']>0: return r
h+=r['html']
for kk in selector:
kx=kk['key']
k=ckey+kx
n=kk['name']
nl=kk.get('new_line','')
if nl=='yes':
h+='<br>\n<div id="ck_entries_space8"></div>\n'
v=''
if i.get(k,'')!='':
v=i[k]
kk['value']=v
# Show hardware
ii={'action':'create_selector',
'module_uoa':cfg['module_deps']['wfe'],
'data':wchoices.get(k,[]),
'name':k,
'onchange':conc,
'skip_sort':'no',
'selected_value':v}
r=ck.access(ii)
if r['return']>0: return r
h+='<b>'+n+':</b> '+r['html'].strip()+'\n'
# Check hidden
if hi_uid!='':
h+='<input type="hidden" name="highlight_uid" value="'+hi_uid+'">\n'
h+='<br><br>'
# Prune list
plst=[]
for q in lst:
d=q['meta']
meta=d.get('meta',{})
# Check selector
skip=False
for kk in selector:
k=kk['key']
n=kk['name']
v=kk.get('value','')
kflat=kk.get('flat_key','')
            if kflat=='': kflat='##'+k
rx=ck.get_by_flat_key({'dict':meta, 'key':kflat})
if rx['return']>0: return rx
vxx=rx['value']
if vxx==None: vxx=''
if v!='' and vxx!=v:
skip=True
if not skip:
plst.append(q)
# Check if too many
lplst=len(plst)
if lplst==0:
h+='<b>No results found!</b>'
return {'return':0, 'html':h, 'style':st}
elif lplst>50:
h+='<b>Too many entries to show ('+str(lplst)+') - please, prune list further!</b>'
return {'return':0, 'html':h, 'style':st}
# Prepare table
h+='<table border="1" cellpadding="7" cellspacing="0">\n'
ha='align="center" valign="top"'
hb='align="left" valign="top"'
h+=' <tr style="background-color:#dddddd">\n'
h+=' <td '+ha+'><b>#</b></td>\n'
h+=' <td '+ha+'><b>GPGPU</b></td>\n'
h+=' <td '+ha+'><b>CPU</b></td>\n'
h+=' <td '+ha+'><b>Platform</b></td>\n'
h+=' <td '+ha+'><b>OS</b></td>\n'
h+=' <td '+ha+'><b>Routine (CK wrapper)</b></td>\n'
h+=' <td '+ha+'><b>GFLOPs</b></td>\n'
h+=' <td '+ha+'><b>Time (s)</b></td>\n'
h+=' <td '+ha+'><b>Dataset (M N K)</b></td>\n'
h+=' <td '+ha+'><b>Best parameters</b></td>\n'
h+=' <td '+ha+'><b>Choices (env)</b></td>\n'
h+=' <td '+ha+'><b>CLBlast engine</b></td>\n'
h+=' <td '+ha+'><b>Power consumption (W)<br>min / max</td>\n'
h+=' <td '+ha+'><b>Memory usage (MB)</td>\n'
h+=' <td '+ha+'><b>Bug detected?</b></td>\n'
h+=' <td '+ha+'><b>User</b></td>\n'
h+=' <td '+ha+'><b>Replay</b></td>\n'
h+=' <tr>\n'
# Dictionary to hold target meta
tm={}
ix=0
bgraph={'0':[]} # Just for graph demo
if hi_uid!='':
bgraph['1']=[]
# Sort
splst=sorted(plst, key=lambda x: x.get('meta',{}).get('best_gflops',0), reverse=True)
for q in splst:
ix+=1
duid=q['data_uid']
path=q['path']
d=q['meta']
# Check if has statistics
dstat={}
fstat=os.path.join(path,'ck-stat-flat-characteristics.json')
if os.path.isfile(fstat):
r=ck.load_json_file({'json_file':fstat, 'dict':dstat})
if r['return']>0: return r
dstat=r['dict']
x=''
# Check if has stats
x0=dstat.get("##characteristics#run#time_fwbw_ms#min",None)
meta=d.get('meta',{})
choices=d.get('choices',{})
env=choices.get('env',{})
params=choices.get('params',{}).get('params',{})
best_gflops=d.get('best_gflops',0)
best_time=d.get('best_time',0)
xdeps=meta.get('xdeps',{})
d_engine=xdeps.get('lib-clblast',{})
d_engine_name=d_engine.get('data_name','')
d_engine_package_uoa=d_engine.get('package_uoa','')
d_engine_ver=d_engine.get('ver','')
plat_name=meta.get('plat_name','')
cpu_name=meta.get('cpu_name','')
os_name=meta.get('os_name','')
gpgpu_name=meta.get('gpgpu_name','')
program_uoa=meta.get('program_uoa','')
plat_uid=meta.get('platform_uid','')
cpu_uid=meta.get('cpu_uid','')
os_uid=meta.get('os_uid','')
gpu_uid=meta.get('gpu_uid','')
gpgpu_uid=meta.get('gpgpu_uid','')
user=meta.get('user','')
te=d.get('characteristics',{}).get('run',{})
# bgc='afffaf'
bgc='dfffdf'
fail=d.get('state',{}).get('fail','')
fail_reason=d.get('state',{}).get('fail_reason','')
if fail=='yes':
if fail_reason=='': fail_reason='yes'
bgc='ffafaf'
elif hi_uid!='' and duid==hi_uid:
bgc='9fff9f'
# bgraph['0'].append([ix,None])
# bgraph['1'].append([ix,x0])
bgraph['0'].append([ix,best_gflops])
if fail!='yes' and best_gflops!=0 and duid!=hi_uid:
if hi_uid!='': bgraph['1'].append([ix,best_gflops])
bg=' style="background-color:#'+bgc+';"'
h+=' <tr'+bg+'>\n'
# Number
h+=' <td '+ha+'>'+str(ix)+'</a></td>\n'
# Platform, etc ...
x=gpgpu_name
if gpgpu_uid!='':
x='<a href="'+url0+'&wcid='+cfg['module_deps']['platform.gpgpu']+':'+gpgpu_uid+'">'+x+'</a>'
h+=' <td '+ha+'>'+x+'</td>\n'
x=cpu_name
if cpu_uid!='':
x='<a href="'+url0+'&wcid='+cfg['module_deps']['platform.cpu']+':'+cpu_uid+'">'+x+'</a>'
h+=' <td '+ha+'>'+x+'</td>\n'
x=plat_name
if plat_uid!='':
x='<a href="'+url0+'&wcid='+cfg['module_deps']['platform']+':'+plat_uid+'">'+x+'</a>'
h+=' <td '+ha+'>'+x+'</td>\n'
x=os_name
if os_uid!='':
x='<a href="'+url0+'&wcid='+cfg['module_deps']['platform']+':'+os_uid+'">'+x+'</a>'
h+=' <td '+ha+'>'+x+'</td>\n'
x1=program_uoa
if x1.startswith('clblast-tune-'): x1=x1[13:]
x='<a href="'+url0+'&wcid='+cfg['module_deps']['program']+':'+program_uoa+'">'+x1+'</a>'
h+=' <td '+ha+'>'+x+'</td>\n'
# All files
uu1=work['self_module_uid']
if cmuoa!='': uu1=cmuoa
uu2=str(ix)+') <a href="'+url0+'&wcid='+uu1+':'+duid+'">'+duid+'</a>'
uu3='[ <a href="'+url0+'&wcid='+uu1+':'+duid+'">See raw files</a> ]'
# GFLOPs
h+=' <td '+ha+'>'+('%.1f'%best_gflops)+' ± ?</a></td>\n'
# Time
h+=' <td '+ha+'>'+('%.3f'%best_time)+' ± ?</a></td>\n'
# Dataset
x=''
dm=meta.get('dataset_m','')
dn=meta.get('dataset_n','')
dk=meta.get('dataset_k','')
x=str(dm)+' x '+str(dn)+' x '+str(dk)
h+=' <td '+ha+'>'+x+'</a></td>\n'
# Best parameters
x=''
bp=d.get('best_parameters',{})
for k in sorted(bp):
v=bp[k]
x+=str(k)+'='+str(v)+'\n'
x=x.replace("\'","'").replace("'","\\'").replace('\"','"').replace('"',"\\'").replace('\n','\\n')
x1=''
if x!='':
x1+='<input type="button" class="ck_small_button" onClick="alert(\''+x+'\');" value="View all">'
h+=' <td '+ha+'>'+x1+'</td>\n'
# Choices (for now env)
# x='<table border="0" cellpadding="0" cellspacing="2">\n'
x=''
for k in sorted(env):
v=env[k]
x+=str(k)+'='+str(v)+'\n'
# x+='<tr><td>'+str(k)+'=</td><td>'+str(v)+'</td></tr>\n'
# x+='</table>\n'
# x=x.replace("'","\'").replace('"',"\\'").replace('\n','\\n')
x=x.replace("\'","'").replace("'","\\'").replace('\"','"').replace('"',"\\'").replace('\n','\\n')
x1=''
if x!='':
x1+='<input type="button" class="ck_small_button" onClick="alert(\''+x+'\');" value="View all">'
h+=' <td '+ha+'>'+x1+'</td>\n'
# Engine
x=''
if d_engine_ver!='':
x+='Version <b><a href="'+url0+'&wcid=package:'+d_engine_package_uoa+'">'+d_engine_ver+'</a></b>'
# Versions
ver=''
dver=meta.get('xversions',{})
for dx in sorted(dver):
vx=dver[dx]
if vx!=None and vx!='':
ver+=dx+': '+str(dver[dx])+'\n'
ver=ver.replace("\'","'").replace("'","\\'").replace('\"','"').replace('"',"\\'").replace('\n','\\n')
if ver!='':
ver='<input type="button" class="ck_small_button" onClick="alert(\''+ver+'\');" value="See versions of all deps">'
h+=' <td '+ha+'>'+x+'<br><br>'+ver+'</td>\n'
# Power consumption (TBD)
x=''
h+=' <td '+ha+'>'+x+'</td>\n'
# Memory usage
x=''
mem=dstat.get("##characteristics#run#memory_mbytes#max",None)
if mem!=None:
x=str(int(mem))+' MB'
h+=' <td '+ha+'>'+x+'</td>\n'
# Crowdsourcing bug detection
x=fail_reason
if x=='':
x=''
else:
fail_reason=fail_reason.replace("\'","'").replace("'","\\'").replace('\"','"').replace('"',"\\'").replace('\n','\\n')
x='Yes <input type="button" class="ck_small_button" onClick="alert(\''+fail_reason+'\');" value="Log">'
h+=' <td '+ha+'>'+x+'</td>\n'
h+=' <td '+ha+'><a href="'+url0+'&action=index&module_uoa=wfe&native_action=show&native_module_uoa=experiment.user">'+user+'</a></td>\n'
h+=' <td '+ha+'><input type="button" class="ck_small_button" onClick="copyToClipboard(\'TBD - need support in CLBlast\');" value="Replay"><br><br>\n'
h+=' '+uu3+'</td>\n'
h+=' <tr>\n'
h+='</table>\n'
h+='</center>\n'
if cmuoa=='':
h+='</form>\n'
if len(bgraph['0'])>0:
ii={'action':'plot',
'module_uoa':cfg['module_deps']['graph'],
"table":bgraph,
"h_lines":[1.0],
"ymin":0,
"ignore_point_if_none":"yes",
"plot_type":"d3_2d_bars",
"display_y_error_bar":"no",
"title":"Powered by Collective Knowledge",
"axis_x_desc":"Experiment",
"axis_y_desc":"GFLOPs",
"plot_grid":"yes",
"d3_div":"ck_interactive",
"image_width":"900",
"image_height":"400",
"wfe_url":url0}
r=ck.access(ii)
if r['return']==0:
x=r.get('html','')
if x!='':
st+=r.get('style','')
h+='<br>\n'
h+='<center>\n'
h+='<div id="ck_box_with_shadow" style="width:920px;">\n'
h+=' <div id="ck_interactive" style="text-align:center">\n'
h+=x+'\n'
h+=' </div>\n'
h+='</div>\n'
h+='</center>\n'
return {'return':0, 'html':h, 'style':st}
##############################################################################
# replay experiment (TBD)
def replay(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':1, 'error':'TBD: need support in CLBlast'}
##############################################################################
# browse public results
def browse(i):
"""
Input: {
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
import webbrowser
ck.out('Opening web page '+ck_url+' ...')
webbrowser.open(ck_url)
return {'return':0}
| bsd-3-clause | -4,807,519,586,556,307,000 | 27.49595 | 159 | 0.479642 | false |
tchaikov/teuthology | teuthology/task/internal.py | 1 | 23513 | """
Internal tasks are tasks that are started from the teuthology infrastructure.
Note that there is no corresponding task defined for this module. All of
the calls are made from other modules, most notably teuthology/run.py
"""
from cStringIO import StringIO
import contextlib
import logging
import os
import time
import yaml
import subprocess
from teuthology import lockstatus
from teuthology import lock
from teuthology import misc
from teuthology import provision
from teuthology.job_status import get_status, set_status
from teuthology.config import config as teuth_config
from teuthology.parallel import parallel
from teuthology.suite import has_packages_for_distro
from ..orchestra import cluster, remote, run
from .. import report
log = logging.getLogger(__name__)
@contextlib.contextmanager
def base(ctx, config):
"""
Create the test directory that we will be using on the remote system
"""
log.info('Creating test directory...')
testdir = misc.get_testdir(ctx)
run.wait(
ctx.cluster.run(
args=[
'mkdir', '-m0755', '--',
testdir,
],
wait=False,
)
)
try:
yield
finally:
log.info('Tidying up after the test...')
# if this fails, one of the earlier cleanups is flawed; don't
# just cram an rm -rf here
run.wait(
ctx.cluster.run(
args=[
'rmdir',
'--',
testdir,
],
wait=False,
),
)
@contextlib.contextmanager
def lock_machines(ctx, config):
"""
Lock machines. Called when the teuthology run finds and locks
new machines. This is not called if the one has teuthology-locked
machines and placed those keys in the Targets section of a yaml file.
"""
# It's OK for os_type and os_version to be None here. If we're trying
# to lock a bare metal machine, we'll take whatever is available. If
# we want a vps, defaults will be provided by misc.get_distro and
# misc.get_distro_version in provision.create_if_vm
os_type = ctx.config.get("os_type")
os_version = ctx.config.get("os_version")
arch = ctx.config.get('arch')
log.info('Locking machines...')
assert isinstance(config[0], int), 'config[0] must be an integer'
machine_type = config[1]
how_many = config[0]
# We want to make sure there are always this many machines available
to_reserve = 5
# change the status during the locking process
report.try_push_job_info(ctx.config, dict(status='waiting'))
while True:
# get a candidate list of machines
machines = lock.list_locks(machine_type=machine_type, up=True,
locked=False, count=how_many + to_reserve)
if machines is None:
if ctx.block:
log.error('Error listing machines, trying again')
time.sleep(20)
continue
else:
raise RuntimeError('Error listing machines')
# make sure there are machines for non-automated jobs to run
if len(machines) < to_reserve + how_many and ctx.owner.startswith('scheduled'):
if ctx.block:
log.info(
'waiting for more machines to be free (need %s + %s, have %s)...',
to_reserve,
how_many,
len(machines),
)
time.sleep(10)
continue
else:
assert 0, ('not enough machines free; need %s + %s, have %s' %
(to_reserve, how_many, len(machines)))
newly_locked = lock.lock_many(ctx, how_many, machine_type, ctx.owner,
ctx.archive, os_type, os_version, arch)
if not newly_locked and not isinstance(newly_locked, list):
raise RuntimeError('Invalid parameters specified')
if len(newly_locked) == how_many:
vmlist = []
for lmach in newly_locked:
if misc.is_vm(lmach):
vmlist.append(lmach)
if vmlist:
log.info('Waiting for virtual machines to come up')
keys_dict = dict()
loopcount = 0
while len(keys_dict) != len(vmlist):
loopcount += 1
time.sleep(10)
keys_dict = lock.ssh_keyscan(vmlist)
log.info('virtual machine is still unavailable')
if loopcount == 40:
loopcount = 0
log.info('virtual machine(s) still not up, ' +
'recreating unresponsive ones.')
for guest in vmlist:
if guest not in keys_dict.keys():
log.info('recreating: ' + guest)
full_name = misc.canonicalize_hostname(guest)
provision.destroy_if_vm(ctx, full_name)
provision.create_if_vm(ctx, full_name)
if lock.do_update_keys(keys_dict):
log.info("Error in virtual machine keys")
newscandict = {}
for dkey in newly_locked.iterkeys():
stats = lockstatus.get_status(dkey)
newscandict[dkey] = stats['ssh_pub_key']
ctx.config['targets'] = newscandict
else:
ctx.config['targets'] = newly_locked
locked_targets = yaml.safe_dump(
ctx.config['targets'],
default_flow_style=False
).splitlines()
log.info('\n '.join(['Locked targets:', ] + locked_targets))
# successfully locked machines, change status back to running
report.try_push_job_info(ctx.config, dict(status='running'))
break
elif not ctx.block:
assert 0, 'not enough machines are available'
log.warn('Could not lock enough machines, waiting...')
time.sleep(10)
try:
yield
finally:
if ctx.config.get('unlock_on_failure', False) or \
get_status(ctx.summary) == 'pass':
log.info('Unlocking machines...')
for machine in ctx.config['targets'].iterkeys():
lock.unlock_one(ctx, machine, ctx.owner)
def save_config(ctx, config):
"""
Store the config in a yaml file
"""
log.info('Saving configuration')
if ctx.archive is not None:
with file(os.path.join(ctx.archive, 'config.yaml'), 'w') as f:
yaml.safe_dump(ctx.config, f, default_flow_style=False)
def check_lock(ctx, config):
"""
Check lock status of remote machines.
"""
if not teuth_config.lock_server or ctx.config.get('check-locks') is False:
log.info('Lock checking disabled.')
return
log.info('Checking locks...')
for machine in ctx.config['targets'].iterkeys():
status = lockstatus.get_status(machine)
log.debug('machine status is %s', repr(status))
assert status is not None, \
'could not read lock status for {name}'.format(name=machine)
assert status['up'], 'machine {name} is marked down'.format(name=machine)
assert status['locked'], \
'machine {name} is not locked'.format(name=machine)
assert status['locked_by'] == ctx.owner, \
'machine {name} is locked by {user}, not {owner}'.format(
name=machine,
user=status['locked_by'],
owner=ctx.owner,
)
def check_packages(ctx, config):
"""
Checks gitbuilder to determine if there are missing packages for this job.
If there are missing packages, fail the job.
"""
log.info("Checking packages...")
os_type = ctx.config.get("os_type", None)
sha1 = ctx.config.get("sha1", None)
# We can only do this check if there are a defined sha1 and os_type
# in the job config.
if os_type and sha1:
log.info(
"Checking packages for os_type '{os}' and ceph hash '{ver}'".format(
os=os_type,
ver=sha1,
)
)
if not has_packages_for_distro(sha1, os_type):
msg = "Packages for os_type '{os}' and ceph hash '{ver}' not found"
msg = msg.format(
os=os_type,
ver=sha1,
)
log.error(msg)
# set the failure message and update paddles with the status
ctx.summary["failure_reason"] = msg
set_status(ctx.summary, "dead")
report.try_push_job_info(ctx.config, dict(status='dead'))
raise RuntimeError(msg)
else:
log.info(
"Checking packages skipped, missing os_type '{os}' or ceph hash '{ver}'".format(
os=os_type,
ver=sha1,
)
)
@contextlib.contextmanager
def timer(ctx, config):
"""
Start the timer used by teuthology
"""
log.info('Starting timer...')
start = time.time()
try:
yield
finally:
duration = time.time() - start
log.info('Duration was %f seconds', duration)
ctx.summary['duration'] = duration
def connect(ctx, config):
"""
Open a connection to a remote host.
"""
log.info('Opening connections...')
remotes = []
machs = []
for name in ctx.config['targets'].iterkeys():
machs.append(name)
for t, key in ctx.config['targets'].iteritems():
t = misc.canonicalize_hostname(t)
log.debug('connecting to %s', t)
try:
if ctx.config['sshkeys'] == 'ignore':
key = None
except (AttributeError, KeyError):
pass
remotes.append(
remote.Remote(name=t, host_key=key, keep_alive=True, console=None))
ctx.cluster = cluster.Cluster()
if 'roles' in ctx.config:
for rem, roles in zip(remotes, ctx.config['roles']):
assert all(isinstance(role, str) for role in roles), \
"Roles in config must be strings: %r" % roles
ctx.cluster.add(rem, roles)
log.info('roles: %s - %s' % (rem, roles))
else:
for rem in remotes:
ctx.cluster.add(rem, rem.name)
def push_inventory(ctx, config):
if not teuth_config.lock_server:
return
def push():
for rem in ctx.cluster.remotes.keys():
info = rem.inventory_info
lock.update_inventory(info)
try:
push()
except Exception:
log.exception("Error pushing inventory")
def serialize_remote_roles(ctx, config):
"""
Provides an explicit mapping for which remotes have been assigned what roles
So that other software can be loosely coupled to teuthology
"""
if ctx.archive is not None:
with file(os.path.join(ctx.archive, 'info.yaml'), 'r+') as info_file:
info_yaml = yaml.safe_load(info_file)
info_file.seek(0)
info_yaml['cluster'] = dict([(rem.name, {'roles': roles}) for rem, roles in ctx.cluster.remotes.iteritems()])
yaml.safe_dump(info_yaml, info_file, default_flow_style=False)
def check_ceph_data(ctx, config):
"""
Check for old /var/lib/ceph directories and detect staleness.
"""
log.info('Checking for old /var/lib/ceph...')
processes = ctx.cluster.run(
args=[
'test', '!', '-e', '/var/lib/ceph',
],
wait=False,
)
failed = False
for proc in processes:
try:
proc.wait()
except run.CommandFailedError:
log.error('Host %s has stale /var/lib/ceph, check lock and nuke/cleanup.', proc.remote.shortname)
failed = True
if failed:
raise RuntimeError('Stale /var/lib/ceph detected, aborting.')
def check_conflict(ctx, config):
"""
Note directory use conflicts and stale directories.
"""
log.info('Checking for old test directory...')
testdir = misc.get_testdir(ctx)
processes = ctx.cluster.run(
args=[
'test', '!', '-e', testdir,
],
wait=False,
)
failed = False
for proc in processes:
try:
proc.wait()
except run.CommandFailedError:
log.error('Host %s has stale test directory %s, check lock and cleanup.', proc.remote.shortname, testdir)
failed = True
if failed:
raise RuntimeError('Stale jobs detected, aborting.')
@contextlib.contextmanager
def archive(ctx, config):
"""
Handle the creation and deletion of the archive directory.
"""
log.info('Creating archive directory...')
archive_dir = misc.get_archive_dir(ctx)
run.wait(
ctx.cluster.run(
args=[
'install', '-d', '-m0755', '--', archive_dir,
],
wait=False,
)
)
try:
yield
except Exception:
# we need to know this below
set_status(ctx.summary, 'fail')
raise
finally:
passed = get_status(ctx.summary) == 'pass'
if ctx.archive is not None and \
not (ctx.config.get('archive-on-error') and passed):
log.info('Transferring archived files...')
logdir = os.path.join(ctx.archive, 'remote')
if (not os.path.exists(logdir)):
os.mkdir(logdir)
for rem in ctx.cluster.remotes.iterkeys():
path = os.path.join(logdir, rem.shortname)
misc.pull_directory(rem, archive_dir, path)
log.info('Removing archive directory...')
run.wait(
ctx.cluster.run(
args=[
'rm',
'-rf',
'--',
archive_dir,
],
wait=False,
),
)
@contextlib.contextmanager
def sudo(ctx, config):
"""
Enable use of sudo
"""
log.info('Configuring sudo...')
sudoers_file = '/etc/sudoers'
backup_ext = '.orig.teuthology'
tty_expr = r's/^\([^#]*\) \(requiretty\)/\1 !\2/g'
pw_expr = r's/^\([^#]*\) !\(visiblepw\)/\1 \2/g'
run.wait(
ctx.cluster.run(
args="sudo sed -i{ext} -e '{tty}' -e '{pw}' {path}".format(
ext=backup_ext, tty=tty_expr, pw=pw_expr,
path=sudoers_file
),
wait=False,
)
)
try:
yield
finally:
log.info('Restoring {0}...'.format(sudoers_file))
ctx.cluster.run(
args="sudo mv -f {path}{ext} {path}".format(
path=sudoers_file, ext=backup_ext
)
)
@contextlib.contextmanager
def coredump(ctx, config):
"""
Stash a coredump of this system if an error occurs.
"""
log.info('Enabling coredump saving...')
archive_dir = misc.get_archive_dir(ctx)
run.wait(
ctx.cluster.run(
args=[
'install', '-d', '-m0755', '--',
'{adir}/coredump'.format(adir=archive_dir),
run.Raw('&&'),
'sudo', 'sysctl', '-w', 'kernel.core_pattern={adir}/coredump/%t.%p.core'.format(adir=archive_dir),
],
wait=False,
)
)
try:
yield
finally:
run.wait(
ctx.cluster.run(
args=[
'sudo', 'sysctl', '-w', 'kernel.core_pattern=core',
run.Raw('&&'),
# don't litter the archive dir if there were no cores dumped
'rmdir',
'--ignore-fail-on-non-empty',
'--',
'{adir}/coredump'.format(adir=archive_dir),
],
wait=False,
)
)
# set status = 'fail' if the dir is still there = coredumps were
# seen
for rem in ctx.cluster.remotes.iterkeys():
r = rem.run(
args=[
'if', 'test', '!', '-e', '{adir}/coredump'.format(adir=archive_dir), run.Raw(';'), 'then',
'echo', 'OK', run.Raw(';'),
'fi',
],
stdout=StringIO(),
)
if r.stdout.getvalue() != 'OK\n':
log.warning('Found coredumps on %s, flagging run as failed', rem)
set_status(ctx.summary, 'fail')
if 'failure_reason' not in ctx.summary:
ctx.summary['failure_reason'] = \
'Found coredumps on {rem}'.format(rem=rem)
@contextlib.contextmanager
def syslog(ctx, config):
"""
start syslog / stop syslog on exit.
"""
if ctx.archive is None:
# disable this whole feature if we're not going to archive the data anyway
yield
return
log.info('Starting syslog monitoring...')
archive_dir = misc.get_archive_dir(ctx)
run.wait(
ctx.cluster.run(
args=[
'mkdir', '-m0755', '--',
'{adir}/syslog'.format(adir=archive_dir),
],
wait=False,
)
)
CONF = '/etc/rsyslog.d/80-cephtest.conf'
conf_fp = StringIO('''
kern.* -{adir}/syslog/kern.log;RSYSLOG_FileFormat
*.*;kern.none -{adir}/syslog/misc.log;RSYSLOG_FileFormat
'''.format(adir=archive_dir))
try:
for rem in ctx.cluster.remotes.iterkeys():
misc.sudo_write_file(
remote=rem,
path=CONF,
data=conf_fp,
)
conf_fp.seek(0)
run.wait(
ctx.cluster.run(
args=[
'sudo',
'service',
# a mere reload (SIGHUP) doesn't seem to make
# rsyslog open the files
'rsyslog',
'restart',
],
wait=False,
),
)
yield
finally:
log.info('Shutting down syslog monitoring...')
run.wait(
ctx.cluster.run(
args=[
'sudo',
'rm',
'-f',
'--',
CONF,
run.Raw('&&'),
'sudo',
'service',
'rsyslog',
'restart',
],
wait=False,
),
)
# race condition: nothing actually says rsyslog had time to
# flush the file fully. oh well.
log.info('Checking logs for errors...')
for rem in ctx.cluster.remotes.iterkeys():
log.debug('Checking %s', rem.name)
r = rem.run(
args=[
'egrep', '--binary-files=text',
'\\bBUG\\b|\\bINFO\\b|\\bDEADLOCK\\b',
run.Raw('{adir}/syslog/*.log'.format(adir=archive_dir)),
run.Raw('|'),
'grep', '-v', 'task .* blocked for more than .* seconds',
run.Raw('|'),
'grep', '-v', 'lockdep is turned off',
run.Raw('|'),
'grep', '-v', 'trying to register non-static key',
run.Raw('|'),
'grep', '-v', 'DEBUG: fsize', # xfs_fsr
run.Raw('|'),
'grep', '-v', 'CRON', # ignore cron noise
run.Raw('|'),
'grep', '-v', 'BUG: bad unlock balance detected', # #6097
run.Raw('|'),
'grep', '-v', 'inconsistent lock state', # FIXME see #2523
run.Raw('|'),
'grep', '-v', '*** DEADLOCK ***', # part of lockdep output
run.Raw('|'),
'grep', '-v', 'INFO: possible irq lock inversion dependency detected', # FIXME see #2590 and #147
run.Raw('|'),
'grep', '-v', 'INFO: NMI handler (perf_event_nmi_handler) took too long to run',
run.Raw('|'),
'grep', '-v', 'INFO: recovery required on readonly',
run.Raw('|'),
'head', '-n', '1',
],
stdout=StringIO(),
)
stdout = r.stdout.getvalue()
if stdout != '':
log.error('Error in syslog on %s: %s', rem.name, stdout)
set_status(ctx.summary, 'fail')
if 'failure_reason' not in ctx.summary:
ctx.summary['failure_reason'] = \
"'{error}' in syslog".format(error=stdout)
log.info('Compressing syslogs...')
run.wait(
ctx.cluster.run(
args=[
'find',
'{adir}/syslog'.format(adir=archive_dir),
'-name',
'*.log',
'-print0',
run.Raw('|'),
'sudo',
'xargs',
'-0',
'--no-run-if-empty',
'--',
'gzip',
'--',
],
wait=False,
),
)
def vm_setup(ctx, config):
"""
Look for virtual machines and handle their initialization
"""
all_tasks = [x.keys()[0] for x in ctx.config['tasks']]
need_chef = False
if 'chef' in all_tasks or 'kernel' in all_tasks:
need_chef = True
with parallel() as p:
editinfo = os.path.join(os.path.dirname(__file__),'edit_sudoers.sh')
for rem in ctx.cluster.remotes.iterkeys():
mname = rem.shortname
if misc.is_vm(mname):
r = rem.run(args=['test', '-e', '/ceph-qa-ready',],
stdout=StringIO(),
check_status=False,)
if r.returncode != 0:
p1 = subprocess.Popen(['cat', editinfo], stdout=subprocess.PIPE)
p2 = subprocess.Popen(
[
'ssh',
'-o', 'StrictHostKeyChecking=no',
'-t', '-t',
str(rem),
'sudo',
'sh'
],
stdin=p1.stdout, stdout=subprocess.PIPE
)
_, err = p2.communicate()
if err:
log.info("Edit of /etc/sudoers failed: %s", err)
if need_chef:
p.spawn(_download_and_run_chef, rem)
def _download_and_run_chef(remote_):
"""
Run ceph_qa_chef.
"""
log.info('Running ceph_qa_chef on %s', remote_)
remote_.run(
args=[
'wget', '-q', '-O-',
'http://ceph.com/git/?p=ceph-qa-chef.git;a=blob_plain;f=solo/solo-from-scratch;hb=HEAD',
run.Raw('|'),
'sh',
],
label="run chef solo-from-scratch"
)
| mit | 1,406,336,745,813,012,200 | 33.375731 | 121 | 0.493046 | false |
alex/warehouse | warehouse/admin/routes.py | 1 | 1309 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def includeme(config):
    # We need to get the value of the Warehouse and Forklift domains; we'll use
    # these to segregate the Warehouse routes from the Forklift routes until
    # Forklift is properly split out into its own project.
warehouse = config.get_settings().get("warehouse.domain")
# General Admin pages
config.add_route("admin.dashboard", "/admin/", domain=warehouse)
config.add_route("admin.login", "/admin/login/", domain=warehouse)
config.add_route("admin.logout", "/admin/logout/", domain=warehouse)
# User related Admin pages
config.add_route("admin.user.list", "/admin/users/", domain=warehouse)
config.add_route(
"admin.user.detail",
"/admin/users/{user_id}/",
domain=warehouse,
)
| apache-2.0 | -575,616,291,170,953,600 | 41.225806 | 79 | 0.715814 | false |
ddsc/ddsc-core | ddsc_core/management/commands/import_source.py | 1 | 1972 | # (c) Fugro GeoServices. MIT licensed, see LICENSE.rst.
from django.core.management.base import BaseCommand
from ddsc_core.models.models import Source, Manufacturer
from django.utils import timezone
import csv
import string
class Command(BaseCommand):
args = '<CSV file>'
help = 'Imports a CSV file of source into the database.'
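    # Assumed CSV layout, inferred from the column indices used in handle():
    # uuid,name,manufacturer,type,details,frequency,timeout
    # e.g. "0001,well-01,Acme,sensor,groundwater level,3600,7200" (illustrative values).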
def handle(self, *args, **options):
with open(args[0], 'rb') as f:
reader = csv.reader(f)
for row in reader:
                if len(row) >= 7:
uuid = row[0]
name = row[1]
manufacturer = row[2]
print manufacturer # for testing
try:
manufacturer = Manufacturer.objects.get(
name=manufacturer)
manufacturer_id = manufacturer.id
                    except Manufacturer.DoesNotExist:
manufacturer = Manufacturer.objects.get(name='unknown')
manufacturer_id = manufacturer.id
type = string.lower(row[3])
def f(x):
return {
'calculated': 0,
'sensor': 1,
'simulated': 2,
'derived': 3,
}.get(x, 1)
source_type = f(type)
details = row[4]
frequency = row[5]
timeout = row[6]
Source.objects.create(name=name,
source_type=source_type,
manufacturer_id=manufacturer_id,
details=details,
created=timezone.now(),
uuid=uuid)
print "completeted~!"
| mit | -5,022,244,595,836,610,000 | 38.244898 | 79 | 0.411765 | false |
kalhartt/python-groove | examples/replaygain.py | 1 | 1911 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""replaygain
Calculate replaygain values for a set of files
Usage:
replaygain [-v...] FILE...
Options:
-v --verbose Set logging level, repeat to increase verbosity
"""
from __future__ import print_function, unicode_literals
import logging
import sys
from docopt import docopt
import groove
_log = logging.getLogger(__name__)
def loudness_to_replaygain(loudness):
"""Convert loudness to replaygain value, clamped to (-51.0, 51.0)"""
rg = -18.0 - loudness
rg = min(max(rg, -51.0), 51.0)
return rg
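# Worked example with illustrative numbers: a loudness of -23.0 yields a gain of
# -18.0 - (-23.0) = +5.0, a loudness of -10.0 yields -8.0, and an extreme value
# such as -80.0 is clamped to the +51.0 upper bound.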
def main(infiles):
_log.debug('Creating a playlist and loudness detector')
loudness_detector = groove.LoudnessDetector()
playlist = groove.Playlist()
_log.debug('Opening files and adding to playlist')
for infile in infiles:
gfile = groove.File(infile)
gfile.open()
playlist.append(gfile)
_log.debug('Attaching playlist to detector')
loudness_detector.playlist = playlist
_log.debug('Processing playlist')
for loudness, peak, duration, pitem in loudness_detector:
if pitem is None:
print('\nAll files complete.')
else:
print('\nfile complete: {0}\n'.format(pitem.file.filename))
print('suggested gain: {0:.2f}, sample peak: {1}, duration: {2}'
.format(loudness_to_replaygain(loudness), peak, duration))
_log.debug('Detaching playlist')
loudness_detector.playlist = None
_log.debug('Closing files and clearing playlist')
while len(playlist) > 0:
playlist[0].file.close()
del playlist[0]
return 0
if __name__ == '__main__':
args = docopt(__doc__)
loglvl = {
0: logging.WARNING,
1: logging.INFO,
}.get(args['--verbose'], logging.DEBUG)
logging.basicConfig(level=loglvl)
groove.init()
sys.exit(main(
args['FILE']
))
| mit | -4,648,362,452,070,011,000 | 23.818182 | 72 | 0.63056 | false |
indigo-dc/im | IM/VirtualMachine.py | 1 | 46072 | # IM - Infrastructure Manager
# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import threading
import shutil
import string
import json
import tempfile
import logging
from netaddr import IPNetwork, IPAddress
from radl.radl import network, RADL
from radl.radl_parse import parse_radl
from IM.LoggerMixin import LoggerMixin
from IM.SSH import SSH
from IM.SSHRetry import SSHRetry
from IM.config import Config
from IM import get_user_pass_host_port
import IM.CloudInfo
class VirtualMachine(LoggerMixin):
# VM states
UNKNOWN = "unknown"
PENDING = "pending"
RUNNING = "running"
STOPPED = "stopped"
OFF = "off"
FAILED = "failed"
CONFIGURED = "configured"
UNCONFIGURED = "unconfigured"
DELETING = "deleting"
WAIT_TO_PID = "WAIT"
NOT_RUNNING_STATES = [OFF, FAILED, STOPPED]
SSH_REVERSE_BASE_PORT = 20000
logger = logging.getLogger('InfrastructureManager')
def __init__(self, inf, cloud_id, cloud, info, requested_radl, cloud_connector=None, im_id=None):
self._lock = threading.Lock()
"""Threading Lock to avoid concurrency problems."""
self.last_update = int(time.time())
"""Last update of the VM info"""
self.destroy = False
"""Flag to specify that this VM has been destroyed"""
self.state = self.PENDING
"""VM State"""
self.inf = inf
"""Infrastructure which this VM is part of"""
self.id = cloud_id
"""The ID of the VM assigned by the cloud provider"""
self.im_id = im_id
"""The ID of the VM assigned by the IM"""
self.creation_im_id = im_id
"""The ID of the VM assigned by the IM during creation"""
self.cloud = cloud
"""CloudInfo object with the information about the cloud provider"""
self.info = info.clone() if info else None
"""RADL object with the current information about the VM"""
# Set the initial state of the VM
if info:
self.info.systems[0].setValue("state", self.state)
self.requested_radl = requested_radl
"""Original RADL requested by the user"""
self.cont_out = ""
"""Contextualization output message"""
self.configured = None
"""Configure flag. If it is None the contextualization has not been finished yet"""
self.ctxt_pid = None
"""Number of the PID of the contextualization process being executed in this VM"""
self.ssh_connect_errors = 0
"""Number of errors in the ssh connection trying to get the state of the ctxt pid """
self.cloud_connector = cloud_connector
"""CloudConnector object to connect with the IaaS platform"""
self.creating = True
"""Flag to specify that this VM is creation process"""
self.error_msg = None
"""Message with the cause of the the error in the VM (if known) """
self.deleting = False
"""Flag to specify that this VM is deletion process"""
def serialize(self):
with self._lock:
odict = self.__dict__.copy()
# Quit the lock to the data to be store by pickle
del odict['_lock']
del odict['cloud_connector']
del odict['inf']
# To avoid errors tests with Mock objects
if 'get_ssh' in odict:
del odict['get_ssh']
if 'get_ctxt_log' in odict:
del odict['get_ctxt_log']
if odict['info']:
odict['info'] = str(odict['info'])
if odict['requested_radl']:
odict['requested_radl'] = str(odict['requested_radl'])
if odict['cloud']:
odict['cloud'] = odict['cloud'].serialize()
return json.dumps(odict)
@staticmethod
def deserialize(str_data):
dic = json.loads(str_data)
if dic['cloud']:
dic['cloud'] = IM.CloudInfo.CloudInfo.deserialize(dic['cloud'])
if dic['info']:
dic['info'] = parse_radl(dic['info'])
if dic['requested_radl']:
dic['requested_radl'] = parse_radl(dic['requested_radl'])
newvm = VirtualMachine(None, None, None, None, None, None, dic['im_id'])
# Set creating to False as default to VMs stored with 1.5.5 or old versions
newvm.creating = False
newvm.__dict__.update(dic)
# If we load a VM that is not configured, set it to False
# because the configuration process will be lost
if newvm.configured is None:
newvm.configured = False
return newvm
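    # Minimal round-trip sketch (assumed usage): data = vm.serialize() returns a
    # JSON string with the RADL objects flattened to text and inf/cloud_connector
    # dropped, and VirtualMachine.deserialize(data) rebuilds the object, so the
    # caller is expected to reattach the infrastructure afterwards.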
def getCloudConnector(self):
"""
Get the CloudConnector for this VM
"""
if not self.cloud_connector:
self.cloud_connector = self.cloud.getCloudConnector(self.inf)
return self.cloud_connector
def delete(self, delete_list, auth, exceptions):
"""
Delete the VM
"""
# In case of a VM is already destroyed
if self.destroy:
return (True, "")
# Select the last in the list to delete
remain_vms = [v for v in self.inf.get_vm_list() if v not in delete_list]
last = self.is_last_in_cloud(delete_list, remain_vms)
success = False
try:
self.deleting = True
VirtualMachine.logger.info("Inf ID: " + self.inf.id + ": Finalizing the VM id: " + str(self.id))
self.kill_check_ctxt_process()
(success, msg) = self.getCloudConnector().finalize(self, last, auth)
except Exception as e:
msg = str(e)
finally:
self.deleting = False
if success:
self.destroy = True
# force the update of the information
self.last_update = 0
if not success:
VirtualMachine.logger.info("Inf ID: " + self.inf.id + ": The VM cannot be finalized: %s" % msg)
exceptions.append(msg)
return success
def alter(self, radl, auth):
"""
Modify the features of the the VM
"""
# Get only the system with the same name as this VM
new_radl = radl.clone()
s = radl.get_system_by_name(self.info.systems[0].name)
if not s:
raise Exception("Incorrect RADL no system with name %s provided." % self.info.systems[0].name)
new_radl.systems = [s]
(success, alter_res) = self.getCloudConnector().alterVM(self, new_radl, auth)
# force the update of the information
self.last_update = 0
return (success, alter_res)
def stop(self, auth):
"""
Stop the VM
"""
(success, msg) = self.getCloudConnector().stop(self, auth)
# force the update of the information
self.last_update = 0
return (success, msg)
def start(self, auth):
"""
Start the VM
"""
(success, msg) = self.getCloudConnector().start(self, auth)
# force the update of the information
self.last_update = 0
return (success, msg)
def reboot(self, auth):
"""
Reboot the VM
"""
(success, msg) = self.getCloudConnector().reboot(self, auth)
# force the update of the information
self.last_update = 0
return (success, msg)
def create_snapshot(self, disk_num, image_name, auto_delete, auth):
"""
Create a snapshot of one disk of the VM
"""
return self.getCloudConnector().create_snapshot(self, disk_num, image_name, auto_delete, auth)
def getRequestedSystem(self):
"""
Get the system object with the requested RADL data
"""
return self.requested_radl.systems[0]
def hasPublicIP(self):
"""
Return True if this VM has a public IP
"""
return bool(self.info.getPublicIP())
def hasPublicNet(self):
"""
Return True if this VM is connected to some network defined as public
"""
return self.info.hasPublicNet(self.info.systems[0].name)
def hasIP(self, ip):
"""
Return True if this VM has an IP equals to the specified ip
"""
return self.info.systems[0].hasIP(ip)
def getPublicIP(self):
"""
Get the first net interface with public IP
"""
return self.info.getPublicIP()
def getPrivateIP(self):
"""
Get the first net interface with private IP
"""
return self.info.getPrivateIP()
def getNumNetworkIfaces(self):
"""
Get the number of net interfaces of this VM
"""
return self.info.systems[0].getNumNetworkIfaces()
def getNumNetworkWithConnection(self, connection):
"""
Get the number of the interface connected with the net id specified
"""
return self.info.systems[0].getNumNetworkWithConnection(connection)
def getProxyHost(self):
"""
Return the proxy_host data if available
"""
for netid in self.info.systems[0].getNetworkIDs():
net = self.info.get_network_by_id(netid)
if net.getValue("proxy_host"):
proxy_user, proxy_pass, proxy_ip, proxy_port = get_user_pass_host_port(net.getValue("proxy_host"))
if not proxy_port:
proxy_port = 22
return SSH(proxy_ip, proxy_user, proxy_pass, net.getValue("proxy_key"), proxy_port)
return None
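    # Illustrative RADL value (format assumed to be what get_user_pass_host_port
    # accepts): proxy_host = 'cloudadm@bastion.example.com:2222' plus an optional
    # proxy_key would yield an SSH client to that bastion on port 2222 (22 when
    # no port is given).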
def getIfaceIP(self, iface_num):
"""
Get the IP of the interface specified
"""
return self.info.systems[0].getIfaceIP(iface_num)
def getOS(self):
"""
Get O.S. of this VM (if not specified assume linux)
"""
os = self.info.systems[0].getValue("disk.0.os.name")
return os if os else "linux"
def getCredentialValues(self, new=False):
"""
Get The credentials to access of this VM by SSH
"""
return self.info.systems[0].getCredentialValues(new=new)
def getInstalledApplications(self):
"""
Get the list of installed applications in this VM.
(Obtained from the VMRC)
"""
return self.info.systems[0].getApplications()
def getRequestedApplications(self):
"""
Get the list of requested applications to be installed in this VM.
"""
return self.requested_radl.systems[0].getApplications()
def getRequestedName(self, default_hostname=None, default_domain=None):
"""
Get the requested name for this VM (interface 0)
"""
return self.getRequestedNameIface(0, default_hostname, default_domain)
def getRequestedNameIface(self, iface_num, default_hostname=None, default_domain=None):
"""
Get the requested name for the specified interface of this VM
"""
return self.requested_radl.systems[0].getRequestedNameIface(iface_num, self.im_id,
default_hostname, default_domain)
def isConnectedWith(self, vm):
"""
Check if this VM is connected with the specified VM with a network
"""
if not vm:
return False
# If both VMs have public IPs
if self.hasPublicIP() and vm.hasPublicIP():
return True
# Or if both VMs are connected to the same network
i = 0
while self.info.systems[0].getValue("net_interface." + str(i) + ".connection"):
net_name = self.info.systems[0].getValue("net_interface." + str(i) + ".connection")
common_net = False
j = 0
while vm.info.systems[0].getValue("net_interface." + str(j) + ".connection"):
other_net_name = vm.info.systems[0].getValue("net_interface." + str(j) + ".connection")
if other_net_name == net_name:
common_net = True
break
j += 1
if common_net:
return True
i += 1
return False
def getAppsToInstall(self):
"""
Get a list of applications to install in the VM
Returns: list of :py:class:`radl.radl.Application` with the applications
"""
# check apps requested
requested = self.getRequestedApplications()
# check apps installed in the VM
installed = self.getInstalledApplications()
to_install = []
for req_app in requested:
# discard the ansible modules
if not req_app.getValue("name").startswith("ansible.modules"):
is_installed = False
for inst_app in installed:
if inst_app.isNewerThan(req_app):
is_installed = True
if not is_installed:
to_install.append(req_app)
return to_install
def getModulesToInstall(self):
"""
Get a list of ansible modules to install in the VM
Returns: list of str with the name of the galaxy roles (i.e.: micafer.hadoop)
"""
requested = self.getRequestedApplications()
to_install = []
for req_app in requested:
if req_app.getValue("name").startswith("ansible.modules."):
to_install.append(req_app.getValue("name")[16:])
return to_install
def getRemoteAccessPort(self):
"""
Get the remote access port from the RADL
Returns: int with the port
"""
if self.getOS().lower() == "windows":
return self.getWinRMPort()
else:
return self.getSSHPort()
def getWinRMPort(self):
"""
Get the WinRM port from the RADL
Returns: int with the port
"""
winrm_port = self.getOutPort(5986)
if not winrm_port:
winrm_port = 5986
return winrm_port
def getSSHPort(self):
"""
Get the SSH port from the RADL
Returns: int with the port
"""
ssh_port = self.getOutPort(22)
if not ssh_port:
ssh_port = 22
return ssh_port
def getOutPort(self, port, protocol="tcp"):
"""
Get the port from the RADL
Returns: int with the port
"""
res = None
public_net = None
for net in self.info.networks:
if net.isPublic():
public_net = net
if public_net:
outports = public_net.getOutPorts()
if outports:
for outport in outports:
if outport.get_local_port() == port and outport.get_protocol() == protocol:
res = outport.get_remote_port()
return res
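    # Illustrative note (added): the public network's "outports" value maps local
    # ports to remote ports; with a mapping whose local port is 22 and remote port
    # is 8022 (written as "8022-22" by setOutPort() below), getOutPort(22) would
    # return 8022. The numbers here are made-up examples.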
def setSSHPort(self, ssh_port):
"""
Set the SSH port in the RADL info of this VM
"""
self.setOutPort(22, ssh_port)
def setOutPort(self, local, remote, protocol="tcp"):
"""
Set the port in the RADL info of this VM
"""
if remote != self.getOutPort(local):
now = str(int(time.time() * 100))
public_net = None
for net in self.info.networks:
if net.isPublic():
public_net = net
            # If it does not exist, create a new public network
if public_net is None:
public_net = network.createNetwork("public." + now, True)
self.info.networks.append(public_net)
outports_str = "%d-%d" % (remote, local)
outports = public_net.getOutPorts()
if outports:
for outport in outports:
if outport.get_local_port() != local or outport.get_protocol() != protocol:
if outport.get_protocol() == "tcp":
outports_str += "," + (str(outport.get_remote_port()) + "-" +
str(outport.get_local_port()))
else:
outports_str += "," + (str(outport.get_remote_port()) + "/udp" + "-" +
str(outport.get_local_port()) + "/udp")
public_net.setValue('outports', outports_str)
# get the ID
num_net = self.getNumNetworkWithConnection(public_net.id)
if num_net is None:
                # There is a public net but it has not been used in this VM
num_net = self.getNumNetworkIfaces()
self.info.systems[0].setValue(
'net_interface.' + str(num_net) + '.connection', public_net.id)
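    # Minimal usage sketch (added; hypothetical values): to map the local SSH
    # port 22 of this VM to remote port 8022 on its public network:
    #   vm.setOutPort(22, 8022)   # same effect as vm.setSSHPort(8022)
    # after which getOutPort(22) should report 8022.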
def update_status(self, auth, force=False):
"""
Update the status of this virtual machine.
Only performs the update with UPDATE_FREQUENCY secs.
Args:
- auth(Authentication): parsed authentication tokens.
- force(boolean): force the VM update
Return:
- boolean: True if the information has been updated, false otherwise
"""
with self._lock:
            # In case a VM failed during creation, do not update
if self.state == VirtualMachine.FAILED and self.id is None:
return False
if self.deleting:
self.state = VirtualMachine.DELETING
return True
now = int(time.time())
state = self.state
updated = False
            # To avoid refreshing the information too quickly
if force or now - self.last_update > Config.VM_INFO_UPDATE_FREQUENCY:
success = False
try:
(success, new_vm) = self.getCloudConnector().updateVMInfo(self, auth)
if success:
state = new_vm.state
updated = True
self.last_update = now
else:
self.log_error("Error updating VM status: %s" % new_vm)
except Exception:
self.log_exception("Error updating VM status.")
if not success and self.creating:
self.log_info("VM is in creation process, set pending state")
state = VirtualMachine.PENDING
                # If we have had problems updating the VM info for too long, set it to
                # unknown, unless we are still creating the VM
if now - self.last_update > Config.VM_INFO_UPDATE_ERROR_GRACE_PERIOD and not self.creating:
new_state = VirtualMachine.UNKNOWN
self.log_warn("Grace period to update VM info passed. Set state to 'unknown'")
else:
if state not in [VirtualMachine.RUNNING, VirtualMachine.CONFIGURED, VirtualMachine.UNCONFIGURED]:
new_state = state
elif self.is_configured() is None:
new_state = VirtualMachine.RUNNING
elif self.is_configured():
new_state = VirtualMachine.CONFIGURED
else:
new_state = VirtualMachine.UNCONFIGURED
self.state = new_state
self.info.systems[0].setValue("state", new_state)
return updated
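    # Usage sketch (added; illustrative only): a caller could force a refresh and
    # then read the new state, where "vm" is a VirtualMachine instance and "auth"
    # the parsed Authentication data:
    #   if vm.update_status(auth, force=True):
    #       print(vm.state)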
@staticmethod
def add_public_net(radl):
"""
Add a public net to the radl specified
"""
now = str(int(time.time() * 100))
public_nets = []
for net in radl.networks:
if net.isPublic():
public_nets.append(net)
if public_nets:
public_net = None
for net in public_nets:
num_net = radl.systems[0].getNumNetworkWithConnection(net.id)
if num_net is not None:
public_net = net
break
if not public_net:
                # There is a public net but it has not been used in this VM
public_net = public_nets[0]
num_net = radl.systems[0].getNumNetworkIfaces()
else:
            # There is no public net, create one
public_net = network.createNetwork("public." + now, True)
radl.networks.append(public_net)
num_net = radl.systems[0].getNumNetworkIfaces()
return public_net, num_net
def setIps(self, public_ips, private_ips, remove_old=False, ignore_nets=None):
"""
Set the specified IPs in the VM RADL info
"""
if not ignore_nets:
ignore_nets = []
vm_system = self.info.systems[0]
# First remove old ip values
# in case that some IP has been removed from the VM
if remove_old:
cont = 0
while vm_system.getValue('net_interface.%d.connection' % cont):
if vm_system.getValue('net_interface.%d.ip' % cont):
vm_system.delValue('net_interface.%d.ip' % cont)
cont += 1
if public_ips and not set(public_ips).issubset(set(private_ips)):
public_net, num_net = self.add_public_net(self.info)
real_public_ips = [public_ip for public_ip in public_ips if public_ip not in private_ips]
if real_public_ips:
vm_system.setValue('net_interface.%s.connection' % num_net, public_net.id)
if len(real_public_ips) > 1:
self.log_warn("Node with more that one public IP!")
self.log_debug(real_public_ips)
if len(real_public_ips) == 2:
ip1 = IPAddress(real_public_ips[0])
ip2 = IPAddress(real_public_ips[1])
if ip1.version != ip2.version:
self.log_info("It seems that there are one IPv4 and other IPv6. Get the IPv4 one.")
if ip1.version == 4:
vm_system.setValue('net_interface.%s.ip' % num_net, str(real_public_ips[0]))
vm_system.setValue('net_interface.%s.ipv6' % num_net, str(real_public_ips[1]))
else:
vm_system.setValue('net_interface.%s.ip' % num_net, str(real_public_ips[1]))
vm_system.setValue('net_interface.%s.ipv6' % num_net, str(real_public_ips[0]))
else:
self.log_info("It seems that both are from the same version first one will be used")
vm_system.setValue('net_interface.%s.ip' % num_net, str(real_public_ips[0]))
else:
self.log_info("It seems that there are more that 2 last ones will be used")
for ip in real_public_ips:
if IPAddress(ip).version == 4:
vm_system.setValue('net_interface.%s.ip' % num_net, str(ip))
else:
vm_system.setValue('net_interface.%s.ipv6' % num_net, str(ip))
else:
# The usual case
if IPAddress(real_public_ips[0]).version == 6:
self.log_warn("Node only with one IPv6!!")
vm_system.setValue('net_interface.%s.ipv6' % num_net, str(real_public_ips[0]))
else:
vm_system.setValue('net_interface.%s.ip' % num_net, str(real_public_ips[0]))
if private_ips:
private_net_map = {}
for private_ip in private_ips:
private_net_mask = None
# Get the private network mask
for mask in Config.PRIVATE_NET_MASKS:
if IPAddress(private_ip) in IPNetwork(mask):
private_net_mask = mask
break
if not private_net_mask:
parts = private_ip.split(".")
private_net_mask = "%s.0.0.0/8" % parts[0]
self.log_warn("%s is not in known private net groups. Using mask: %s" % (
private_ip, private_net_mask))
# Search in previous used private ips
private_net = None
for net_mask, net in private_net_map.items():
if IPAddress(private_ip) in IPNetwork(net_mask):
private_net = net
# Search in the RADL nets, first in the nets this VM is
# connected to and check the CIDR of the nets
if private_net is None:
for net in self.info.networks:
if (not net.isPublic() and net not in private_net_map.values() and
net.id not in ignore_nets and self.getNumNetworkWithConnection(net.id) is not None and
net.getValue('cidr') and IPAddress(private_ip) in IPNetwork(net.getValue('cidr'))):
private_net = net
private_net_map[net.getValue('cidr')] = net
break
# Now in the RADL nets this VM is connected to
# but without CIDR set
if private_net is None:
for net in self.info.networks:
if (not net.isPublic() and net not in private_net_map.values() and
net.id not in ignore_nets and self.getNumNetworkWithConnection(net.id) is not None and
not net.getValue('cidr')):
private_net = net
private_net_map[private_net_mask] = net
break
# Search in the rest of RADL nets
if private_net is None:
# First check the CIDR
for net in self.info.networks:
if (not net.isPublic() and net not in private_net_map.values() and net.id not in ignore_nets and
net.getValue('cidr') and IPAddress(private_ip) in IPNetwork(net.getValue('cidr'))):
private_net = net
private_net_map[private_net_mask] = net
break
                    # Then search in the rest
for net in self.info.networks:
if (not net.isPublic() and net not in private_net_map.values() and net.id not in ignore_nets and
not net.getValue('cidr')):
private_net = net
private_net_map[private_net_mask] = net
break
# if it is still None, then create a new one
if private_net is None:
private_net = network.createNetwork("private." + private_net_mask.split('/')[0])
self.info.networks.append(private_net)
num_net = self.getNumNetworkIfaces()
else:
                    # If there is a private net, get the ID
num_net = self.getNumNetworkWithConnection(private_net.id)
if num_net is None:
                        # There is a private net but it has not been used in this VM
num_net = self.getNumNetworkIfaces()
if IPAddress(private_ip).version == 6:
vm_system.setValue('net_interface.%s.ipv6' % num_net, str(private_ip))
else:
vm_system.setValue('net_interface.%s.ip' % num_net, str(private_ip))
vm_system.setValue('net_interface.%s.connection' % num_net, private_net.id)
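    # Illustrative result (added; made-up addresses): after
    # setIps(['8.8.4.4'], ['10.0.0.5']) the RADL system would carry values such as
    # net_interface.N.connection / net_interface.N.ip for the public address and
    # another interface for the private one; the exact interface numbers depend on
    # the networks already defined in the RADL.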
def get_ssh(self, retry=False):
"""
Get SSH object to connect with this VM
"""
proxy_host = None
with self._lock:
(user, passwd, _, private_key) = self.getCredentialValues()
ip = self.getPublicIP()
if ip is None:
ip = self.getPrivateIP()
if ip and self.getProxyHost():
proxy_host = self.getProxyHost()
if ip is None:
self.log_warn("VM ID %s does not have IP. Do not return SSH Object." % self.im_id)
return None
if retry:
return SSHRetry(ip, user, passwd, private_key, self.getSSHPort(), proxy_host)
else:
return SSH(ip, user, passwd, private_key, self.getSSHPort(), proxy_host)
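    # Usage sketch (added; illustrative): obtain an SSH client for this VM and run
    # a command, assuming credentials and an IP are available:
    #   ssh = vm.get_ssh(retry=True)
    #   if ssh:
    #       (stdout, stderr, code) = ssh.execute("hostname")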
def is_ctxt_process_running(self):
""" Return the PID of the running process or None if it is not running """
return self.ctxt_pid
def launch_check_ctxt_process(self):
"""
Launch the check_ctxt_process as a thread
"""
t = threading.Thread(target=self.check_ctxt_process)
t.daemon = True
t.start()
def kill_check_ctxt_process(self):
"""
Kill the check_ctxt_process thread
"""
if self.ctxt_pid:
if self.ctxt_pid != self.WAIT_TO_PID:
ssh = self.get_ssh_ansible_master()
try:
if not ssh.test_connectivity(5):
self.log_info("Timeout killing ctxt process: %s." % self.ctxt_pid)
self.ctxt_pid = None
self.configured = False
return
self.log_info("Killing ctxt process with pid: %s" % self.ctxt_pid)
# Try to get PGID to kill all child processes
pgkill_success = False
(stdout, stderr, code) = ssh.execute('ps -o "%r" ' + str(int(self.ctxt_pid)), 5)
if code == 0:
out_parts = stdout.split("\n")
if len(out_parts) == 3:
try:
pgid = int(out_parts[1])
(stdout, stderr, code) = ssh.execute("kill -9 -" + str(pgid), 10)
if code == 0:
pgkill_success = True
else:
self.log_error("Error getting PGID of pid: " + str(self.ctxt_pid) +
": " + stderr + ". Using only PID.")
except Exception:
self.log_exception("Error getting PGID of pid: " + str(self.ctxt_pid) +
": " + stderr + ". Using only PID.")
else:
self.log_error("Error getting PGID of pid: " + str(self.ctxt_pid) + ": " +
stdout + ". Using only PID.")
else:
self.log_error("Error getting PGID of pid: " + str(self.ctxt_pid) + ": " +
stderr + ". Using only PID.")
if not pgkill_success:
ssh.execute("kill -9 " + str(int(self.ctxt_pid)), 5)
except Exception:
self.log_exception("Error killing ctxt process with pid: " + str(self.ctxt_pid))
self.ctxt_pid = None
self.configured = False
def check_ctxt_process(self):
"""
Periodically checks if the PID of the ctxt process is running
"""
if self.ctxt_pid == self.WAIT_TO_PID:
self.ctxt_pid = None
self.configured = False
initial_count_out = self.cont_out
wait = 0
while self.ctxt_pid:
if self.destroy:
# If the VM has been destroyed set pid to None and return
self.log_debug("VM %s deleted. Exit check_ctxt_process thread." % self.im_id)
self.ctxt_pid = None
return None
ctxt_pid = self.ctxt_pid
if ctxt_pid != self.WAIT_TO_PID:
ssh = self.get_ssh_ansible_master()
try:
self.log_info("Getting status of ctxt process with pid: " + str(ctxt_pid))
(_, _, exit_status) = ssh.execute("ps " + str(ctxt_pid))
self.ssh_connect_errors = 0
except Exception as ex:
self.log_warn("Error getting status of ctxt process with pid: %s. %s" % (ctxt_pid, ex))
exit_status = 0
self.ssh_connect_errors += 1
if self.ssh_connect_errors > Config.MAX_SSH_ERRORS:
self.log_error("Too much errors getting status of ctxt process with pid: " +
str(ctxt_pid) + ". Forget it.")
self.ssh_connect_errors = 0
self.configured = False
self.ctxt_pid = None
self.cont_out = initial_count_out + ("Too much errors getting the status of ctxt process."
" Check some network connection problems or if user "
"credentials has been changed.")
return None
ip = self.getPublicIP()
if not ip:
                ip = self.getPrivateIP()
remote_dir = "%s/%s/%s_%s" % (Config.REMOTE_CONF_DIR, self.inf.id, ip, self.im_id)
if exit_status != 0:
# The process has finished, get the outputs
self.log_info("The process %s has finished, get the outputs" % ctxt_pid)
ctxt_log = self.get_ctxt_log(remote_dir, ssh, True)
msg = self.get_ctxt_output(remote_dir, ssh, True)
if ctxt_log:
self.cont_out = initial_count_out + msg + ctxt_log
else:
self.cont_out = initial_count_out + msg + \
"Error getting contextualization process log."
self.ctxt_pid = None
else:
# Get the log of the process to update the cont_out
# dynamically
if Config.UPDATE_CTXT_LOG_INTERVAL > 0 and wait > Config.UPDATE_CTXT_LOG_INTERVAL:
wait = 0
self.log_info("Get the log of the ctxt process with pid: " + str(ctxt_pid))
ctxt_log = self.get_ctxt_log(remote_dir, ssh)
self.cont_out = initial_count_out + ctxt_log
# The process is still running, wait
self.log_info("The process %s is still running. wait." % ctxt_pid)
time.sleep(Config.CHECK_CTXT_PROCESS_INTERVAL)
wait += Config.CHECK_CTXT_PROCESS_INTERVAL
else:
# We are waiting the PID, sleep
time.sleep(Config.CHECK_CTXT_PROCESS_INTERVAL)
return self.ctxt_pid
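    # Note (added): the loop above polls the contextualization (ctxt) process over
    # SSH every Config.CHECK_CTXT_PROCESS_INTERVAL seconds, refreshing cont_out
    # from the remote log while it runs, and collects the final output once
    # "ps <pid>" reports that the process has exited.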
def is_configured(self):
if self.inf.is_configured() is False:
return False
else:
if self.inf.vm_in_ctxt_tasks(self) or self.ctxt_pid:
# If there are ctxt tasks pending for this VM, return None
return None
else:
# Otherwise return the value of configured
return self.configured
def get_ctxt_log(self, remote_dir, ssh, delete=False):
tmp_dir = tempfile.mkdtemp()
conf_out = ""
# Download the contextualization agent log
try:
# Get the messages of the contextualization process
self.log_debug("Get File: " + remote_dir + '/ctxt_agent.log')
ssh.sftp_get(remote_dir + '/ctxt_agent.log', tmp_dir + '/ctxt_agent.log')
with open(tmp_dir + '/ctxt_agent.log') as f:
conf_out = f.read()
# Remove problematic chars
conf_out = str("".join(list(filter(lambda x: x in string.printable,
conf_out))).encode("ascii", "replace").decode("utf-8"))
try:
if delete:
ssh.sftp_remove(remote_dir + '/ctxt_agent.log')
except Exception:
self.log_exception(
"Error deleting remote contextualization process log: " + remote_dir + '/ctxt_agent.log')
except Exception:
self.log_exception(
"Error getting contextualization process log: " + remote_dir + '/ctxt_agent.log')
self.configured = False
finally:
shutil.rmtree(tmp_dir, ignore_errors=True)
return conf_out
def get_ctxt_output(self, remote_dir, ssh, delete=False):
tmp_dir = tempfile.mkdtemp()
msg = ""
# Download the contextualization agent log
try:
# Get the JSON output of the ctxt_agent
self.log_debug("Get File: " + remote_dir + '/ctxt_agent.out')
ssh.sftp_get(remote_dir + '/ctxt_agent.out', tmp_dir + '/ctxt_agent.out')
with open(tmp_dir + '/ctxt_agent.out') as f:
ctxt_agent_out = json.load(f)
try:
if delete:
ssh.sftp_remove(remote_dir + '/ctxt_agent.out')
except Exception:
self.log_exception(
"Error deleting remote contextualization process output: " + remote_dir + '/ctxt_agent.out')
# And process it
self.process_ctxt_agent_out(ctxt_agent_out)
msg = "Contextualization agent output processed successfully"
except IOError as ex:
msg = "Error getting contextualization agent output " + \
remote_dir + "/ctxt_agent.out: No such file."
self.log_error(msg)
self.configured = False
try:
# Get the output of the ctxt_agent to guess why the agent
# output is not there.
src = [remote_dir + '/stdout', remote_dir + '/stderr']
dst = [tmp_dir + '/stdout', tmp_dir + '/stderr']
ssh.sftp_get_files(src, dst)
stdout = ""
with open(tmp_dir + '/stdout') as f:
stdout += "\n" + f.read() + "\n"
with open(tmp_dir + '/stderr') as f:
stdout += f.read() + "\n"
self.log_error(stdout)
msg += stdout
except Exception:
self.log_exception("Error getting stdout and stderr to guess why the agent output is not there.")
except Exception as ex:
self.log_exception("Error getting contextualization agent output: " + remote_dir + '/ctxt_agent.out')
self.configured = False
msg = "Error getting contextualization agent output: " + str(ex)
finally:
shutil.rmtree(tmp_dir, ignore_errors=True)
return msg
def process_ctxt_agent_out(self, ctxt_agent_out):
"""
Get the output file of the ctxt_agent to process the results of the operations
"""
if 'CHANGE_CREDS' in ctxt_agent_out and ctxt_agent_out['CHANGE_CREDS']:
self.info.systems[0].updateNewCredentialValues()
if 'OK' in ctxt_agent_out and ctxt_agent_out['OK']:
self.configured = True
else:
self.configured = False
def get_vm_info(self):
res = RADL()
res.networks = self.info.networks
res.systems = self.info.systems
return res
def get_ansible_host(self):
ansible_host = None
if self.requested_radl.ansible_hosts:
ansible_host = self.requested_radl.ansible_hosts[0]
if self.requested_radl.systems[0].getValue("ansible_host"):
ansible_host = self.requested_radl.get_ansible_by_id(
self.requested_radl.systems[0].getValue("ansible_host"))
return ansible_host
def get_ssh_ansible_master(self, retry=True):
ansible_host = self.get_ansible_host()
if ansible_host:
(user, passwd, private_key) = ansible_host.getCredentialValues()
if retry:
return SSHRetry(ansible_host.getHost(), user, passwd, private_key)
else:
return SSH(ansible_host.getHost(), user, passwd, private_key)
else:
if self.inf.vm_master:
return self.inf.vm_master.get_ssh(retry=retry)
else:
self.log_warn("There is not master VM. Do not return SSH object.")
return None
def __lt__(self, other):
return True
def get_cont_msg(self):
if self.error_msg:
res = self.error_msg + "\n" + self.cont_out
else:
res = self.cont_out
if self.cloud_connector and self.cloud_connector.error_messages:
res += self.cloud_connector.error_messages
return res
def is_last_in_cloud(self, delete_list, remain_vms):
"""
Check if this VM is the last in the cloud provider
to send the correct flag to the finalize function to clean
resources correctly
"""
for v in remain_vms:
if v.cloud.type == self.cloud.type and v.cloud.server == self.cloud.server:
# There are at least one VM in the same cloud
# that will remain. This is not the last one
return False
# Get the list of VMs on the same cloud to be deleted
delete_list_cloud = [v for v in delete_list if (v.cloud.type == self.cloud.type and
v.cloud.server == self.cloud.server)]
# And return true in the last of these VMs
return self == delete_list_cloud[-1]
def get_boot_curl_commands(self):
from IM.REST import REST_URL
rest_url = REST_URL if REST_URL else ""
url = rest_url + '/infrastructures/' + str(self.inf.id) + '/vms/' + str(self.creation_im_id) + '/command'
auth = self.inf.auth.getAuthInfo("InfrastructureManager")[0]
if 'token' in auth:
imauth = "token = %s" % auth['token']
else:
imauth = "username = %s; password = %s" % (auth['username'], auth['password'])
command = ('curl -s --insecure -H "Authorization: type = InfrastructureManager; %s" '
'-H "Accept: text/plain" %s' % (imauth, url))
return [command + " | bash &"]
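    # Illustrative output (added; placeholder IDs and credentials): the returned
    # list holds a single line of the form
    #   curl -s --insecure -H "Authorization: type = InfrastructureManager;
    #   username = user; password = pass" -H "Accept: text/plain"
    #   <rest_url>/infrastructures/<inf_id>/vms/<creation_im_id>/command | bash &
    # i.e. at boot the VM fetches its contextualization command from the IM REST
    # API and pipes it to bash.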
def getSSHReversePort(self):
return self.SSH_REVERSE_BASE_PORT + int(self.creation_im_id)
def get_ssh_command(self):
ssh = self.get_ssh_ansible_master(retry=False)
if not ssh:
return None
ssh_port = ssh.port
reverse_opt = "-R %d:localhost:22" % (self.SSH_REVERSE_BASE_PORT + self.creation_im_id)
if ssh.private_key:
filename = "/tmp/%s_%s.pem" % (self.inf.id, self.im_id)
command = 'echo "%s" > %s && chmod 400 %s ' % (ssh.private_key, filename, filename)
command += ('&& ssh -N %s -p %s -i %s -o "UserKnownHostsFile=/dev/null"'
' -o "StrictHostKeyChecking=no" %s@%s &' % (reverse_opt,
ssh_port,
filename,
ssh.username,
ssh.host))
else:
command = ('sshpass -p%s ssh -N %s -p %s -o "UserKnownHostsFile=/dev/null"'
' -o "StrictHostKeyChecking=no" %s@%s &' % (ssh.password,
reverse_opt,
ssh_port,
ssh.username,
ssh.host))
return command
@staticmethod
def delete_public_nets(radl):
"""
Helper function to correctly delete references to public nets in an RADL
"""
nets_id = [net.id for net in radl.networks if net.isPublic()]
system = radl.systems[0]
i = 0
while system.getValue('net_interface.%d.connection' % i):
next_net = system.getValue('net_interface.%d.connection' % (i + 1))
            next_dns = system.getValue('net_interface.%d.dns_name' % (i + 1))
f = system.getFeature("net_interface.%d.connection" % i)
if f.value in nets_id:
if next_net:
system.setValue('net_interface.%d.connection' % i, next_net)
system.setValue('net_interface.%d.dns_name' % i, next_dns)
else:
system.delValue('net_interface.%d.connection' % i)
system.delValue('net_interface.%d.dns_name' % i)
if system.getValue('net_interface.%d.ip' % i):
system.delValue('net_interface.%d.ip' % i)
i += 1
| gpl-3.0 | -3,530,185,177,929,877,000 | 39.952889 | 120 | 0.525352 | false |
fabioz/Pydev | plugins/org.python.pydev.core/pysrc/build_tools/generate_code.py | 1 | 6504 | '''
This module should be run to recreate the files that we generate automatically
(i.e.: modules that shouldn't be traced and cython .pyx)
'''
from __future__ import print_function
import os
import struct
import re
def is_python_64bit():
return (struct.calcsize('P') == 8)
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def get_cython_contents(filename):
if filename.endswith('.pyc'):
filename = filename[:-1]
state = 'regular'
replacements = []
new_contents = []
with open(filename, 'r') as stream:
for line in stream:
strip = line.strip()
if state == 'regular':
if strip == '# IFDEF CYTHON':
state = 'cython'
new_contents.append('%s -- DONT EDIT THIS FILE (it is automatically generated)\n' % line.replace('\n', '').replace('\r', ''))
continue
new_contents.append(line)
elif state == 'cython':
if strip == '# ELSE':
state = 'nocython'
new_contents.append(line)
continue
elif strip == '# ENDIF':
state = 'regular'
new_contents.append(line)
continue
if strip == '#':
continue
assert strip.startswith('# '), 'Line inside # IFDEF CYTHON must start with "# ". Found: %s' % (strip,)
strip = strip.replace('# ', '', 1).strip()
if strip.startswith('cython_inline_constant:'):
strip = strip.replace('cython_inline_constant:', '')
word_to_replace, replacement = strip.split('=')
replacements.append((word_to_replace.strip(), replacement.strip()))
continue
line = line.replace('# ', '', 1)
new_contents.append(line)
elif state == 'nocython':
if strip == '# ENDIF':
state = 'regular'
new_contents.append(line)
continue
new_contents.append('# %s' % line)
assert state == 'regular', 'Error: # IFDEF CYTHON found without # ENDIF'
ret = ''.join(new_contents)
for (word_to_replace, replacement) in replacements:
ret = re.sub(r"\b%s\b" % (word_to_replace,), replacement, ret)
return ret
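# Illustrative example of the markers handled above (made-up code, not taken from
# the real pydevd sources):
#
#     # IFDEF CYTHON
#     # cdef handle(int frame_id):
#     # ELSE
#     def handle(frame_id):
#     # ENDIF
#
# In the generated .pyx the commented-out Cython block is un-commented and the
# plain Python block is commented out instead; any
# "# cython_inline_constant: NAME = VALUE" lines are applied as whole-word
# textual replacements over the result.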
def _generate_cython_from_files(target, modules):
contents = ['''from __future__ import print_function
# Important: Autogenerated file.
# DO NOT edit manually!
# DO NOT edit manually!
''']
for mod in modules:
contents.append(get_cython_contents(mod.__file__))
with open(target, 'w') as stream:
stream.write(''.join(contents))
def generate_dont_trace_files():
template = '''# Important: Autogenerated file.
# DO NOT edit manually!
# DO NOT edit manually!
from _pydevd_bundle.pydevd_constants import IS_PY3K
LIB_FILE = 1
PYDEV_FILE = 2
DONT_TRACE_DIRS = {
%(pydev_dirs)s
}
DONT_TRACE = {
# commonly used things from the stdlib that we don't want to trace
'Queue.py':LIB_FILE,
'queue.py':LIB_FILE,
'socket.py':LIB_FILE,
'weakref.py':LIB_FILE,
'_weakrefset.py':LIB_FILE,
'linecache.py':LIB_FILE,
'threading.py':LIB_FILE,
'dis.py':LIB_FILE,
# things from pydev that we don't want to trace
'_pydev_execfile.py':PYDEV_FILE,
%(pydev_files)s
}
if IS_PY3K:
# if we try to trace io.py it seems it can get halted (see http://bugs.python.org/issue4716)
DONT_TRACE['io.py'] = LIB_FILE
# Don't trace common encodings too
DONT_TRACE['cp1252.py'] = LIB_FILE
DONT_TRACE['utf_8.py'] = LIB_FILE
DONT_TRACE['codecs.py'] = LIB_FILE
'''
pydev_files = []
pydev_dirs = []
exclude_dirs = [
'.git',
'.settings',
'build',
'build_tools',
'dist',
'pydevd.egg-info',
'pydevd_attach_to_process',
'pydev_sitecustomize',
'stubs',
'tests',
'tests_mainloop',
'tests_python',
'tests_runfiles',
'test_pydevd_reload',
'third_party',
'__pycache__',
'pydev_ipython',
'vendored',
'.mypy_cache',
'pydevd.egg-info',
]
for root, dirs, files in os.walk(root_dir):
for d in dirs:
if 'pydev' in d and d != 'pydevd.egg-info':
# print(os.path.join(root, d))
pydev_dirs.append(" '%s': PYDEV_FILE," % (d,))
for d in exclude_dirs:
try:
dirs.remove(d)
except:
pass
for f in files:
if f.endswith('.py'):
if f not in (
'__init__.py',
'runfiles.py',
'pydev_coverage.py',
'pydev_pysrc.py',
'setup.py',
'setup_cython.py',
'interpreterInfo.py',
'conftest.py',
):
pydev_files.append(" '%s': PYDEV_FILE," % (f,))
contents = template % (dict(
pydev_files='\n'.join(sorted(pydev_files)),
pydev_dirs='\n'.join(sorted(pydev_dirs)),
))
assert 'pydevd.py' in contents
assert 'pydevd_dont_trace.py' in contents
with open(os.path.join(root_dir, '_pydevd_bundle', 'pydevd_dont_trace_files.py'), 'w') as stream:
stream.write(contents)
def remove_if_exists(f):
try:
if os.path.exists(f):
os.remove(f)
except:
import traceback;traceback.print_exc()
def generate_cython_module():
remove_if_exists(os.path.join(root_dir, '_pydevd_bundle', 'pydevd_cython.pyx'))
target = os.path.join(root_dir, '_pydevd_bundle', 'pydevd_cython.pyx')
curr = os.environ.get('PYDEVD_USE_CYTHON')
try:
os.environ['PYDEVD_USE_CYTHON'] = 'NO'
from _pydevd_bundle import pydevd_additional_thread_info_regular
from _pydevd_bundle import pydevd_frame, pydevd_trace_dispatch_regular
_generate_cython_from_files(target, [pydevd_additional_thread_info_regular, pydevd_frame, pydevd_trace_dispatch_regular])
finally:
if curr is None:
del os.environ['PYDEVD_USE_CYTHON']
else:
os.environ['PYDEVD_USE_CYTHON'] = curr
if __name__ == '__main__':
generate_dont_trace_files()
generate_cython_module()
| epl-1.0 | -7,308,622,302,311,340,000 | 27.034483 | 145 | 0.535209 | false |
demisto/content | Packs/FeedAWS/Integrations/FeedAWS/FeedAWS.py | 1 | 1576 | import demistomock as demisto
from CommonServerPython import *
def get_feed_config(services: list, regions: list):
"""
Creates the configuration for each AWS service.
Args:
services: The selected services.
regions: The selected regions.
Returns:
The feed configuration.
"""
available_feeds = {
'AMAZON',
'EC2',
'ROUTE53',
'ROUTE53_HEALTHCHECKS',
'CLOUDFRONT',
'S3'
}
region_path = ''
if regions:
region_path = f" && contains({regions}, region)"
feed_name_to_config = {}
for feed in available_feeds:
feed_name_to_config[feed] = {
'url': 'https://ip-ranges.amazonaws.com/ip-ranges.json',
'extractor': f"prefixes[?service=='{feed}'{region_path}]",
'indicator': 'ip_prefix',
'indicator_type': FeedIndicatorType.CIDR,
'fields': ['region', 'service'],
'mapping': {
'region': 'region',
'service': 'service'
}
}
return {feed_name: feed_name_to_config.get(feed_name) for feed_name in services}
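# Illustrative result (added; assumed shape): get_feed_config(['AMAZON'], ['us-east-1'])
# returns a dict with a single 'AMAZON' entry whose 'extractor' is
# "prefixes[?service=='AMAZON' && contains(['us-east-1'], region)]" and whose
# 'indicator_type' is CIDR, so the JSON feed module keeps only the ip_prefix
# entries of that service in that region.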
from JSONFeedApiModule import * # noqa: E402
def main():
params = {k: v for k, v in demisto.params().items() if v is not None}
params['feed_name_to_config'] = get_feed_config(params.get('services', ['AMAZON']),
argToList(params.get('regions', [])))
feed_main(params, 'AWS Feed', 'aws')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| mit | -1,924,600,173,518,194,400 | 26.649123 | 89 | 0.543147 | false |
SirCmpwn/hooks | hooks.py | 1 | 2560 | from flask import Flask, request, abort
from configparser import ConfigParser
import sys
import os
from subprocess import Popen, PIPE, STDOUT
import urllib
import json
import logging
config_paths = ["./config.ini", "/etc/hooks.conf"]
config = ConfigParser()
for p in config_paths:
try:
config.readfp(open(p))
break
except:
pass
app = Flask(__name__)
class Hook():
def __init__(self, name, config):
self.name = name
self.repository = config.get(name, "repository")
self.branch = config.get(name, "branch")
self.command = config.get(name, "command")
self.valid_ips = config.get(name, "valid_ips")
hooks = list()
for key in config:
if key == 'DEFAULT':
continue
hooks.append(Hook(key, config))
print("Loaded {} hooks".format(len(hooks)))
def makeMask(n):
return (2 << n - 1) - 1
def dottedQuadToNum(ip):
parts = ip.split(".")
return int(parts[0]) | (int(parts[1]) << 8) | (int(parts[2]) << 16) | (int(parts[3]) << 24)
def networkMask(ip, bits):
return dottedQuadToNum(ip) & makeMask(bits)
def addressInNetwork(ip, net):
return ip & net == net
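# Illustrative use of the helpers above (added; made-up address and prefix),
# mirroring the check performed in hook_publish() below:
#   allowed = networkMask("192.30.252.0", 22)
#   addressInNetwork(dottedQuadToNum("192.30.252.10"), allowed)
# i.e. the hook's remote address is converted to an integer and tested against
# each "ip/bits" entry from the hook's valid_ips setting.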
@app.route('/hook', methods=['POST'])
def hook_publish():
raw = request.data.decode("utf-8")
try:
event = json.loads(raw)
except:
return "Hook rejected: invalid JSON", 400
repository = "{}/{}".format(event["repository"]["owner"]["name"], event["repository"]["name"])
matches = [h for h in hooks if h.repository == repository]
if len(matches) == 0:
return "Hook rejected: unknown repository {}".format(repository)
hook = matches[0]
allow = False
remote = request.remote_addr
if remote == "127.0.0.1" and "X-Real-IP" in request.headers:
remote = request.headers.get("X-Real-IP")
for ip in hook.valid_ips.split(","):
parts = ip.split("/")
range = 32
if len(parts) != 1:
range = int(parts[1])
addr = networkMask(parts[0], range)
if addressInNetwork(dottedQuadToNum(remote), addr):
allow = True
if not allow:
return "Hook rejected: unauthorized IP", 403
if any("[noupdate]" in c["message"] for c in event["commits"]):
return "Hook ignored: commit specifies [noupdate]"
if "refs/heads/" + hook.branch == event["ref"]:
print("Executing hook for " + hook.name)
p=Popen(hook.command.split(), stdin=PIPE)
p.communicate(input=raw.encode())
return "Hook accepted"
return "Hook ignored: wrong branch"
| mit | 2,155,854,252,527,766,500 | 27.764045 | 98 | 0.610156 | false |
labyrinth-team/labyrinth | labyrinth_lib/MMapArea.py | 1 | 54708 | #! /usr/bin/env python
# MMapArea.py
# This file is part of Labyrinth
#
# Copyright (C) 2006 - Don Scorgie <[email protected]>
#
# Labyrinth is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Labyrinth is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Labyrinth; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA
#
import math
import gtk
import pango
import gobject
import gettext
import copy
_ = gettext.gettext
import xml.dom.minidom as dom
import Links
import TextThought
import ImageThought
import DrawingThought
import ResourceThought
import UndoManager
import utils
RAD_UP = (- math.pi / 2.)
RAD_DOWN = (math.pi / 2.)
RAD_LEFT = (math.pi)
RAD_RIGHT = (0)
MODE_EDITING = 0
MODE_IMAGE = 1
MODE_DRAW = 2
MODE_RESOURCE = 3
# Until all references of MODE_MOVING are removed...
MODE_MOVING = 999
VIEW_LINES = 0
VIEW_BEZIER = 1
TYPE_TEXT = 0
TYPE_IMAGE = 1
TYPE_DRAWING = 2
TYPE_RESOURCE = 3
# TODO: Need to expand to support popup menus
MENU_EMPTY_SPACE = 0
# UNDO actions
UNDO_MOVE = 0
UNDO_CREATE = 1
UNDO_DELETE = 2
UNDO_DELETE_SINGLE = 3
UNDO_COMBINE_DELETE_NEW = 4
UNDO_DELETE_LINK = 5
UNDO_STRENGTHEN_LINK = 6
UNDO_CREATE_LINK = 7
UNDO_ALIGN = 8
# Note: This is (atm) very broken. It will allow you to create new canvases, but not
# create new thoughts or load existing maps.
# To get it working either fix the TODO list at the bottom of the class, implement the
# necessary features within all the thought types. If you do, please send a patch ;)
# OR: Change this class to MMapAreaNew and MMapAreaOld to MMapArea
class MMapArea (gtk.DrawingArea):
'''A MindMapArea Widget. A blank canvas with a collection of child thoughts.\
It is responsible for processing signals and such from the whole area and \
passing these on to the correct child. It also informs things when to draw'''
__gsignals__ = dict (title_changed = (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_STRING, )),
doc_save = (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
doc_delete = (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
()),
change_mode = (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_INT, )),
change_buffer = (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_OBJECT, )),
text_selection_changed = (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_INT, gobject.TYPE_INT, gobject.TYPE_STRING)),
thought_selection_changed = (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
set_focus = (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT, gobject.TYPE_BOOLEAN)),
set_attrs = (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_BOOLEAN, gobject.TYPE_BOOLEAN, gobject.TYPE_BOOLEAN, pango.FontDescription)))
def __init__(self, undo):
super (MMapArea, self).__init__()
self.thoughts = []
self.links = []
self.selected = []
self.num_selected = 0
self.primary = None
self.editing = None
self.pango_context = self.create_pango_context()
self.undo = undo
self.scale_fac = 1.0
self.translate = False
self.translation = [0.0,0.0]
self.timeout = -1
self.current_cursor = None
self.do_filter = True
self.is_bbox_selecting = False
self.unending_link = None
self.nthoughts = 0
impl = dom.getDOMImplementation()
self.save = impl.createDocument("http://www.donscorgie.blueyonder.co.uk/labns", "MMap", None)
self.element = self.save.documentElement
self.im_context = gtk.IMMulticontext ()
self.mode = MODE_EDITING
self.old_mode = MODE_EDITING
self.connect ("expose_event", self.expose)
self.connect ("button_release_event", self.button_release)
self.connect ("button_press_event", self.button_down)
self.connect ("motion_notify_event", self.motion_event)
self.connect ("key_press_event", self.key_press)
self.connect ("key_release_event", self.key_release)
self.connect ("scroll_event", self.scroll)
self.commit_handler = None
self.title_change_handler = None
self.moving = False
self.move_origin = None
self.move_origin_new = None
self.motion = None
self.move_action = None
self.current_root = []
self.rotation = 0
self.set_events (gtk.gdk.KEY_PRESS_MASK |
gtk.gdk.KEY_RELEASE_MASK |
gtk.gdk.BUTTON_PRESS_MASK |
gtk.gdk.BUTTON_RELEASE_MASK |
gtk.gdk.POINTER_MOTION_MASK |
gtk.gdk.SCROLL_MASK)
self.set_flags (gtk.CAN_FOCUS)
# set theme colors
w = gtk.Window()
w.realize()
style = w.get_style()
self.pango_context.set_font_description(style.font_desc)
self.font_name = style.font_desc.to_string()
utils.default_font = self.font_name
utils.default_colors["text"] = utils.gtk_to_cairo_color(style.text[gtk.STATE_NORMAL])
utils.default_colors["base"] = utils.gtk_to_cairo_color(style.base[gtk.STATE_NORMAL])
self.background_color = style.base[gtk.STATE_NORMAL]
self.foreground_color = style.text[gtk.STATE_NORMAL]
utils.default_colors["bg"] = utils.gtk_to_cairo_color(style.bg[gtk.STATE_NORMAL])
utils.default_colors["fg"] = utils.gtk_to_cairo_color(style.fg[gtk.STATE_NORMAL])
utils.selected_colors["text"] = utils.gtk_to_cairo_color(style.text[gtk.STATE_SELECTED])
utils.selected_colors["bg"] = utils.gtk_to_cairo_color(style.bg[gtk.STATE_SELECTED])
utils.selected_colors["fg"] = utils.gtk_to_cairo_color(style.fg[gtk.STATE_SELECTED])
utils.selected_colors["fill"] = utils.gtk_to_cairo_color(style.base[gtk.STATE_SELECTED])
def transform_coords(self, loc_x, loc_y):
"""Transform view co-ordinates (e.g. from a mouse event) to canvas
co-ordinates.
"""
if hasattr(self, "transform"):
return self.transform.transform_point(loc_x, loc_y)
def untransform_coords(self, loc_x, loc_y):
"""Transform canvas co-ordinates to view co-ordinates."""
if hasattr(self, "untransform"):
return self.untransform.transform_point(loc_x, loc_y)
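    # Note (added; illustrative): self.transform / self.untransform are the cairo
    # matrices captured in draw(), so once a frame has been drawn,
    # untransform_coords(*transform_coords(x, y)) should give back (x, y):
    # transform_coords() maps mouse-event (view) positions onto the scaled and
    # translated canvas, and untransform_coords() performs the inverse mapping.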
def button_down (self, widget, event):
coords = self.transform_coords (event.get_coords()[0], event.get_coords()[1])
ret = False
obj = self.find_object_at (coords)
if event.button == 2:
# Middle button: prepare to drag canvas
self.set_cursor (gtk.gdk.FLEUR)
self.original_translation = self.translation
self.origin_x = event.x
self.origin_y = event.y
return
if obj and obj.want_motion ():
# Object is ready to receive drag events (e.g. for drawing)
self.motion = obj
ret = obj.process_button_down (event, self.mode, coords)
if event.button == 1 and self.mode == MODE_EDITING:
self.moving = not (event.state & gtk.gdk.CONTROL_MASK)
self.move_origin = (coords[0], coords[1])
self.move_origin_new = self.move_origin
return ret
if obj:
# Edit or drag object
if event.button == 1 and self.mode == MODE_EDITING:
self.moving = not (event.state & gtk.gdk.CONTROL_MASK)
self.move_origin = (coords[0], coords[1])
self.move_origin_new = self.move_origin
ret = obj.process_button_down (event, self.mode, coords)
elif event.button == 1 and self.mode == MODE_EDITING and not self.editing:
# Drag a box to select thoughts
self.bbox_origin = coords
self.is_bbox_selecting = True
elif event.button == 3:
# Right click menu
ret = self.create_popup_menu (None, event, MENU_EMPTY_SPACE)
return ret
def undo_move (self, action, mode):
self.undo.block ()
move_thoughts = action.args[1]
old_coords = action.args[0]
new_coords = action.args[2]
move_x = old_coords[0] - new_coords[0]
move_y = old_coords[1] - new_coords[1]
if mode == UndoManager.REDO:
move_x = -move_x
move_y = -move_y
self.unselect_all ()
for t in move_thoughts:
self.select_thought (t, -1)
t.move_by (move_x, move_y)
self.undo.unblock ()
self.invalidate ((old_coords[0], old_coords[1], new_coords[0], new_coords[1]))
def button_release (self, widget, event):
coords = self.transform_coords (event.get_coords()[0], event.get_coords()[1])
if self.mode == MODE_EDITING:
self.set_cursor(gtk.gdk.LEFT_PTR)
else:
self.set_cursor(gtk.gdk.CROSSHAIR)
ret = False
if self.is_bbox_selecting:
# Finished with a selection box
self.is_bbox_selecting = False
self.invalidate ()
try:
if abs(self.bbox_origin[0] - coords[0]) > 2.0:
return True
except AttributeError: # no bbox_current
pass
if self.translate:
# Finish dragging canvas around
self.translate = False
return True
if self.moving and self.move_action:
# Finish moving objects
self.move_action.add_arg (coords)
self.undo.add_undo (self.move_action)
self.move_action = None
self.motion = None
self.moving = False
self.move_origin = None
obj = self.find_object_at (coords)
if event.button == 2:
self.undo.add_undo (UndoManager.UndoAction (self, UndoManager.TRANSFORM_CANVAS,
self.undo_transform_cb,
self.scale_fac, self.scale_fac,
self.original_translation,
self.translation))
if obj:
ret = obj.process_button_release (event, self.unending_link, self.mode, coords)
if len(self.selected) != 1:
self.invalidate() # does not invalidate correctly with obj.get_max_area()
return ret
elif self.unending_link or event.button == 1:
# Create a new thought.
sel = self.selected
thought = self.create_new_thought (coords)
if not thought:
return True
if not self.primary:
self.make_primary (thought)
self.select_thought (thought, None)
else:
self.emit ("change_buffer", thought.extended_buffer)
self.hookup_im_context (thought)
# Creating links adds an undo action. Block it here
self.undo.block ()
for x in self.current_root:
self.create_link (x, None, thought)
for x in self.selected:
x.unselect ()
self.selected = [thought]
thought.select ()
if self.unending_link:
self.unending_link.set_child (thought)
self.links.append (self.unending_link)
element = self.unending_link.get_save_element ()
self.element.appendChild (element)
self.unending_link = None
self.undo.unblock ()
thought.foreground_color = self.foreground_color
thought.background_color = self.background_color
act = UndoManager.UndoAction (self, UNDO_CREATE, self.undo_create_cb, thought, sel, \
self.mode, self.old_mode, event.get_coords())
for l in self.links:
if l.uses (thought):
act.add_arg (l)
if self.undo.peak ().undo_type == UNDO_DELETE_SINGLE:
last_action = self.undo.pop ()
action = UndoManager.UndoAction (self, UNDO_COMBINE_DELETE_NEW, self.undo_joint_cb, \
last_action, act)
self.undo.add_undo (action)
else:
self.undo.add_undo (act)
self.begin_editing (thought)
self.invalidate ()
return ret
def undo_transform_cb (self, action, mode):
if mode == UndoManager.UNDO:
self.scale_fac = action.args[0]
self.translation = action.args[2]
else:
self.scale_fac = action.args[1]
self.translation = action.args[3]
self.invalidate ()
def scroll (self, widget, event):
"""Mouse wheel events - zoom in/out"""
scale = self.scale_fac
if event.direction == gtk.gdk.SCROLL_UP:
# Zoom in
if self.scale_fac > 10:
return # Limit zoom in to 10x
self.scale_fac*=1.2
# Zoom based on where cursor is located.
coords = self.transform_coords(event.x, event.y)
geom = self.window.get_geometry()
middle = self.transform_coords(geom[2]/2.0, geom[3]/2.0)
# Without the /4.0, the window jumps to where the cursor is
# centred, which is very awkward and hard to use. This method makes
# the centre move smoothly towards the cursor's location.
self.translation[0] -= (coords[0] - middle[0])/4.0
self.translation[1] -= (coords[1] - middle[1])/4.0
elif event.direction == gtk.gdk.SCROLL_DOWN:
# Zoom out
if self.scale_fac <= 0.1:
return # Limit zoom out to 1/10th scale
self.scale_fac/=1.2
self.undo.add_undo (UndoManager.UndoAction (self, UndoManager.TRANSFORM_CANVAS, \
self.undo_transform_cb,
scale, self.scale_fac, self.translation,
self.translation))
self.invalidate()
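    # Note (added; illustrative): each wheel step multiplies or divides scale_fac
    # by 1.2, clamped to roughly the 0.1x-10x range; on zoom-in the translation is
    # also nudged a quarter of the way towards the pointer so the view homes in on
    # the cursor rather than jumping to it.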
def undo_joint_cb (self, action, mode):
delete = action.args[0]
create = action.args[1]
if mode == UndoManager.UNDO:
self.undo_create_cb (create, mode)
self.undo_deletion (delete, mode)
else:
self.undo_deletion (delete, mode)
self.undo_create_cb (create, mode)
self.invalidate ()
def key_press (self, widget, event):
if not self.do_filter or not self.im_context.filter_keypress (event):
if self.editing:
if not self.editing.process_key_press (event, self.mode):
return self.global_key_handler (event)
return True
if len(self.selected) != 1 or not self.selected[0].process_key_press (event, self.mode):
return self.global_key_handler (event)
return True
def key_release (self, widget, event):
self.im_context.filter_keypress (event)
return True
def motion_event(self, widget, event):
"""Handle a mouse movement. There are various possibilities depending
on the state."""
coords = self.transform_coords (event.get_coords()[0], event.get_coords()[1])
if self.motion:
if self.motion.handle_motion (event, self.mode, coords):
return True
obj = self.find_object_at (coords)
if self.unending_link and not self.is_bbox_selecting:
# Ctrl-dragging to create a new link
self.unending_link.set_end (coords)
self.invalidate ()
return True
elif event.state & gtk.gdk.BUTTON1_MASK and self.is_bbox_selecting:
# Dragging selection box
self.bbox_current = coords
self.invalidate()
ul = [ self.bbox_origin[0], self.bbox_origin[1] ]
lr = [ coords[0], coords[1] ]
if self.bbox_origin[0] > coords[0]:
if self.bbox_origin[1] < coords[1]:
ul[0] = coords[0]
ul[1] = self.bbox_origin[1]
lr[0] = self.bbox_origin[0]
lr[1] = coords[1]
else:
ul = coords
lr = self.bbox_origin
elif self.bbox_origin[1] > coords[1]:
ul[0] = self.bbox_origin[0]
ul[1] = coords[1]
lr[0] = coords[0]
lr[1] = self.bbox_origin[1]
# FIXME: O(n) runtime is bad
for t in self.thoughts:
if t.lr[0] > ul[0] and t.ul[1] < lr[1] and t.ul[0] < lr[0] and t.lr[1] > ul[1] :
if t not in self.selected:
self.select_thought(t, gtk.gdk.SHIFT_MASK)
else:
if t in self.selected:
t.unselect()
self.selected.remove(t)
return True
elif self.moving and not self.editing and not self.unending_link:
# Moving thought(s) around
self.set_cursor(gtk.gdk.FLEUR)
if not self.move_action:
self.move_action = UndoManager.UndoAction (self, UNDO_MOVE, self.undo_move, self.move_origin,
self.selected)
for t in self.selected:
t.move_by (coords[0] - self.move_origin_new[0], coords[1] - self.move_origin_new[1])
self.move_origin_new = (coords[0], coords[1])
self.invalidate ()
return True
elif self.editing and event.state & gtk.gdk.BUTTON1_MASK and not obj and not self.is_bbox_selecting:
            # We were too quick with the movement. We actually want to
            # create the unending link
self.create_link (self.editing)
self.finish_editing ()
elif event.state & gtk.gdk.BUTTON2_MASK:
# Middle mouse button held down: drag canvas around.
self.translate = True
self.translation[0] -= (self.origin_x - event.x) / self.scale_fac
self.translation[1] -= (self.origin_y - event.y) / self.scale_fac
self.origin_x = event.x
self.origin_y = event.y
self.invalidate()
return True
if obj:
# Pass the motion to the object, e.g. for drawing
obj.handle_motion (event, self.mode, coords)
elif self.mode == MODE_IMAGE or self.mode == MODE_DRAW:
self.set_cursor(gtk.gdk.CROSSHAIR)
else:
self.set_cursor(gtk.gdk.LEFT_PTR)
def find_object_at (self, coords):
for x in reversed(self.thoughts):
if x.includes (coords, self.mode):
return x
for x in self.links:
if x.includes (coords, self.mode):
return x
return None
def realize_cb (self, widget):
self.disconnect (self.realize_handle)
if self.mode == MODE_IMAGE or self.mode == MODE_DRAW:
self.set_cursor (gtk.gdk.CROSSHAIR)
else:
self.set_cursor (gtk.gdk.LEFT_PTR)
return False
def set_cursor(self, kind):
new_cursor = CursorFactory().get_cursor(kind)
if self.current_cursor != new_cursor:
self.current_cursor = new_cursor
self.window.set_cursor(self.current_cursor)
def set_mode (self, mode):
if mode == self.mode:
return
self.old_mode = self.mode
self.mode = mode
self.finish_editing ()
self.hookup_im_context ()
if self.window:
if mode == MODE_IMAGE or mode == MODE_DRAW:
self.set_cursor (gtk.gdk.CROSSHAIR)
else:
self.set_cursor (gtk.gdk.LEFT_PTR)
else:
self.realize_handle = self.connect ("realize", self.realize_cb)
self.mode = mode
if self.window:
self.invalidate ()
def title_changed_cb (self, widget, new_title):
self.emit ("title_changed", new_title)
def make_primary (self, thought):
if self.primary:
print "Warning: Already have a primary root"
if self.title_change_handler:
self.primary.disconnect (self.title_change_handler)
self.title_change_handler = thought.connect ("title_changed", self.title_changed_cb)
self.emit ("title_changed", thought.text)
self.primary = thought
thought.make_primary ()
def hookup_im_context (self, thought = None):
if self.commit_handler:
self.im_context.disconnect (self.commit_handler)
self.im_context.disconnect (self.delete_handler)
self.im_context.disconnect (self.preedit_changed_handler)
self.im_context.disconnect (self.preedit_end_handler)
self.im_context.disconnect (self.preedit_start_handler)
self.im_context.disconnect (self.retrieve_handler)
self.commit_handler = None
if thought:
try:
self.commit_handler = self.im_context.connect ("commit", thought.commit_text, self.mode, self.font_name)
self.delete_handler = self.im_context.connect ("delete-surrounding", thought.delete_surroundings, self.mode)
self.preedit_changed_handler = self.im_context.connect ("preedit-changed", thought.preedit_changed, self.mode)
self.preedit_end_handler = self.im_context.connect ("preedit-end", thought.preedit_end, self.mode)
self.preedit_start_handler = self.im_context.connect ("preedit-start", thought.preedit_start, self.mode)
self.retrieve_handler = self.im_context.connect ("retrieve-surrounding", thought.retrieve_surroundings, \
self.mode)
self.do_filter = True
except AttributeError:
self.do_filter = False
else:
self.do_filter = False
def unselect_all (self):
self.hookup_im_context ()
for t in self.selected:
t.unselect ()
self.selected = []
def select_link (self, link, modifiers):
if modifiers and modifiers & gtk.gdk.SHIFT_MASK and len (self.selected) > 1 and self.selected.count (link) > 0:
self.selected.remove (link)
link.unselect ()
return
self.hookup_im_context()
if self.editing:
self.finish_editing ()
if modifiers and (modifiers & gtk.gdk.SHIFT_MASK or modifiers == -1):
if self.selected.count (link) == 0:
self.selected.append (link)
else:
for t in self.selected:
t.unselect()
self.selected = [link]
link.select()
self.emit("change_buffer", None)
def select_thought (self, thought, modifiers):
self.hookup_im_context ()
if self.editing:
self.finish_editing ()
if thought in self.selected and self.moving:
return
if thought not in self.thoughts:
self.thoughts.append(thought)
if modifiers and (modifiers & (gtk.gdk.SHIFT_MASK | gtk.gdk.CONTROL_MASK) or modifiers == -1):
# Shift-click: add thought to selection
if self.selected.count (thought) == 0:
self.selected.append (thought)
else:
for x in self.selected:
x.unselect()
self.selected = [thought]
self.current_root = []
for x in self.selected:
if x.can_be_parent():
self.current_root.append(x)
thought.select ()
if len(self.selected) == 1:
self.emit ("thought_selection_changed", thought.background_color, thought.foreground_color)
self.background_color = thought.background_color
# Image thoughts don't have a foreground colour, so we shouldn't
# copy it.
if thought.foreground_color is not None:
self.foreground_color = thought.foreground_color
try:
self.emit ("change_buffer", thought.extended_buffer)
except AttributeError:
self.emit ("change_buffer", None)
self.hookup_im_context (thought)
else:
self.emit ("change_buffer", None)
def begin_editing (self, thought):
if self.editing and thought != self.editing:
self.finish_editing ()
if thought.begin_editing ():
self.editing = thought
self.invalidate()
def undo_link_action (self, action, mode):
self.undo.block ()
if self.editing:
self.finish_editing ()
link = action.args[0]
if action.undo_type == UNDO_CREATE_LINK:
if mode == UndoManager.REDO:
self.element.appendChild (link.element)
self.links.append (link)
else:
self.delete_link (link)
elif action.undo_type == UNDO_DELETE_LINK:
if mode == UndoManager.UNDO:
self.element.appendChild (link.element)
self.links.append (link)
else:
self.delete_link (link)
elif action.undo_type == UNDO_STRENGTHEN_LINK:
if mode == UndoManager.UNDO:
link.set_strength (action.args[1])
else:
link.set_strength (action.args[2])
self.undo.unblock ()
self.invalidate ()
def connect_link (self, link):
link.connect ("select_link", self.select_link)
link.connect ("update_view", self.update_view)
link.connect ("popup_requested", self.create_popup_menu)
def create_link (self, thought, thought_coords = None, child = None, child_coords = None, strength = 2):
if child:
for x in self.links:
if x.connects (thought, child):
if x.change_strength (thought, child):
self.delete_link (x)
return
link = Links.Link (self.save, parent = thought, child = child, strength = strength)
self.connect_link (link)
element = link.get_save_element ()
self.element.appendChild (element)
self.links.append (link)
return link
else:
if self.unending_link:
del self.unending_link
self.unending_link = Links.Link (self.save, parent = thought, start_coords = thought_coords,
end_coords = child_coords, strength = strength)
def set_mouse_cursor_cb (self, thought, cursor_type):
if not self.moving:
self.set_cursor (cursor_type)
def update_all_links(self):
for l in self.links:
l.find_ends()
def update_links_cb (self, thought):
for x in self.links:
if x.uses (thought):
x.find_ends ()
def claim_unending_link (self, thought):
if not self.unending_link:
return
if self.unending_link.parent == thought:
del self.unending_link
self.unending_link = None
return
for x in self.links:
if x.connects (self.unending_link.parent, thought):
old_strength = x.strength
x.change_strength (self.unending_link.parent, thought)
new_strength = x.strength
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_STRENGTHEN_LINK, self.undo_link_action, x, \
old_strength, new_strength))
del self.unending_link
self.unending_link = None
return
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_CREATE_LINK, self.undo_link_action, self.unending_link))
self.unending_link.set_child (thought)
self.links.append (self.unending_link)
self.connect_link(self.unending_link)
element = self.unending_link.get_save_element ()
self.element.appendChild (element)
self.unending_link = None
def create_popup_menu (self, thought, event, menu_type):
menu = gtk.Menu()
undo_item = gtk.ImageMenuItem(gtk.STOCK_UNDO)
undo_item.connect('activate', self.undo.undo_action)
undo_item.set_sensitive(self.undo.exists_undo_action())
redo_item = gtk.ImageMenuItem(gtk.STOCK_REDO)
redo_item.connect('activate', self.undo.redo_action)
redo_item.set_sensitive(self.undo.exists_redo_action())
sep_item = gtk.SeparatorMenuItem()
menu.append(undo_item)
menu.append(redo_item)
menu.append(sep_item)
undo_item.show()
redo_item.show()
sep_item.show()
if thought:
for item in thought.get_popup_menu_items():
menu.append(item)
item.show()
menu.popup(None, None, None, event.button, event.get_time())
def finish_editing (self, thought = None):
if not self.editing or (thought and thought != self.editing):
return
self.editing.finish_editing ()
thought = self.editing
self.editing = None
def update_view (self, thought):
if not self.editing:
self.invalidate ()
else:
x,y,w,h = thought.get_max_area()
w += 10
h += 10
self.invalidate ((x,y,w,h))
def invalidate (self, transformed_area = None):
'''Helper function to invalidate the entire screen, forcing a redraw'''
rect = None
if not transformed_area:
alloc = self.get_allocation ()
rect = gtk.gdk.Rectangle (0, 0, alloc.width, alloc.height)
else:
ul = self.untransform_coords(transformed_area[0], transformed_area[1])
lr = self.untransform_coords(transformed_area[2], transformed_area[3])
rect = gtk.gdk.Rectangle (int(ul[0]), int(ul[1]), int(lr[0]-ul[0]), int(lr[1]-ul[1]))
if self.window:
self.window.invalidate_rect (rect, True)
def expose (self, widget, event):
'''Expose event. Calls the draw function'''
context = self.window.cairo_create ()
self.draw (event, context)
return False
def draw (self, event, context):
'''Draw the map and all the associated thoughts'''
area = event.area
context.rectangle (area.x, area.y, area.width, area.height)
context.clip ()
context.set_source_rgb (1.0,1.0,1.0)
context.move_to (area.x, area.y)
context.paint ()
context.set_source_rgb (0.0,0.0,0.0)
alloc = self.get_allocation ()
context.translate(alloc.width/2., alloc.height/2.)
context.scale(self.scale_fac, self.scale_fac)
context.translate(-alloc.width/2., -alloc.height/2.)
context.translate(self.translation[0], self.translation[1])
for l in self.links:
l.draw (context)
if self.unending_link:
self.unending_link.draw (context)
self.untransform = context.get_matrix()
self.transform = context.get_matrix()
self.transform.invert()
ax, ay = self.transform_coords(area.x, area.y)
width = area.width / self.scale_fac
height = area.height / self.scale_fac
for t in self.thoughts:
try:
if max(t.ul[0],ax)<=min(t.lr[0],ax+width) and max(t.ul[1],ay)<=min(t.lr[1],ay+height):
t.draw (context)
except:
t.draw(context)
if self.is_bbox_selecting:
# Draw the dragged selection box
xs = self.bbox_origin[0]
ys = self.bbox_origin[1]
xe = self.bbox_current[0] - xs
ye = self.bbox_current[1] - ys
xs,ys = context.user_to_device(xs, ys)
xe,ye = context.user_to_device_distance(xe, ye)
xs = int(xs) + 0.5
ys = int(ys) + 0.5
xe = int(xe)
ye = int(ye)
xs,ys = context.device_to_user(xs, ys)
xe,ye = context.device_to_user_distance(xe, ye)
color = utils.selected_colors["border"]
context.set_line_width(1.0)
context.set_source_rgb(color[0], color[1], color[2])
context.rectangle(xs, ys, xe, ye)
context.stroke()
color = utils.selected_colors["fill"]
context.set_source_rgba(color[0], color[1], color[2], 0.3)
context.rectangle(xs, ys, xe, ye)
context.fill()
context.set_line_width(2.0)
context.set_source_rgba(0.0, 0.0, 0.0, 1.0)
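    # Undo/redo handler for thought creation: UNDO removes the thought and
    # restores the previous selection and mode, REDO re-inserts the thought
    # (plus any links recorded with it) and resumes editing it.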
def undo_create_cb (self, action, mode):
self.undo.block ()
if mode == UndoManager.UNDO:
if action.args[0] == self.editing:
self.editing = None
self.unselect_all ()
for t in action.args[1]:
self.select_thought (t, -1)
self.delete_thought (action.args[0])
self.emit ("change_mode", action.args[3])
else:
self.emit ("change_mode", action.args[2])
thought = action.args[0]
self.thoughts.append (thought)
for t in action.args[1]:
self.unselect_all ()
self.select_thought (t, -1)
self.hookup_im_context (thought)
self.emit ("change_buffer", thought.extended_buffer)
self.element.appendChild (thought.element)
for l in action.args[5:]:
self.links.append (l)
self.element.appendChild (l.element)
self.begin_editing (thought)
self.emit ("set_focus", None, False)
self.undo.unblock ()
self.invalidate ()
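    # Factory for new thoughts: picks the Thought subclass from the requested
    # (or current) mode, wires up all of its signals, registers it in the XML
    # document and returns it; returns None if the new thought reports it is
    # not okay (for instance when no image was chosen).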
def create_new_thought (self, coords, thought_type = None, loading = False):
if self.editing:
self.editing.finish_editing ()
if thought_type != None:
type = thought_type
else:
type = self.mode
if type == TYPE_TEXT:
thought = TextThought.TextThought (coords, self.pango_context, self.nthoughts, self.save, self.undo, loading, self.background_color, self.foreground_color)
elif type == TYPE_IMAGE:
thought = ImageThought.ImageThought (coords, self.pango_context, self.nthoughts, self.save, self.undo, loading, self.background_color)
elif type == TYPE_DRAWING:
thought = DrawingThought.DrawingThought (coords, self.pango_context, self.nthoughts, self.save, self.undo, \
loading,self.background_color, self.foreground_color)
elif type == TYPE_RESOURCE:
thought = ResourceThought.ResourceThought (coords, self.pango_context, self.nthoughts, self.save, self.undo, loading, self.background_color, self.foreground_color)
if not thought.okay ():
return None
if type == TYPE_IMAGE:
self.emit ("change_mode", self.old_mode)
self.nthoughts += 1
self.element.appendChild (thought.element)
thought.connect ("select_thought", self.select_thought)
thought.connect ("begin_editing", self.begin_editing)
thought.connect ("popup_requested", self.create_popup_menu)
thought.connect ("create_link", self.create_link)
thought.connect ("claim_unending_link", self.claim_unending_link)
thought.connect ("update_view", self.update_view)
thought.connect ("finish_editing", self.finish_editing)
thought.connect ("delete_thought", self.delete_thought)
thought.connect ("text_selection_changed", self.text_selection_cb)
thought.connect ("change_mouse_cursor", self.set_mouse_cursor_cb)
thought.connect ("update_links", self.update_links_cb)
thought.connect ("grab_focus", self.regain_focus_cb)
thought.connect ("update-attrs", self.update_attr_cb)
self.thoughts.append (thought)
return thought
def regain_focus_cb (self, thought, ext):
self.emit ("set_focus", None, ext)
def update_attr_cb (self, widget, bold, italics, underline, pango_font):
self.emit ("set_attrs", bold, italics, underline, pango_font)
def delete_thought (self, thought):
action = UndoManager.UndoAction (self, UNDO_DELETE_SINGLE, self.undo_deletion, [thought])
if thought.element in self.element.childNodes:
self.element.removeChild (thought.element)
self.thoughts.remove (thought)
try:
self.selected.remove (thought)
except:
pass
if self.editing == thought:
self.hookup_im_context ()
self.editing = None
if self.primary == thought:
thought.disconnect (self.title_change_handler)
self.title_change_handler = None
self.primary = None
if self.thoughts:
self.make_primary (self.thoughts[0])
rem_links = []
for l in self.links:
if l.uses (thought):
action.add_arg (l)
rem_links.append (l)
for l in rem_links:
self.delete_link (l)
self.undo.add_undo (action)
return True
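    # Undo/redo handler for deletions: UNDO restores the recorded thoughts and
    # links and re-selects them, REDO deletes them again.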
def undo_deletion (self, action, mode):
self.undo.block ()
if mode == UndoManager.UNDO:
self.unselect_all ()
for l in action.args[1:]:
self.links.append (l)
self.element.appendChild (l.element)
for t in action.args[0]:
self.thoughts.append (t)
self.select_thought (t, -1)
self.element.appendChild (t.element)
if action.undo_type == UNDO_DELETE_SINGLE:
self.begin_editing (action.args[0][0])
self.emit ("change_buffer", action.args[0][0].extended_buffer)
if not self.primary:
self.make_primary (action.args[0][0])
else:
self.emit ("change_buffer", None)
else:
for t in action.args[0]:
self.delete_thought (t)
for l in action.args[1:]:
self.delete_link (l)
self.emit ("set_focus", None, False)
self.undo.unblock ()
self.invalidate ()
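    # Deletes every selected thought (and any links touching it) plus any
    # selected links, bundling everything into a single undo action; the
    # per-thought undo from delete_thought is blocked so it is not duplicated.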
def delete_selected_elements (self):
if len(self.selected) == 0:
return
action = UndoManager.UndoAction (self, UNDO_DELETE, self.undo_deletion, copy.copy(self.selected))
        # delete_thought as a callback adds its own undo action.  Block that here
self.undo.block ()
tmp = self.selected
t = tmp.pop()
while t:
if t in self.thoughts:
for l in self.links:
if l.uses (t):
action.add_arg (l)
self.delete_thought (t)
if t in self.links:
self.delete_link (t)
if len (tmp) == 0:
t = None
else:
t = tmp.pop()
self.undo.unblock ()
self.undo.add_undo (action)
self.invalidate ()
def delete_link (self, link):
if link.element in self.element.childNodes:
self.element.removeChild (link.element)
#link.element.unlink ()
self.links.remove (link)
def popup_menu_key (self, event):
print "Popup Menu Key"
def find_related_thought (self, radians):
# Find thought within angle
best = None
bestangle = 1000.
bestdist = 10000.
def do_find (one, two, currentangle, curdist, sensitivity):
init_x = (one.ul[0] + one.lr[0]) / 2.
init_y = (one.ul[1] + one.lr[1]) / 2.
other_x = (two.ul[0] + two.lr[0]) / 2.
other_y = (two.ul[1] + two.lr[1]) / 2.
angle = math.atan2 ((other_y - init_y), (other_x - init_x))
while angle > math.pi:
angle -= math.pi
while angle < -math.pi:
angle += math.pi
# We have to special-case left due to stupidity of tan's
# We shift it by pi radians
if radians == RAD_LEFT:
relangle = abs((angle+math.pi) - (radians+math.pi))
if relangle > math.pi*2.:
relangle -= math.pi*2.
else:
relangle = abs(angle - radians)
newdist = math.sqrt ((init_x - other_x)**2 + (init_y - other_y)**2)
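            # The score mixes distance with an angular penalty (50 px per radian
            # off the requested direction); the caller keeps the lowest score.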
magicnum = newdist + (50. * relangle)
# Used for debugging. Spits out lots of useful info
# to determine interesting things about the thought relations
#print "angle: "+str(angle)+" rel: "+str(magicnum)+" rads: "+str(radians),
#print " , "+str(math.pi / 3.0)+" , "+str(currentangle)+"\n: "+str(relangle)
if (relangle < sensitivity) and \
(magicnum < currentangle):
return (magicnum, newdist)
return (currentangle, curdist)
if len(self.selected) != 1:
return None
initial = self.selected[0]
for x in self.links:
if x.parent == initial:
other = x.child
elif x.child == initial:
other = x.parent
else:
continue
(curr, dist) = do_find (initial, other, bestangle, bestdist, math.pi/3.)
if curr < bestangle:
bestangle = curr
best = other
bestdist = dist
if not best:
for x in self.thoughts:
if x == self.selected[0]:
continue
(curr, dist) = do_find (initial, x, bestangle, bestdist, math.pi/4.)
if curr < bestangle:
best = x
bestangle = curr
bestdist = dist
return best
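    # Undo/redo handler for the align_* operations: replays or reverses the
    # per-thought movement vectors stored in the undo action.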
def undo_align(self, action, mode):
self.undo.block ()
dic = action.args[0]
if mode == UndoManager.UNDO:
for t in dic:
t.move_by(-dic[t][0], -dic[t][1])
else:
for t in dic:
t.move_by(dic[t][0], dic[t][1])
self.undo.unblock ()
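    # The align_* methods below line the selected thoughts up against the first
    # selected thought (left/top edge, right/bottom edge, or centre), moving
    # each one along a single axis and recording the moves for undo.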
def align_top_left(self, vertical=True):
dic = {}
if len(self.selected) != 0:
x = self.selected[0].ul[0]
y = self.selected[0].ul[1]
for t in self.selected:
if vertical:
vec = (-(t.ul[0]-x), 0)
else:
vec = (0, -(t.ul[1]-y))
t.move_by(vec[0], vec[1])
dic[t] = vec
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_ALIGN, self.undo_align, dic))
def align_bottom_right(self, vertical=True):
dic = {}
if len(self.selected) != 0:
x = self.selected[0].lr[0]
y = self.selected[0].lr[1]
for t in self.selected:
if vertical:
vec = (-(t.lr[0]-x), 0)
else:
vec = (0, -(t.lr[1]-y))
t.move_by(vec[0], vec[1])
dic[t] = vec
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_ALIGN, self.undo_align, dic))
def align_centered(self, vertical=True):
dic = {}
if len(self.selected) != 0:
x = self.selected[0].ul[0] + (self.selected[0].lr[0] - self.selected[0].ul[0]) / 2.0
y = self.selected[0].ul[1] + (self.selected[0].lr[1] - self.selected[0].ul[1]) / 2.0
for t in self.selected:
if vertical:
vec = (-((t.ul[0] + (t.lr[0]-t.ul[0])/2.0)-x), 0)
else:
vec = (0, -((t.ul[1] + (t.lr[1]-t.ul[1])/2.0)-y))
t.move_by(vec[0], vec[1])
dic[t] = vec
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_ALIGN, self.undo_align, dic))
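    # Keyboard shortcuts that apply to the whole map: arrow keys move the
    # selection to a related thought, Delete/BackSpace remove the selection,
    # the Menu key opens the context menu, Escape clears the selection and
    # Ctrl+A selects everything.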
def global_key_handler (self, event):
thought = None
if event.keyval == gtk.keysyms.Up:
thought = self.find_related_thought (RAD_UP)
elif event.keyval == gtk.keysyms.Down:
thought = self.find_related_thought (RAD_DOWN)
elif event.keyval == gtk.keysyms.Left:
thought = self.find_related_thought (RAD_LEFT)
elif event.keyval == gtk.keysyms.Right:
thought = self.find_related_thought (RAD_RIGHT)
elif event.keyval == gtk.keysyms.Delete:
self.delete_selected_elements ()
elif event.keyval == gtk.keysyms.BackSpace:
self.delete_selected_elements ()
elif event.keyval == gtk.keysyms.Menu:
self.popup_menu_key (event)
elif event.keyval == gtk.keysyms.Escape:
self.unselect_all ()
elif event.keyval == gtk.keysyms.a and event.state & gtk.gdk.CONTROL_MASK:
self.unselect_all ()
for t in self.thoughts:
t.select ()
self.selected.append (t)
else:
return False
if thought:
self.select_thought (thought, None)
self.invalidate ()
return True
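    # Deserialisation helpers: load_thought/load_link rebuild individual nodes
    # from their XML elements, and load_thyself walks the saved document and
    # dispatches on each element's name.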
def load_thought (self, node, type):
thought = self.create_new_thought (None, type, loading = True)
thought.load (node)
def load_link (self, node):
link = Links.Link (self.save)
self.connect_link (link)
link.load (node)
self.links.append (link)
element = link.get_save_element ()
self.element.appendChild (element)
def load_thyself (self, top_element, doc):
for node in top_element.childNodes:
if node.nodeName == "thought":
self.load_thought (node, TYPE_TEXT)
elif node.nodeName == "image_thought":
self.load_thought (node, TYPE_IMAGE)
elif node.nodeName == "drawing_thought":
self.load_thought (node, TYPE_DRAWING)
elif node.nodeName == "res_thought":
self.load_thought (node, TYPE_RESOURCE)
elif node.nodeName == "link":
self.load_link (node)
else:
print "Warning: Unknown element type. Ignoring: "+node.nodeName
self.finish_loading ()
def finish_loading (self):
        # Possible TODO: This all assumes we've been given a proper,
        # consistent file.  It should fall back nicely, but...
# First, find the primary root:
for t in self.thoughts:
if t.am_primary:
self.make_primary (t)
if t.am_selected:
self.selected.append (t)
t.select ()
if t.editing:
self.begin_editing (t)
if t.identity >= self.nthoughts:
self.nthoughts = t.identity + 1
if self.selected:
self.current_root = self.selected
else:
self.current_root = [self.primary]
if len(self.selected) == 1:
self.emit ("change_buffer", self.selected[0].extended_buffer)
self.hookup_im_context (self.selected[0])
self.emit ("thought_selection_changed", self.selected[0].background_color, \
self.selected[0].foreground_color)
else:
self.emit ("change_buffer", None)
del_links = []
for l in self.links:
if (l.parent_number == -1 and l.child_number == -1) or \
(l.parent_number == l.child_number):
del_links.append (l)
continue
parent = child = None
for t in self.thoughts:
if t.identity == l.parent_number:
parent = t
elif t.identity == l.child_number:
child = t
if parent and child:
break
l.set_parent_child (parent, child)
if not l.parent or not l.child:
del_links.append (l)
for l in del_links:
self.delete_link (l)
def prepare_save(self):
for t in self.thoughts:
t.update_save ()
for l in self.links:
l.update_save ()
def save_thyself (self):
self.prepare_save()
if len(self.thoughts) > 0:
self.emit ("doc_save", self.save, self.element)
else:
self.emit ("doc_delete")
def text_selection_cb (self, thought, start, end, text):
self.emit ("text_selection_changed", start, end, text)
def copy_clipboard (self, clip):
if len (self.selected) != 1:
return
self.selected[0].copy_text (clip)
def cut_clipboard (self, clip):
if len (self.selected) != 1:
return
self.selected[0].cut_text (clip)
def paste_clipboard (self, clip):
if len (self.selected) != 1:
return
self.selected[0].paste_text (clip)
def export (self, context, width, height, native):
context.rectangle (0, 0, width, height)
context.clip ()
context.set_source_rgb (1.0,1.0,1.0)
context.move_to (0,0)
context.paint ()
context.set_source_rgb (0.0,0.0,0.0)
if not native:
move_x = self.move_x
move_y = self.move_y
else:
move_x = 0
move_y = 0
for l in self.links:
l.export (context, move_x, move_y)
for t in self.thoughts:
t.export (context, move_x, move_y)
def get_max_area (self):
minx = 999
maxx = -999
miny = 999
maxy = -999
for t in self.thoughts:
mx,my,mmx,mmy = t.get_max_area ()
if mx < minx:
minx = mx
if my < miny:
miny = my
if mmx > maxx:
maxx = mmx
if mmy > maxy:
maxy = mmy
# Add a 10px border around all
self.move_x = 10-minx
self.move_y = 10-miny
maxx = maxx-minx+20
maxy = maxy-miny+20
return (maxx,maxy)
def get_selection_bounds (self):
if len (self.selected) == 1:
try:
return self.selected[0].index, self.selected[0].end_index
except AttributeError:
return None, None
else:
return None, None
def thoughts_are_linked (self):
if len (self.selected) != 2:
return False
for l in self.links:
if l.connects (self.selected[0], self.selected[1]):
return True
return False
def link_menu_cb (self):
if len (self.selected) != 2:
return
lnk = None
for l in self.links:
if l.connects (self.selected[0], self.selected[1]):
lnk = l
break
if lnk:
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_DELETE_LINK, self.undo_link_action, lnk))
self.delete_link (lnk)
else:
lnk = self.create_link (self.selected[0], None, self.selected[1])
self.undo.add_undo (UndoManager.UndoAction (self, UNDO_CREATE_LINK, self.undo_link_action, lnk))
self.invalidate ()
def set_bold (self, active):
if len(self.selected) != 1:
return
self.selected[0].set_bold (active)
self.invalidate()
def set_italics (self, active):
if len(self.selected) != 1:
return
self.selected[0].set_italics (active)
self.invalidate()
def set_underline (self, active):
if len(self.selected) != 1:
return
self.selected[0].set_underline (active)
self.invalidate()
def set_background_color(self, color):
for s in self.selected:
s.background_color = color
self.background_color = color
if len(self.selected) > 1:
self.invalidate()
def set_foreground_color(self, color):
for s in self.selected:
s.foreground_color = color
self.foreground_color = color
if len(self.selected) > 1:
self.invalidate()
def set_font(self, font_name):
if len (self.selected) == 1 and hasattr(self.selected[0], "set_font"):
self.selected[0].set_font(font_name)
self.font_name = font_name
self.invalidate()
class CursorFactory:
# Shared state
cursors = {}
def get_cursor(self, cur_type):
if cur_type not in self.cursors:
self.cursors[cur_type] = gtk.gdk.Cursor(cur_type)
return self.cursors[cur_type]
| gpl-2.0 | 3,068,028,678,891,089,000 | 37.855114 | 175 | 0.542224 | false |
morgangalpin/binly | pavement.py | 1 | 7389 | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import time
import subprocess
# Import parameters from the setup file.
sys.path.append('.')
from setup import ( # noqa
setup_dict, get_project_files, print_success_message,
print_failure_message, _lint, _test, _test_all,
CODE_DIRECTORY, DOCS_DIRECTORY, TESTS_DIRECTORY, PYTEST_FLAGS)
from paver.easy import options, task, needs, consume_args # noqa
from paver.setuputils import install_distutils_tasks # noqa
options(setup=setup_dict)
install_distutils_tasks()
# Miscellaneous helper functions
def print_passed():
# generated on http://patorjk.com/software/taag/#p=display&f=Small&t=PASSED
print_success_message(r''' ___ _ ___ ___ ___ ___
| _ \/_\ / __/ __| __| \
| _/ _ \\__ \__ \ _|| |) |
|_|/_/ \_\___/___/___|___/
''')
def print_failed():
# generated on http://patorjk.com/software/taag/#p=display&f=Small&t=FAILED
print_failure_message(r''' ___ _ ___ _ ___ ___
| __/_\ |_ _| | | __| \
| _/ _ \ | || |__| _|| |) |
|_/_/ \_\___|____|___|___/
''')
class cwd(object):
"""Class used for temporarily changing directories. Can be though of
as a `pushd /my/dir' then a `popd' at the end.
"""
def __init__(self, newcwd):
""":param newcwd: directory to make the cwd
:type newcwd: :class:`str`
"""
self.newcwd = newcwd
def __enter__(self):
self.oldcwd = os.getcwd()
os.chdir(self.newcwd)
return os.getcwd()
def __exit__(self, type_, value, traceback):
# This acts like a `finally' clause: it will always be executed.
os.chdir(self.oldcwd)
# Task-related functions
def _doc_make(*make_args):
"""Run make in sphinx' docs directory.
:return: exit code
"""
if sys.platform == 'win32':
# Windows
make_cmd = ['make.bat']
else:
# Linux, Mac OS X, and others
make_cmd = ['make']
make_cmd.extend(make_args)
# Account for a stupid Python "bug" on Windows:
# <http://bugs.python.org/issue15533>
with cwd(DOCS_DIRECTORY):
retcode = subprocess.call(make_cmd)
return retcode
# Tasks
@task
@needs('doc_html', 'setuptools.command.sdist')
def sdist():
"""Build the HTML docs and the tarball."""
pass
@task
def test():
"""Run the unit tests."""
raise SystemExit(_test())
@task
def lint():
# This refuses to format properly when running `paver help' unless
# this ugliness is used.
('Perform PEP8 style check, run PyFlakes, and run McCabe complexity '
'metrics on the code.')
raise SystemExit(_lint())
@task
def test_all():
"""Perform a style check and run all unit tests."""
retcode = _test_all()
if retcode == 0:
print_passed()
else:
print_failed()
raise SystemExit(retcode)
@task
@consume_args
def run(args):
"""Run the package's main script. All arguments are passed to it."""
# The main script expects to get the called executable's name as
# argv[0]. However, paver doesn't provide that in args. Even if it did (or
# we dove into sys.argv), it wouldn't be useful because it would be paver's
# executable. So we just pass the package name in as the executable name,
# since it's close enough. This should never be seen by an end user
# installing through Setuptools anyway.
from binly.main import main
raise SystemExit(main([CODE_DIRECTORY] + args))
@task
def commit():
"""Commit only if all the tests pass."""
if _test_all() == 0:
subprocess.check_call(['git', 'commit'])
else:
print_failure_message('\nTests failed, not committing.')
@task
def coverage():
"""Run tests and show test coverage report."""
try:
import pytest_cov # NOQA
except ImportError:
print_failure_message(
'Install the pytest coverage plugin to use this task, '
"i.e., `pip install pytest-cov'.")
raise SystemExit(1)
import pytest
pytest.main(PYTEST_FLAGS + [
'--cov', CODE_DIRECTORY,
'--cov-report', 'term-missing',
TESTS_DIRECTORY])
@task # NOQA
def doc_watch():
"""Watch for changes in the docs and rebuild HTML docs when changed."""
try:
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
except ImportError:
print_failure_message('Install the watchdog package to use this task, '
"i.e., `pip install watchdog'.")
raise SystemExit(1)
class RebuildDocsEventHandler(FileSystemEventHandler):
def __init__(self, base_paths):
self.base_paths = base_paths
def dispatch(self, event):
"""Dispatches events to the appropriate methods.
:param event: The event object representing the file system event.
:type event: :class:`watchdog.events.FileSystemEvent`
"""
for base_path in self.base_paths:
if event.src_path.endswith(base_path):
super(RebuildDocsEventHandler, self).dispatch(event)
# We found one that matches. We're done.
return
def on_modified(self, event):
print_failure_message('Modification detected. Rebuilding docs.')
# # Strip off the path prefix.
# import os
# if event.src_path[len(os.getcwd()) + 1:].startswith(
# CODE_DIRECTORY):
# # sphinx-build doesn't always pick up changes on code files,
# # even though they are used to generate the documentation. As
# # a workaround, just clean before building.
doc_html()
print_success_message('Docs have been rebuilt.')
print_success_message(
'Watching for changes in project files, press Ctrl-C to cancel...')
handler = RebuildDocsEventHandler(get_project_files())
observer = Observer()
observer.schedule(handler, path='.', recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
@task
@needs('doc_html')
def doc_open():
"""Build the HTML docs and open them in a web browser."""
doc_index = os.path.join(DOCS_DIRECTORY, 'build', 'html', 'index.html')
if sys.platform == 'darwin':
# Mac OS X
subprocess.check_call(['open', doc_index])
elif sys.platform == 'win32':
# Windows
subprocess.check_call(['start', doc_index], shell=True)
elif sys.platform == 'linux2':
# All freedesktop-compatible desktops
subprocess.check_call(['xdg-open', doc_index])
else:
print_failure_message(
"Unsupported platform. Please open `{0}' manually.".format(
doc_index))
@task
def get_tasks():
"""Get all paver-defined tasks."""
from paver.tasks import environment
for the_task in environment.get_tasks():
print(the_task.shortname)
@task
def doc_html():
"""Build the HTML docs."""
retcode = _doc_make('html')
if retcode:
raise SystemExit(retcode)
@task
def doc_clean():
"""Clean (delete) the built docs."""
retcode = _doc_make('clean')
if retcode:
raise SystemExit(retcode)
| gpl-3.0 | 5,098,610,376,485,017,000 | 27.528958 | 79 | 0.599946 | false |
jjbrophy47/sn_spam | relational/scripts/tests/test_psl.py | 1 | 5158 | """
Tests the relational module.
"""
import os
import unittest
import mock
from .context import psl
from .context import config
from .context import pred_builder
from .context import util
from .context import test_utils as tu
class PSLTestCase(unittest.TestCase):
def setUp(self):
config_obj = tu.sample_config()
mock_pred_builder_obj = mock.Mock(pred_builder.PredicateBuilder)
util_obj = util.Util()
self.test_obj = psl.PSL(config_obj, mock_pred_builder_obj, util_obj)
def tearDown(self):
self.test_obj = None
def test_init(self):
# setup
result = self.test_obj
# assert
self.assertTrue(isinstance(result.config_obj, config.Config))
self.assertTrue(isinstance(result.pred_builder_obj,
pred_builder.PredicateBuilder))
self.assertTrue(isinstance(result.util_obj, util.Util))
def test_compile(self):
os.chdir = mock.Mock()
os.system = mock.Mock()
self.test_obj.compile('psl/')
build = 'mvn dependency:build-classpath '
build += '-Dmdep.outputFile=classpath.out -q'
expected = [mock.call('mvn compile -q'), mock.call(build)]
os.chdir.assert_called_with('psl/')
self.assertTrue(os.system.call_args_list == expected)
def test_run_infer(self):
os.chdir = mock.Mock()
os.system = mock.Mock()
self.test_obj.config_obj.infer = True
self.test_obj.run('psl/')
execute = 'java -Xmx60g -cp ./target/classes:`cat classpath.out` '
execute += 'spam.Infer 1 soundcloud intext posts'
os.chdir.assert_called_with('psl/')
os.system.assert_called_with(execute)
def test_clear_data(self):
psl_data_f = 'test_psl/'
os.system = mock.Mock()
self.test_obj.clear_data(psl_data_f)
expected = [mock.call('rm test_psl/*.tsv'),
mock.call('rm test_psl/*.txt'),
mock.call('rm test_psl/db/*.db')]
self.assertTrue(os.system.call_args_list == expected)
def test_gen_predicates(self):
self.test_obj.pred_builder_obj.build_comments = mock.Mock()
self.test_obj.pred_builder_obj.build_relations = mock.Mock()
self.test_obj.gen_predicates('df', 'test', 'd/', fw='fw')
expected = [mock.call('intext', 'text', 'text_id', 'df', 'test',
'd/', fw='fw'), mock.call('posts', 'user', 'user_id', 'df',
'test', 'd/', fw='fw')]
self.test_obj.pred_builder_obj.build_comments.assert_called_with('df',
'test', 'd/')
self.assertTrue(self.test_obj.pred_builder_obj.build_relations.
call_args_list == expected)
def test_gen_model(self):
self.test_obj.priors = mock.Mock(return_value=['n', 'p'])
self.test_obj.map_relation_to_rules = mock.Mock()
self.test_obj.map_relation_to_rules.side_effect = [['r1', 'r2'],
['a1', 'a2']]
self.test_obj.write_model = mock.Mock()
self.test_obj.gen_model('d/')
exp = ['n', 'p', 'r1', 'r2', 'a1', 'a2']
self.test_obj.priors.assert_called()
self.assertTrue(self.test_obj.map_relation_to_rules.call_args_list ==
[mock.call('intext', 'text'), mock.call('posts', 'user')])
self.test_obj.write_model.assert_called_with(exp, 'd/')
def test_network_size(self):
self.test_obj.util_obj.file_len = mock.Mock()
self.test_obj.util_obj.file_len.side_effect = [2, 4, 8]
self.test_obj.config_obj.relations = [('posts', 'user', 'user_id')]
self.test_obj.network_size('d/')
exp_fl = [mock.call('d/val_1.tsv'), mock.call('d/val_posts_1.tsv'),
mock.call('d/val_user_1.tsv')]
self.assertTrue(self.test_obj.util_obj.file_len.call_args_list ==
exp_fl)
def test_priors_no_sq(self):
self.test_obj.sq = False
result = self.test_obj.priors()
exp = ['1.0: ~spam(Com)', '1.0: indpred(Com) -> spam(Com)']
self.assertTrue(result == exp)
def test_priors_sq_diff_weights(self):
self.test_obj.wgt = 2.0
result = self.test_obj.priors()
exp = ['2.0: ~spam(Com) ^2', '2.0: indpred(Com) -> spam(Com) ^2']
self.assertTrue(result == exp)
def test_map_relation_to_rules_no_sq(self):
self.test_obj.sq = False
result = self.test_obj.map_relation_to_rules('intext', 'text')
r1 = '1.0: intext(Com, Text) & spammytext(Text) -> spam(Com)'
r2 = '1.0: intext(Com, Text) & spam(Com) -> spammytext(Text)'
self.assertTrue(result == [r1, r2])
def test_map_relation_to_rules_sq_diff_weights(self):
self.test_obj.wgt = 2.0
result = self.test_obj.map_relation_to_rules('intext', 'text')
r1 = '2.0: intext(Com, Text) & spammytext(Text) -> spam(Com) ^2'
r2 = '2.0: intext(Com, Text) & spam(Com) -> spammytext(Text) ^2'
self.assertTrue(result == [r1, r2])
def test_suite():
suite = unittest.TestLoader().loadTestsFromTestCase(PSLTestCase)
return suite
if __name__ == '__main__':
unittest.main()
| mit | -1,751,147,986,667,544,300 | 33.15894 | 78 | 0.585692 | false |
PyCQA/astroid | astroid/node_classes.py | 1 | 162275 | # Copyright (c) 2009-2011, 2013-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2010 Daniel Harding <[email protected]>
# Copyright (c) 2012 FELD Boris <[email protected]>
# Copyright (c) 2013-2014 Google, Inc.
# Copyright (c) 2014-2021 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Eevee (Alex Munroe) <[email protected]>
# Copyright (c) 2015-2016 Ceridwen <[email protected]>
# Copyright (c) 2015 Florian Bruhin <[email protected]>
# Copyright (c) 2016-2017 Derek Gustafson <[email protected]>
# Copyright (c) 2016 Jared Garst <[email protected]>
# Copyright (c) 2016 Jakub Wilk <[email protected]>
# Copyright (c) 2016 Dave Baum <[email protected]>
# Copyright (c) 2017-2020 Ashley Whetter <[email protected]>
# Copyright (c) 2017, 2019 Łukasz Rogalski <[email protected]>
# Copyright (c) 2017 rr- <[email protected]>
# Copyright (c) 2018-2021 hippo91 <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Nick Drozd <[email protected]>
# Copyright (c) 2018 Ville Skyttä <[email protected]>
# Copyright (c) 2018 brendanator <[email protected]>
# Copyright (c) 2018 HoverHell <[email protected]>
# Copyright (c) 2019 kavins14 <[email protected]>
# Copyright (c) 2019 kavins14 <[email protected]>
# Copyright (c) 2020 Raphael Gaschignard <[email protected]>
# Copyright (c) 2020 Bryce Guinta <[email protected]>
# Copyright (c) 2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Copyright (c) 2021 Andrew Haigh <[email protected]>
# Copyright (c) 2021 Federico Bond <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
"""Module for some node classes. More nodes in scoped_nodes.py"""
import abc
import itertools
import pprint
import typing
from functools import lru_cache
from functools import singledispatch as _singledispatch
from typing import ClassVar, Optional
from astroid import as_string, bases
from astroid import context as contextmod
from astroid import decorators, mixins, util
from astroid.const import BUILTINS, Context
from astroid.exceptions import (
AstroidError,
AstroidIndexError,
AstroidTypeError,
InferenceError,
NoDefault,
UseInferenceDefault,
)
from astroid.manager import AstroidManager
try:
from typing import Literal
except ImportError:
# typing.Literal was added in Python 3.8
from typing_extensions import Literal
def _is_const(value):
return isinstance(value, tuple(CONST_CLS))
@decorators.raise_if_nothing_inferred
def unpack_infer(stmt, context=None):
"""recursively generate nodes inferred by the given statement.
If the inferred value is a list or a tuple, recurse on the elements
"""
if isinstance(stmt, (List, Tuple)):
for elt in stmt.elts:
if elt is util.Uninferable:
yield elt
continue
yield from unpack_infer(elt, context)
return dict(node=stmt, context=context)
# if inferred is a final node, return it and stop
inferred = next(stmt.infer(context), util.Uninferable)
if inferred is stmt:
yield inferred
return dict(node=stmt, context=context)
# else, infer recursively, except Uninferable object that should be returned as is
for inferred in stmt.infer(context):
if inferred is util.Uninferable:
yield inferred
else:
yield from unpack_infer(inferred, context)
return dict(node=stmt, context=context)
def are_exclusive(stmt1, stmt2, exceptions: Optional[typing.List[str]] = None) -> bool:
"""return true if the two given statements are mutually exclusive
`exceptions` may be a list of exception names. If specified, discard If
branches and check one of the statement is in an exception handler catching
one of the given exceptions.
algorithm :
1) index stmt1's parents
2) climb among stmt2's parents until we find a common parent
3) if the common parent is a If or TryExcept statement, look if nodes are
in exclusive branches
"""
# index stmt1's parents
stmt1_parents = {}
children = {}
node = stmt1.parent
previous = stmt1
while node:
stmt1_parents[node] = 1
children[node] = previous
previous = node
node = node.parent
# climb among stmt2's parents until we find a common parent
node = stmt2.parent
previous = stmt2
while node:
if node in stmt1_parents:
# if the common parent is a If or TryExcept statement, look if
# nodes are in exclusive branches
if isinstance(node, If) and exceptions is None:
if (
node.locate_child(previous)[1]
is not node.locate_child(children[node])[1]
):
return True
elif isinstance(node, TryExcept):
c2attr, c2node = node.locate_child(previous)
c1attr, c1node = node.locate_child(children[node])
if c1node is not c2node:
first_in_body_caught_by_handlers = (
c2attr == "handlers"
and c1attr == "body"
and previous.catch(exceptions)
)
second_in_body_caught_by_handlers = (
c2attr == "body"
and c1attr == "handlers"
and children[node].catch(exceptions)
)
first_in_else_other_in_handlers = (
c2attr == "handlers" and c1attr == "orelse"
)
second_in_else_other_in_handlers = (
c2attr == "orelse" and c1attr == "handlers"
)
if any(
(
first_in_body_caught_by_handlers,
second_in_body_caught_by_handlers,
first_in_else_other_in_handlers,
second_in_else_other_in_handlers,
)
):
return True
elif c2attr == "handlers" and c1attr == "handlers":
return previous is not children[node]
return False
previous = node
node = node.parent
return False
# getitem() helpers.
_SLICE_SENTINEL = object()
def _slice_value(index, context=None):
"""Get the value of the given slice index."""
if isinstance(index, Const):
if isinstance(index.value, (int, type(None))):
return index.value
elif index is None:
return None
else:
# Try to infer what the index actually is.
# Since we can't return all the possible values,
# we'll stop at the first possible value.
try:
inferred = next(index.infer(context=context))
except (InferenceError, StopIteration):
pass
else:
if isinstance(inferred, Const):
if isinstance(inferred.value, (int, type(None))):
return inferred.value
# Use a sentinel, because None can be a valid
# value that this function can return,
# as it is the case for unspecified bounds.
return _SLICE_SENTINEL
def _infer_slice(node, context=None):
lower = _slice_value(node.lower, context)
upper = _slice_value(node.upper, context)
step = _slice_value(node.step, context)
if all(elem is not _SLICE_SENTINEL for elem in (lower, upper, step)):
return slice(lower, upper, step)
raise AstroidTypeError(
message="Could not infer slice used in subscript",
node=node,
index=node.parent,
context=context,
)
def _container_getitem(instance, elts, index, context=None):
"""Get a slice or an item, using the given *index*, for the given sequence."""
try:
if isinstance(index, Slice):
index_slice = _infer_slice(index, context=context)
new_cls = instance.__class__()
new_cls.elts = elts[index_slice]
new_cls.parent = instance.parent
return new_cls
if isinstance(index, Const):
return elts[index.value]
except IndexError as exc:
raise AstroidIndexError(
message="Index {index!s} out of range",
node=instance,
index=index,
context=context,
) from exc
except TypeError as exc:
raise AstroidTypeError(
message="Type error {error!r}", node=instance, index=index, context=context
) from exc
raise AstroidTypeError("Could not use %s as subscript index" % index)
OP_PRECEDENCE = {
op: precedence
for precedence, ops in enumerate(
[
["Lambda"], # lambda x: x + 1
["IfExp"], # 1 if True else 2
["or"],
["and"],
["not"],
["Compare"], # in, not in, is, is not, <, <=, >, >=, !=, ==
["|"],
["^"],
["&"],
["<<", ">>"],
["+", "-"],
["*", "@", "/", "//", "%"],
["UnaryOp"], # +, -, ~
["**"],
["Await"],
]
)
for op in ops
}
class NodeNG:
"""A node of the new Abstract Syntax Tree (AST).
This is the base class for all Astroid node classes.
"""
is_statement: ClassVar[bool] = False
"""Whether this node indicates a statement."""
optional_assign: ClassVar[
bool
] = False # True for For (and for Comprehension if py <3.0)
"""Whether this node optionally assigns a variable.
This is for loop assignments because loop won't necessarily perform an
assignment if the loop has no iterations.
This is also the case from comprehensions in Python 2.
"""
is_function: ClassVar[bool] = False # True for FunctionDef nodes
"""Whether this node indicates a function."""
is_lambda: ClassVar[bool] = False
# Attributes below are set by the builder module or by raw factories
_astroid_fields: ClassVar[typing.Tuple[str, ...]] = ()
"""Node attributes that contain child nodes.
This is redefined in most concrete classes.
"""
_other_fields: ClassVar[typing.Tuple[str, ...]] = ()
"""Node attributes that do not contain child nodes."""
_other_other_fields: ClassVar[typing.Tuple[str, ...]] = ()
"""Attributes that contain AST-dependent fields."""
# instance specific inference function infer(node, context)
_explicit_inference = None
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional["NodeNG"] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.lineno: Optional[int] = lineno
"""The line that this node appears on in the source code."""
self.col_offset: Optional[int] = col_offset
"""The column that this node appears on in the source code."""
self.parent: Optional["NodeNG"] = parent
"""The parent node in the syntax tree."""
def infer(self, context=None, **kwargs):
"""Get a generator of the inferred values.
This is the main entry point to the inference system.
.. seealso:: :ref:`inference`
If the instance has some explicit inference function set, it will be
called instead of the default interface.
:returns: The inferred values.
:rtype: iterable
"""
if context is not None:
context = context.extra_context.get(self, context)
if self._explicit_inference is not None:
# explicit_inference is not bound, give it self explicitly
try:
# pylint: disable=not-callable
results = tuple(self._explicit_inference(self, context, **kwargs))
if context is not None:
context.nodes_inferred += len(results)
yield from results
return
except UseInferenceDefault:
pass
if not context:
# nodes_inferred?
yield from self._infer(context, **kwargs)
return
key = (self, context.lookupname, context.callcontext, context.boundnode)
if key in context.inferred:
yield from context.inferred[key]
return
generator = self._infer(context, **kwargs)
results = []
# Limit inference amount to help with performance issues with
# exponentially exploding possible results.
limit = AstroidManager().max_inferable_values
for i, result in enumerate(generator):
if i >= limit or (context.nodes_inferred > context.max_inferred):
yield util.Uninferable
break
results.append(result)
yield result
context.nodes_inferred += 1
# Cache generated results for subsequent inferences of the
# same node using the same context
context.inferred[key] = tuple(results)
return
def _repr_name(self):
"""Get a name for nice representation.
This is either :attr:`name`, :attr:`attrname`, or the empty string.
:returns: The nice name.
:rtype: str
"""
if all(name not in self._astroid_fields for name in ("name", "attrname")):
return getattr(self, "name", "") or getattr(self, "attrname", "")
return ""
def __str__(self):
rname = self._repr_name()
cname = type(self).__name__
if rname:
string = "%(cname)s.%(rname)s(%(fields)s)"
alignment = len(cname) + len(rname) + 2
else:
string = "%(cname)s(%(fields)s)"
alignment = len(cname) + 1
result = []
for field in self._other_fields + self._astroid_fields:
value = getattr(self, field)
width = 80 - len(field) - alignment
lines = pprint.pformat(value, indent=2, width=width).splitlines(True)
inner = [lines[0]]
for line in lines[1:]:
inner.append(" " * alignment + line)
result.append("{}={}".format(field, "".join(inner)))
return string % {
"cname": cname,
"rname": rname,
"fields": (",\n" + " " * alignment).join(result),
}
def __repr__(self):
rname = self._repr_name()
if rname:
string = "<%(cname)s.%(rname)s l.%(lineno)s at 0x%(id)x>"
else:
string = "<%(cname)s l.%(lineno)s at 0x%(id)x>"
return string % {
"cname": type(self).__name__,
"rname": rname,
"lineno": self.fromlineno,
"id": id(self),
}
def accept(self, visitor):
"""Visit this node using the given visitor."""
func = getattr(visitor, "visit_" + self.__class__.__name__.lower())
return func(self)
def get_children(self):
"""Get the child nodes below this node.
:returns: The children.
:rtype: iterable(NodeNG)
"""
for field in self._astroid_fields:
attr = getattr(self, field)
if attr is None:
continue
if isinstance(attr, (list, tuple)):
yield from attr
else:
yield attr
yield from ()
def last_child(self): # -> Optional["NodeNG"]
"""An optimized version of list(get_children())[-1]"""
for field in self._astroid_fields[::-1]:
attr = getattr(self, field)
if not attr: # None or empty listy / tuple
continue
if isinstance(attr, (list, tuple)):
return attr[-1]
return attr
return None
def parent_of(self, node):
"""Check if this node is the parent of the given node.
:param node: The node to check if it is the child.
:type node: NodeNG
:returns: True if this node is the parent of the given node,
False otherwise.
:rtype: bool
"""
parent = node.parent
while parent is not None:
if self is parent:
return True
parent = parent.parent
return False
def statement(self):
"""The first parent node, including self, marked as statement node.
:returns: The first parent statement.
:rtype: NodeNG
"""
if self.is_statement:
return self
return self.parent.statement()
def frame(self):
"""The first parent frame node.
A frame node is a :class:`Module`, :class:`FunctionDef`,
or :class:`ClassDef`.
:returns: The first parent frame node.
:rtype: Module or FunctionDef or ClassDef
"""
return self.parent.frame()
def scope(self):
"""The first parent node defining a new scope.
:returns: The first parent scope node.
:rtype: Module or FunctionDef or ClassDef or Lambda or GenExpr
"""
if self.parent:
return self.parent.scope()
return None
def root(self):
"""Return the root node of the syntax tree.
:returns: The root node.
:rtype: Module
"""
if self.parent:
return self.parent.root()
return self
def child_sequence(self, child):
"""Search for the sequence that contains this child.
:param child: The child node to search sequences for.
:type child: NodeNG
:returns: The sequence containing the given child node.
:rtype: iterable(NodeNG)
:raises AstroidError: If no sequence could be found that contains
the given child.
"""
for field in self._astroid_fields:
node_or_sequence = getattr(self, field)
if node_or_sequence is child:
return [node_or_sequence]
# /!\ compiler.ast Nodes have an __iter__ walking over child nodes
if (
isinstance(node_or_sequence, (tuple, list))
and child in node_or_sequence
):
return node_or_sequence
msg = "Could not find %s in %s's children"
raise AstroidError(msg % (repr(child), repr(self)))
def locate_child(self, child):
"""Find the field of this node that contains the given child.
:param child: The child node to search fields for.
:type child: NodeNG
:returns: A tuple of the name of the field that contains the child,
and the sequence or node that contains the child node.
:rtype: tuple(str, iterable(NodeNG) or NodeNG)
:raises AstroidError: If no field could be found that contains
the given child.
"""
for field in self._astroid_fields:
node_or_sequence = getattr(self, field)
# /!\ compiler.ast Nodes have an __iter__ walking over child nodes
if child is node_or_sequence:
return field, child
if (
isinstance(node_or_sequence, (tuple, list))
and child in node_or_sequence
):
return field, node_or_sequence
msg = "Could not find %s in %s's children"
raise AstroidError(msg % (repr(child), repr(self)))
# FIXME : should we merge child_sequence and locate_child ? locate_child
# is only used in are_exclusive, child_sequence one time in pylint.
def next_sibling(self):
"""The next sibling statement node.
:returns: The next sibling statement node.
:rtype: NodeNG or None
"""
return self.parent.next_sibling()
def previous_sibling(self):
"""The previous sibling statement.
:returns: The previous sibling statement node.
:rtype: NodeNG or None
"""
return self.parent.previous_sibling()
# these are lazy because they're relatively expensive to compute for every
# single node, and they rarely get looked at
@decorators.cachedproperty
def fromlineno(self) -> Optional[int]:
"""The first line that this node appears on in the source code."""
if self.lineno is None:
return self._fixed_source_line()
return self.lineno
@decorators.cachedproperty
def tolineno(self) -> Optional[int]:
"""The last line that this node appears on in the source code."""
if not self._astroid_fields:
# can't have children
last_child = None
else:
last_child = self.last_child()
if last_child is None:
return self.fromlineno
return last_child.tolineno # pylint: disable=no-member
def _fixed_source_line(self) -> Optional[int]:
"""Attempt to find the line that this node appears on.
We need this method since not all nodes have :attr:`lineno` set.
"""
line = self.lineno
_node = self
try:
while line is None:
_node = next(_node.get_children())
line = _node.lineno
except StopIteration:
_node = self.parent
while _node and line is None:
line = _node.lineno
_node = _node.parent
return line
def block_range(self, lineno):
"""Get a range from the given line number to where this node ends.
:param lineno: The line number to start the range at.
:type lineno: int
:returns: The range of line numbers that this node belongs to,
starting at the given line number.
:rtype: tuple(int, int or None)
"""
return lineno, self.tolineno
def set_local(self, name, stmt):
"""Define that the given name is declared in the given statement node.
This definition is stored on the parent scope node.
.. seealso:: :meth:`scope`
:param name: The name that is being defined.
:type name: str
:param stmt: The statement that defines the given name.
:type stmt: NodeNG
"""
self.parent.set_local(name, stmt)
def nodes_of_class(self, klass, skip_klass=None):
"""Get the nodes (including this one or below) of the given types.
:param klass: The types of node to search for.
:type klass: builtins.type or tuple(builtins.type)
:param skip_klass: The types of node to ignore. This is useful to ignore
subclasses of :attr:`klass`.
:type skip_klass: builtins.type or tuple(builtins.type)
:returns: The node of the given types.
:rtype: iterable(NodeNG)
"""
if isinstance(self, klass):
yield self
if skip_klass is None:
for child_node in self.get_children():
yield from child_node.nodes_of_class(klass, skip_klass)
return
for child_node in self.get_children():
if isinstance(child_node, skip_klass):
continue
yield from child_node.nodes_of_class(klass, skip_klass)
@decorators.cached
def _get_assign_nodes(self):
return []
def _get_name_nodes(self):
for child_node in self.get_children():
yield from child_node._get_name_nodes()
def _get_return_nodes_skip_functions(self):
yield from ()
def _get_yield_nodes_skip_lambdas(self):
yield from ()
def _infer_name(self, frame, name):
# overridden for ImportFrom, Import, Global, TryExcept and Arguments
pass
def _infer(self, context=None):
"""we don't know how to resolve a statement by default"""
# this method is overridden by most concrete classes
raise InferenceError(
"No inference function for {node!r}.", node=self, context=context
)
def inferred(self):
"""Get a list of the inferred values.
.. seealso:: :ref:`inference`
:returns: The inferred values.
:rtype: list
"""
return list(self.infer())
def instantiate_class(self):
"""Instantiate an instance of the defined class.
.. note::
On anything other than a :class:`ClassDef` this will return self.
:returns: An instance of the defined class.
:rtype: object
"""
return self
def has_base(self, node):
"""Check if this node inherits from the given type.
:param node: The node defining the base to look for.
Usually this is a :class:`Name` node.
:type node: NodeNG
"""
return False
def callable(self):
"""Whether this node defines something that is callable.
:returns: True if this defines something that is callable,
False otherwise.
:rtype: bool
"""
return False
def eq(self, value):
return False
def as_string(self):
"""Get the source code that this node represents.
:returns: The source code.
:rtype: str
"""
return as_string.to_code(self)
def repr_tree(
self,
ids=False,
include_linenos=False,
ast_state=False,
indent=" ",
max_depth=0,
max_width=80,
) -> str:
"""Get a string representation of the AST from this node.
:param ids: If true, includes the ids with the node type names.
:type ids: bool
:param include_linenos: If true, includes the line numbers and
column offsets.
:type include_linenos: bool
:param ast_state: If true, includes information derived from
the whole AST like local and global variables.
:type ast_state: bool
:param indent: A string to use to indent the output string.
:type indent: str
:param max_depth: If set to a positive integer, won't return
nodes deeper than max_depth in the string.
:type max_depth: int
:param max_width: Attempt to format the output string to stay
within this number of characters, but can exceed it under some
circumstances. Only positive integer values are valid, the default is 80.
:type max_width: int
:returns: The string representation of the AST.
:rtype: str
"""
@_singledispatch
def _repr_tree(node, result, done, cur_indent="", depth=1):
"""Outputs a representation of a non-tuple/list, non-node that's
contained within an AST, including strings.
"""
lines = pprint.pformat(
node, width=max(max_width - len(cur_indent), 1)
).splitlines(True)
result.append(lines[0])
result.extend([cur_indent + line for line in lines[1:]])
return len(lines) != 1
# pylint: disable=unused-variable,useless-suppression; doesn't understand singledispatch
@_repr_tree.register(tuple)
@_repr_tree.register(list)
def _repr_seq(node, result, done, cur_indent="", depth=1):
"""Outputs a representation of a sequence that's contained within an AST."""
cur_indent += indent
result.append("[")
if not node:
broken = False
elif len(node) == 1:
broken = _repr_tree(node[0], result, done, cur_indent, depth)
elif len(node) == 2:
broken = _repr_tree(node[0], result, done, cur_indent, depth)
if not broken:
result.append(", ")
else:
result.append(",\n")
result.append(cur_indent)
broken = _repr_tree(node[1], result, done, cur_indent, depth) or broken
else:
result.append("\n")
result.append(cur_indent)
for child in node[:-1]:
_repr_tree(child, result, done, cur_indent, depth)
result.append(",\n")
result.append(cur_indent)
_repr_tree(node[-1], result, done, cur_indent, depth)
broken = True
result.append("]")
return broken
# pylint: disable=unused-variable,useless-suppression; doesn't understand singledispatch
@_repr_tree.register(NodeNG)
def _repr_node(node, result, done, cur_indent="", depth=1):
"""Outputs a strings representation of an astroid node."""
if node in done:
result.append(
indent
+ "<Recursion on {} with id={}".format(
type(node).__name__, id(node)
)
)
return False
done.add(node)
if max_depth and depth > max_depth:
result.append("...")
return False
depth += 1
cur_indent += indent
if ids:
result.append(f"{type(node).__name__}<0x{id(node):x}>(\n")
else:
result.append("%s(" % type(node).__name__)
fields = []
if include_linenos:
fields.extend(("lineno", "col_offset"))
fields.extend(node._other_fields)
fields.extend(node._astroid_fields)
if ast_state:
fields.extend(node._other_other_fields)
if not fields:
broken = False
elif len(fields) == 1:
result.append("%s=" % fields[0])
broken = _repr_tree(
getattr(node, fields[0]), result, done, cur_indent, depth
)
else:
result.append("\n")
result.append(cur_indent)
for field in fields[:-1]:
result.append("%s=" % field)
_repr_tree(getattr(node, field), result, done, cur_indent, depth)
result.append(",\n")
result.append(cur_indent)
result.append("%s=" % fields[-1])
_repr_tree(getattr(node, fields[-1]), result, done, cur_indent, depth)
broken = True
result.append(")")
return broken
result = []
_repr_tree(self, result, set())
return "".join(result)
def bool_value(self, context=None):
"""Determine the boolean value of this node.
The boolean value of a node can have three
possible values:
* False: For instance, empty data structures,
False, empty strings, instances which return
explicitly False from the __nonzero__ / __bool__
method.
* True: Most of constructs are True by default:
classes, functions, modules etc
* Uninferable: The inference engine is uncertain of the
node's value.
:returns: The boolean value of this node.
:rtype: bool or Uninferable
"""
return util.Uninferable
def op_precedence(self):
# Look up by class name or default to highest precedence
return OP_PRECEDENCE.get(self.__class__.__name__, len(OP_PRECEDENCE))
def op_left_associative(self):
# Everything is left associative except `**` and IfExp
return True
class Statement(NodeNG):
"""Statement node adding a few attributes"""
is_statement = True
"""Whether this node indicates a statement."""
def next_sibling(self):
"""The next sibling statement node.
:returns: The next sibling statement node.
:rtype: NodeNG or None
"""
stmts = self.parent.child_sequence(self)
index = stmts.index(self)
try:
return stmts[index + 1]
except IndexError:
return None
def previous_sibling(self):
"""The previous sibling statement.
:returns: The previous sibling statement node.
:rtype: NodeNG or None
"""
stmts = self.parent.child_sequence(self)
index = stmts.index(self)
if index >= 1:
return stmts[index - 1]
return None
class _BaseContainer(
mixins.ParentAssignTypeMixin, NodeNG, bases.Instance, metaclass=abc.ABCMeta
):
"""Base class for Set, FrozenSet, Tuple and List."""
_astroid_fields = ("elts",)
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.elts: typing.List[NodeNG] = []
"""The elements in the node."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, elts: typing.List[NodeNG]) -> None:
"""Do some setup after initialisation.
:param elts: The list of elements the that node contains.
"""
self.elts = elts
@classmethod
def from_elements(cls, elts=None):
"""Create a node of this type from the given list of elements.
:param elts: The list of elements that the node should contain.
:type elts: list(NodeNG)
:returns: A new node containing the given elements.
:rtype: NodeNG
"""
node = cls()
if elts is None:
node.elts = []
else:
node.elts = [const_factory(e) if _is_const(e) else e for e in elts]
return node
def itered(self):
"""An iterator over the elements this node contains.
:returns: The contents of this node.
:rtype: iterable(NodeNG)
"""
return self.elts
def bool_value(self, context=None):
"""Determine the boolean value of this node.
:returns: The boolean value of this node.
:rtype: bool or Uninferable
"""
return bool(self.elts)
@abc.abstractmethod
def pytype(self):
"""Get the name of the type that this node represents.
:returns: The name of the type.
:rtype: str
"""
def get_children(self):
yield from self.elts
class LookupMixIn:
"""Mixin to look up a name in the right scope."""
@lru_cache(maxsize=None)
def lookup(self, name):
"""Lookup where the given variable is assigned.
The lookup starts from self's scope. If self is not a frame itself
and the name is found in the inner frame locals, statements will be
filtered to remove ignorable statements according to self's location.
:param name: The name of the variable to find assignments for.
:type name: str
:returns: The scope node and the list of assignments associated to the
given name according to the scope where it has been found (locals,
globals or builtin).
:rtype: tuple(str, list(NodeNG))
"""
return self.scope().scope_lookup(self, name)
def ilookup(self, name):
"""Lookup the inferred values of the given variable.
:param name: The variable name to find values for.
:type name: str
:returns: The inferred values of the statements returned from
:meth:`lookup`.
:rtype: iterable
"""
frame, stmts = self.lookup(name)
context = contextmod.InferenceContext()
return bases._infer_stmts(stmts, context, frame)
def _get_filtered_node_statements(self, nodes):
statements = [(node, node.statement()) for node in nodes]
# Next we check if we have ExceptHandlers that are parent
# of the underlying variable, in which case the last one survives
if len(statements) > 1 and all(
isinstance(stmt, ExceptHandler) for _, stmt in statements
):
statements = [
(node, stmt) for node, stmt in statements if stmt.parent_of(self)
]
return statements
def _filter_stmts(self, stmts, frame, offset):
"""Filter the given list of statements to remove ignorable statements.
If self is not a frame itself and the name is found in the inner
frame locals, statements will be filtered to remove ignorable
statements according to self's location.
:param stmts: The statements to filter.
:type stmts: list(NodeNG)
:param frame: The frame that all of the given statements belong to.
:type frame: NodeNG
:param offset: The line offset to filter statements up to.
:type offset: int
:returns: The filtered statements.
:rtype: list(NodeNG)
"""
# if offset == -1, my actual frame is not the inner frame but its parent
#
# class A(B): pass
#
# we need this to resolve B correctly
if offset == -1:
myframe = self.frame().parent.frame()
else:
myframe = self.frame()
# If the frame of this node is the same as the statement
# of this node, then the node is part of a class or
        # a function definition and the frame of this node should be
# the upper frame, not the frame of the definition.
# For more information why this is important,
# see Pylint issue #295.
# For example, for 'b', the statement is the same
# as the frame / scope:
#
# def test(b=1):
# ...
if self.statement() is myframe and myframe.parent:
myframe = myframe.parent.frame()
mystmt = self.statement()
# line filtering if we are in the same frame
#
# take care node may be missing lineno information (this is the case for
# nodes inserted for living objects)
if myframe is frame and mystmt.fromlineno is not None:
assert mystmt.fromlineno is not None, mystmt
mylineno = mystmt.fromlineno + offset
else:
# disabling lineno filtering
mylineno = 0
_stmts = []
_stmt_parents = []
statements = self._get_filtered_node_statements(stmts)
for node, stmt in statements:
# line filtering is on and we have reached our location, break
if stmt.fromlineno and stmt.fromlineno > mylineno > 0:
break
# Ignore decorators with the same name as the
# decorated function
# Fixes issue #375
if mystmt is stmt and is_from_decorator(self):
continue
assert hasattr(node, "assign_type"), (
node,
node.scope(),
node.scope().locals,
)
assign_type = node.assign_type()
if node.has_base(self):
break
_stmts, done = assign_type._get_filtered_stmts(self, node, _stmts, mystmt)
if done:
break
optional_assign = assign_type.optional_assign
if optional_assign and assign_type.parent_of(self):
# we are inside a loop, loop var assignment is hiding previous
# assignment
_stmts = [node]
_stmt_parents = [stmt.parent]
continue
if isinstance(assign_type, NamedExpr):
_stmts = [node]
continue
# XXX comment various branches below!!!
try:
pindex = _stmt_parents.index(stmt.parent)
except ValueError:
pass
else:
# we got a parent index, this means the currently visited node
# is at the same block level as a previously visited node
if _stmts[pindex].assign_type().parent_of(assign_type):
# both statements are not at the same block level
continue
# if currently visited node is following previously considered
# assignment and both are not exclusive, we can drop the
# previous one. For instance in the following code ::
#
# if a:
# x = 1
# else:
# x = 2
# print x
#
# we can remove neither x = 1 nor x = 2 when looking for 'x'
# of 'print x'; while in the following ::
#
# x = 1
# x = 2
# print x
#
# we can remove x = 1 when we see x = 2
#
# moreover, on loop assignment types, assignment won't
# necessarily be done if the loop has no iteration, so we don't
# want to clear previous assignments if any (hence the test on
# optional_assign)
if not (optional_assign or are_exclusive(_stmts[pindex], node)):
if (
# In case of partial function node, if the statement is different
# from the origin function then it can be deleted otherwise it should
# remain to be able to correctly infer the call to origin function.
not node.is_function
or node.qname() != "PartialFunction"
or node.name != _stmts[pindex].name
):
del _stmt_parents[pindex]
del _stmts[pindex]
if isinstance(node, AssignName):
if not optional_assign and stmt.parent is mystmt.parent:
_stmts = []
_stmt_parents = []
elif isinstance(node, DelName):
_stmts = []
_stmt_parents = []
continue
if not are_exclusive(self, node):
_stmts.append(node)
_stmt_parents.append(stmt.parent)
return _stmts
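# Illustrative sketch tying the comments above together (hypothetical snippets,
# not part of the library): for straight-line re-assignment only the last
# assignment should survive the filtering, while exclusive branches keep both.
#
#     import astroid
#     straight = astroid.extract_node("x = 1\nx = 2\nx  #@")
#     straight.lookup("x")[1]   # expected: only the AssignName from `x = 2`
#
#     branched = astroid.extract_node(
#         "if a:\n    x = 1\nelse:\n    x = 2\nx  #@"
#     )
#     branched.lookup("x")[1]   # expected: both AssignName nodes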
# Name classes
class AssignName(
mixins.NoChildrenMixin, LookupMixIn, mixins.ParentAssignTypeMixin, NodeNG
):
"""Variation of :class:`ast.Assign` representing assignment to a name.
An :class:`AssignName` is the name of something that is assigned to.
This includes variables defined in a function signature or in a loop.
>>> node = astroid.extract_node('variable = range(10)')
>>> node
<Assign l.1 at 0x7effe1db8550>
>>> list(node.get_children())
[<AssignName.variable l.1 at 0x7effe1db8748>, <Call l.1 at 0x7effe1db8630>]
>>> list(node.get_children())[0].as_string()
'variable'
"""
_other_fields = ("name",)
def __init__(
self,
name: Optional[str] = None,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param name: The name that is assigned to.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.name: Optional[str] = name
"""The name that is assigned to."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
class DelName(
mixins.NoChildrenMixin, LookupMixIn, mixins.ParentAssignTypeMixin, NodeNG
):
"""Variation of :class:`ast.Delete` representing deletion of a name.
A :class:`DelName` is the name of something that is deleted.
>>> node = astroid.extract_node("del variable #@")
>>> list(node.get_children())
[<DelName.variable l.1 at 0x7effe1da4d30>]
>>> list(node.get_children())[0].as_string()
'variable'
"""
_other_fields = ("name",)
def __init__(
self,
name: Optional[str] = None,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param name: The name that is being deleted.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.name: Optional[str] = name
"""The name that is being deleted."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
class Name(mixins.NoChildrenMixin, LookupMixIn, NodeNG):
"""Class representing an :class:`ast.Name` node.
A :class:`Name` node is something that is named, but not covered by
:class:`AssignName` or :class:`DelName`.
>>> node = astroid.extract_node('range(10)')
>>> node
<Call l.1 at 0x7effe1db8710>
>>> list(node.get_children())
[<Name.range l.1 at 0x7effe1db86a0>, <Const.int l.1 at 0x7effe1db8518>]
>>> list(node.get_children())[0].as_string()
'range'
"""
_other_fields = ("name",)
def __init__(
self,
name: Optional[str] = None,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param name: The name that this node refers to.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.name: Optional[str] = name
"""The name that this node refers to."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def _get_name_nodes(self):
yield self
for child_node in self.get_children():
yield from child_node._get_name_nodes()
class Arguments(mixins.AssignTypeMixin, NodeNG):
"""Class representing an :class:`ast.arguments` node.
An :class:`Arguments` node represents the arguments in a
function definition.
>>> node = astroid.extract_node('def foo(bar): pass')
>>> node
<FunctionDef.foo l.1 at 0x7effe1db8198>
>>> node.args
<Arguments l.1 at 0x7effe1db82e8>
"""
# Python 3.4+ uses a different approach regarding annotations,
# each argument is a new class, _ast.arg, which exposes an
# 'annotation' attribute. In astroid though, arguments are exposed
# as is in the Arguments node and the only way to expose annotations
# is by using something similar with Python 3.3:
# - we expose 'varargannotation' and 'kwargannotation' of annotations
# of varargs and kwargs.
# - we expose 'annotations', a list with annotations for
# each normal argument. If an argument doesn't have an
# annotation, its value will be None.
# pylint: disable=too-many-instance-attributes
_astroid_fields = (
"args",
"defaults",
"kwonlyargs",
"posonlyargs",
"posonlyargs_annotations",
"kw_defaults",
"annotations",
"varargannotation",
"kwargannotation",
"kwonlyargs_annotations",
"type_comment_args",
"type_comment_kwonlyargs",
"type_comment_posonlyargs",
)
_other_fields = ("vararg", "kwarg")
def __init__(
self,
vararg: Optional[str] = None,
kwarg: Optional[str] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param vararg: The name of the variable length arguments.
:param kwarg: The name of the variable length keyword arguments.
:param parent: The parent node in the syntax tree.
"""
super().__init__(parent=parent)
self.vararg: Optional[str] = vararg # can be None
"""The name of the variable length arguments."""
self.kwarg: Optional[str] = kwarg # can be None
"""The name of the variable length keyword arguments."""
self.args: typing.List[AssignName]
"""The names of the required arguments."""
self.defaults: typing.List[NodeNG]
"""The default values for arguments that can be passed positionally."""
self.kwonlyargs: typing.List[AssignName]
"""The keyword arguments that cannot be passed positionally."""
self.posonlyargs: typing.List[AssignName] = []
"""The arguments that can only be passed positionally."""
self.kw_defaults: typing.List[Optional[NodeNG]]
"""The default values for keyword arguments that cannot be passed positionally."""
self.annotations: typing.List[Optional[NodeNG]]
"""The type annotations of arguments that can be passed positionally."""
self.posonlyargs_annotations: typing.List[Optional[NodeNG]] = []
"""The type annotations of arguments that can only be passed positionally."""
self.kwonlyargs_annotations: typing.List[Optional[NodeNG]] = []
"""The type annotations of arguments that cannot be passed positionally."""
self.type_comment_args: typing.List[Optional[NodeNG]] = []
"""The type annotation, passed by a type comment, of each argument.
If an argument does not have a type comment,
the value for that argument will be None.
"""
self.type_comment_kwonlyargs: typing.List[Optional[NodeNG]] = []
"""The type annotation, passed by a type comment, of each keyword only argument.
If an argument does not have a type comment,
the value for that argument will be None.
"""
self.type_comment_posonlyargs: typing.List[Optional[NodeNG]] = []
"""The type annotation, passed by a type comment, of each positional argument.
If an argument does not have a type comment,
the value for that argument will be None.
"""
self.varargannotation: Optional[NodeNG] = None # can be None
"""The type annotation for the variable length arguments."""
self.kwargannotation: Optional[NodeNG] = None # can be None
"""The type annotation for the variable length keyword arguments."""
# pylint: disable=too-many-arguments
def postinit(
self,
args: typing.List[AssignName],
defaults: typing.List[NodeNG],
kwonlyargs: typing.List[AssignName],
kw_defaults: typing.List[Optional[NodeNG]],
annotations: typing.List[Optional[NodeNG]],
posonlyargs: Optional[typing.List[AssignName]] = None,
kwonlyargs_annotations: Optional[typing.List[Optional[NodeNG]]] = None,
posonlyargs_annotations: Optional[typing.List[Optional[NodeNG]]] = None,
varargannotation: Optional[NodeNG] = None,
kwargannotation: Optional[NodeNG] = None,
type_comment_args: Optional[typing.List[Optional[NodeNG]]] = None,
type_comment_kwonlyargs: Optional[typing.List[Optional[NodeNG]]] = None,
type_comment_posonlyargs: Optional[typing.List[Optional[NodeNG]]] = None,
) -> None:
"""Do some setup after initialisation.
:param args: The names of the required arguments.
:param defaults: The default values for arguments that can be passed
positionally.
:param kwonlyargs: The keyword arguments that cannot be passed
positionally.
:param posonlyargs: The arguments that can only be passed
positionally.
:param kw_defaults: The default values for keyword arguments that
cannot be passed positionally.
:param annotations: The type annotations of arguments that can be
passed positionally.
:param kwonlyargs_annotations: The type annotations of arguments that
cannot be passed positionally. This should always be passed in
Python 3.
:param posonlyargs_annotations: The type annotations of arguments that
can only be passed positionally. This should always be passed in
Python 3.
:param varargannotation: The type annotation for the variable length
arguments.
:param kwargannotation: The type annotation for the variable length
keyword arguments.
:param type_comment_args: The type annotation,
passed by a type comment, of each argument.
:param type_comment_kwonlyargs: The type annotation,
passed by a type comment, of each keyword only argument.
:param type_comment_posonlyargs: The type annotation,
passed by a type comment, of each positional only argument.
"""
self.args = args
self.defaults = defaults
self.kwonlyargs = kwonlyargs
if posonlyargs is not None:
self.posonlyargs = posonlyargs
self.kw_defaults = kw_defaults
self.annotations = annotations
if kwonlyargs_annotations is not None:
self.kwonlyargs_annotations = kwonlyargs_annotations
if posonlyargs_annotations is not None:
self.posonlyargs_annotations = posonlyargs_annotations
self.varargannotation = varargannotation
self.kwargannotation = kwargannotation
if type_comment_args is not None:
self.type_comment_args = type_comment_args
if type_comment_kwonlyargs is not None:
self.type_comment_kwonlyargs = type_comment_kwonlyargs
if type_comment_posonlyargs is not None:
self.type_comment_posonlyargs = type_comment_posonlyargs
def _infer_name(self, frame, name):
if self.parent is frame:
return name
return None
@decorators.cachedproperty
def fromlineno(self):
"""The first line that this node appears on in the source code.
:type: int or None
"""
lineno = super().fromlineno
return max(lineno, self.parent.fromlineno or 0)
@decorators.cachedproperty
def arguments(self):
"""Get all the arguments for this node, including positional-only and positional-or-keyword arguments."""
return list(itertools.chain((self.posonlyargs or ()), self.args or ()))
def format_args(self):
"""Get the arguments formatted as string.
:returns: The formatted arguments.
:rtype: str
"""
result = []
positional_only_defaults = []
positional_or_keyword_defaults = self.defaults
if self.defaults:
args = self.args or []
positional_or_keyword_defaults = self.defaults[-len(args) :]
positional_only_defaults = self.defaults[: len(self.defaults) - len(args)]
if self.posonlyargs:
result.append(
_format_args(
self.posonlyargs,
positional_only_defaults,
self.posonlyargs_annotations,
)
)
result.append("/")
if self.args:
result.append(
_format_args(
self.args,
positional_or_keyword_defaults,
getattr(self, "annotations", None),
)
)
if self.vararg:
result.append("*%s" % self.vararg)
if self.kwonlyargs:
if not self.vararg:
result.append("*")
result.append(
_format_args(
self.kwonlyargs, self.kw_defaults, self.kwonlyargs_annotations
)
)
if self.kwarg:
result.append("**%s" % self.kwarg)
return ", ".join(result)
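# Illustrative sketch (hypothetical signature, not part of the library): what
# format_args is expected to produce for a small function definition.
#
#     import astroid
#     func = astroid.extract_node("def f(a, b=1, *args, c, **kw): pass")
#     func.args.format_args()   # expected: "a, b=1, *args, c, **kw"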
def default_value(self, argname):
"""Get the default value for an argument.
:param argname: The name of the argument to get the default value for.
:type argname: str
:raises NoDefault: If there is no default value defined for the
given argument.
"""
args = self.arguments
index = _find_arg(argname, args)[0]
if index is not None:
idx = index - (len(args) - len(self.defaults))
if idx >= 0:
return self.defaults[idx]
index = _find_arg(argname, self.kwonlyargs)[0]
if index is not None and self.kw_defaults[index] is not None:
return self.kw_defaults[index]
raise NoDefault(func=self.parent, name=argname)
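# Illustrative usage sketch (hypothetical names): default_value returns the
# default node for an argument and raises NoDefault when none is defined.
#
#     import astroid
#     func = astroid.extract_node("def f(a, b=2): pass")
#     func.args.default_value("b").value   # expected: 2
#     func.args.default_value("a")         # expected: raises NoDefault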
def is_argument(self, name):
"""Check if the given name is defined in the arguments.
:param name: The name to check for.
:type name: str
:returns: True if the given name is defined in the arguments,
False otherwise.
:rtype: bool
"""
if name == self.vararg:
return True
if name == self.kwarg:
return True
return (
self.find_argname(name, rec=True)[1] is not None
or self.kwonlyargs
and _find_arg(name, self.kwonlyargs, rec=True)[1] is not None
)
def find_argname(self, argname, rec=False):
"""Get the index and :class:`AssignName` node for given name.
:param argname: The name of the argument to search for.
:type argname: str
:param rec: Whether or not to include arguments in unpacked tuples
in the search.
:type rec: bool
:returns: The index and node for the argument.
:rtype: tuple(str or None, AssignName or None)
"""
if self.arguments:
return _find_arg(argname, self.arguments, rec)
return None, None
def get_children(self):
yield from self.posonlyargs or ()
for elt in self.posonlyargs_annotations:
if elt is not None:
yield elt
yield from self.args or ()
yield from self.defaults
yield from self.kwonlyargs
for elt in self.kw_defaults:
if elt is not None:
yield elt
for elt in self.annotations:
if elt is not None:
yield elt
if self.varargannotation is not None:
yield self.varargannotation
if self.kwargannotation is not None:
yield self.kwargannotation
for elt in self.kwonlyargs_annotations:
if elt is not None:
yield elt
def _find_arg(argname, args, rec=False):
for i, arg in enumerate(args):
if isinstance(arg, Tuple):
if rec:
found = _find_arg(argname, arg.elts)
if found[0] is not None:
return found
elif arg.name == argname:
return i, arg
return None, None
def _format_args(args, defaults=None, annotations=None):
values = []
if args is None:
return ""
if annotations is None:
annotations = []
if defaults is not None:
default_offset = len(args) - len(defaults)
packed = itertools.zip_longest(args, annotations)
for i, (arg, annotation) in enumerate(packed):
if isinstance(arg, Tuple):
values.append("(%s)" % _format_args(arg.elts))
else:
argname = arg.name
default_sep = "="
if annotation is not None:
argname += ": " + annotation.as_string()
default_sep = " = "
values.append(argname)
if defaults is not None and i >= default_offset:
if defaults[i - default_offset] is not None:
values[-1] += default_sep + defaults[i - default_offset].as_string()
return ", ".join(values)
class AssignAttr(mixins.ParentAssignTypeMixin, NodeNG):
"""Variation of :class:`ast.Assign` representing assignment to an attribute.
>>> node = astroid.extract_node('self.attribute = range(10)')
>>> node
<Assign l.1 at 0x7effe1d521d0>
>>> list(node.get_children())
[<AssignAttr.attribute l.1 at 0x7effe1d52320>, <Call l.1 at 0x7effe1d522e8>]
>>> list(node.get_children())[0].as_string()
'self.attribute'
"""
_astroid_fields = ("expr",)
_other_fields = ("attrname",)
def __init__(
self,
attrname: Optional[str] = None,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param attrname: The name of the attribute being assigned to.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.expr: Optional[NodeNG] = None
"""What has the attribute that is being assigned to."""
self.attrname: Optional[str] = attrname
"""The name of the attribute being assigned to."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, expr: Optional[NodeNG] = None) -> None:
"""Do some setup after initialisation.
:param expr: What has the attribute that is being assigned to.
"""
self.expr = expr
def get_children(self):
yield self.expr
class Assert(Statement):
"""Class representing an :class:`ast.Assert` node.
An :class:`Assert` node represents an assert statement.
>>> node = astroid.extract_node('assert len(things) == 10, "Not enough things"')
>>> node
<Assert l.1 at 0x7effe1d527b8>
"""
_astroid_fields = ("test", "fail")
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.test: Optional[NodeNG] = None
"""The test that passes or fails the assertion."""
self.fail: Optional[NodeNG] = None # can be None
"""The message shown when the assertion fails."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self, test: Optional[NodeNG] = None, fail: Optional[NodeNG] = None
) -> None:
"""Do some setup after initialisation.
:param test: The test that passes or fails the assertion.
:param fail: The message shown when the assertion fails.
"""
self.fail = fail
self.test = test
def get_children(self):
yield self.test
if self.fail is not None:
yield self.fail
class Assign(mixins.AssignTypeMixin, Statement):
"""Class representing an :class:`ast.Assign` node.
An :class:`Assign` is a statement where something is explicitly
assigned to.
>>> node = astroid.extract_node('variable = range(10)')
>>> node
<Assign l.1 at 0x7effe1db8550>
"""
_astroid_fields = ("targets", "value")
_other_other_fields = ("type_annotation",)
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.targets: typing.List[NodeNG] = []
"""What is being assigned to."""
self.value: Optional[NodeNG] = None
"""The value being assigned to the variables."""
self.type_annotation: Optional[NodeNG] = None
"""If present, this will contain the type annotation passed by a type comment"""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self,
targets: Optional[typing.List[NodeNG]] = None,
value: Optional[NodeNG] = None,
type_annotation: Optional[NodeNG] = None,
) -> None:
"""Do some setup after initialisation.
:param targets: What is being assigned to.
:param value: The value being assigned to the variables.
:param type_annotation: If present, the type annotation passed by a type comment.
"""
if targets is not None:
self.targets = targets
self.value = value
self.type_annotation = type_annotation
def get_children(self):
yield from self.targets
yield self.value
@decorators.cached
def _get_assign_nodes(self):
return [self] + list(self.value._get_assign_nodes())
def _get_yield_nodes_skip_lambdas(self):
yield from self.value._get_yield_nodes_skip_lambdas()
class AnnAssign(mixins.AssignTypeMixin, Statement):
"""Class representing an :class:`ast.AnnAssign` node.
An :class:`AnnAssign` is an assignment with a type annotation.
>>> node = astroid.extract_node('variable: List[int] = range(10)')
>>> node
<AnnAssign l.1 at 0x7effe1d4c630>
"""
_astroid_fields = ("target", "annotation", "value")
_other_fields = ("simple",)
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.target: Optional[NodeNG] = None
"""What is being assigned to."""
self.annotation: Optional[NodeNG] = None
"""The type annotation of what is being assigned to."""
self.value: Optional[NodeNG] = None # can be None
"""The value being assigned to the variables."""
self.simple: Optional[int] = None
"""Whether :attr:`target` is a pure name or a complex statement."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self,
target: NodeNG,
annotation: NodeNG,
simple: int,
value: Optional[NodeNG] = None,
) -> None:
"""Do some setup after initialisation.
:param target: What is being assigned to.
:param annotation: The type annotation of what is being assigned to.
:param simple: Whether :attr:`target` is a pure name
or a complex statement.
:param value: The value being assigned to the variables.
"""
self.target = target
self.annotation = annotation
self.value = value
self.simple = simple
def get_children(self):
yield self.target
yield self.annotation
if self.value is not None:
yield self.value
class AugAssign(mixins.AssignTypeMixin, Statement):
"""Class representing an :class:`ast.AugAssign` node.
An :class:`AugAssign` is an assignment paired with an operator.
>>> node = astroid.extract_node('variable += 1')
>>> node
<AugAssign l.1 at 0x7effe1db4d68>
"""
_astroid_fields = ("target", "value")
_other_fields = ("op",)
def __init__(
self,
op: Optional[str] = None,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param op: The operator that is being combined with the assignment.
This includes the equals sign.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.target: Optional[NodeNG] = None
"""What is being assigned to."""
self.op: Optional[str] = op
"""The operator that is being combined with the assignment.
This includes the equals sign.
"""
self.value: Optional[NodeNG] = None
"""The value being assigned to the variable."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self, target: Optional[NodeNG] = None, value: Optional[NodeNG] = None
) -> None:
"""Do some setup after initialisation.
:param target: What is being assigned to.
:param value: The value being assigned to the variable.
"""
self.target = target
self.value = value
# This is set by inference.py
def _infer_augassign(self, context=None):
raise NotImplementedError
def type_errors(self, context=None):
"""Get a list of type errors which can occur during inference.
Each TypeError is represented by a :class:`BadBinaryOperationMessage` ,
which holds the original exception.
:returns: The list of possible type errors.
:rtype: list(BadBinaryOperationMessage)
"""
try:
results = self._infer_augassign(context=context)
return [
result
for result in results
if isinstance(result, util.BadBinaryOperationMessage)
]
except InferenceError:
return []
def get_children(self):
yield self.target
yield self.value
def _get_yield_nodes_skip_lambdas(self):
"""An AugAssign node can contain a Yield node in the value"""
yield from self.value._get_yield_nodes_skip_lambdas()
yield from super()._get_yield_nodes_skip_lambdas()
class BinOp(NodeNG):
"""Class representing an :class:`ast.BinOp` node.
A :class:`BinOp` node is an application of a binary operator.
>>> node = astroid.extract_node('a + b')
>>> node
<BinOp l.1 at 0x7f23b2e8cfd0>
"""
_astroid_fields = ("left", "right")
_other_fields = ("op",)
def __init__(
self,
op: Optional[str] = None,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param op: The operator.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.left: Optional[NodeNG] = None
"""What is being applied to the operator on the left side."""
self.op: Optional[str] = op
"""The operator."""
self.right: Optional[NodeNG] = None
"""What is being applied to the operator on the right side."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self, left: Optional[NodeNG] = None, right: Optional[NodeNG] = None
) -> None:
"""Do some setup after initialisation.
:param left: What is being applied to the operator on the left side.
:param right: What is being applied to the operator on the right side.
"""
self.left = left
self.right = right
# This is set by inference.py
def _infer_binop(self, context=None):
raise NotImplementedError
def type_errors(self, context=None):
"""Get a list of type errors which can occur during inference.
Each TypeError is represented by a :class:`BadBinaryOperationMessage`,
which holds the original exception.
:returns: The list of possible type errors.
:rtype: list(BadBinaryOperationMessage)
"""
try:
results = self._infer_binop(context=context)
return [
result
for result in results
if isinstance(result, util.BadBinaryOperationMessage)
]
except InferenceError:
return []
def get_children(self):
yield self.left
yield self.right
def op_precedence(self):
return OP_PRECEDENCE[self.op]
def op_left_associative(self):
# 2**3**4 == 2**(3**4)
return self.op != "**"
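# Illustrative note (hypothetical snippet): ``**`` is the only right-associative
# binary operator, which is what op_left_associative encodes; 2 ** 3 ** 4 is
# therefore parsed as 2 ** (3 ** 4).
#
#     import astroid
#     node = astroid.extract_node("2 ** 3 ** 4")
#     node.op_left_associative()   # expected: False
#     node.right                   # expected: the nested BinOp for 3 ** 4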
class BoolOp(NodeNG):
"""Class representing an :class:`ast.BoolOp` node.
A :class:`BoolOp` is an application of a boolean operator.
>>> node = astroid.extract_node('a and b')
>>> node
<BinOp l.1 at 0x7f23b2e71c50>
"""
_astroid_fields = ("values",)
_other_fields = ("op",)
def __init__(
self,
op: Optional[str] = None,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param op: The operator.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.op: Optional[str] = op
"""The operator."""
self.values: typing.List[NodeNG] = []
"""The values being applied to the operator."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, values: Optional[typing.List[NodeNG]] = None) -> None:
"""Do some setup after initialisation.
:param values: The values being applied to the operator.
"""
if values is not None:
self.values = values
def get_children(self):
yield from self.values
def op_precedence(self):
return OP_PRECEDENCE[self.op]
class Break(mixins.NoChildrenMixin, Statement):
"""Class representing an :class:`ast.Break` node.
>>> node = astroid.extract_node('break')
>>> node
<Break l.1 at 0x7f23b2e9e5c0>
"""
class Call(NodeNG):
"""Class representing an :class:`ast.Call` node.
A :class:`Call` node is a call to a function, method, etc.
>>> node = astroid.extract_node('function()')
>>> node
<Call l.1 at 0x7f23b2e71eb8>
"""
_astroid_fields = ("func", "args", "keywords")
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.func: Optional[NodeNG] = None
"""What is being called."""
self.args: typing.List[NodeNG] = []
"""The positional arguments being given to the call."""
self.keywords: typing.List["Keyword"] = []
"""The keyword arguments being given to the call."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self,
func: Optional[NodeNG] = None,
args: Optional[typing.List[NodeNG]] = None,
keywords: Optional[typing.List["Keyword"]] = None,
) -> None:
"""Do some setup after initialisation.
:param func: What is being called.
:param args: The positional arguments being given to the call.
:param keywords: The keyword arguments being given to the call.
"""
self.func = func
if args is not None:
self.args = args
if keywords is not None:
self.keywords = keywords
@property
def starargs(self) -> typing.List["Starred"]:
"""The positional arguments that unpack something."""
return [arg for arg in self.args if isinstance(arg, Starred)]
@property
def kwargs(self) -> typing.List["Keyword"]:
"""The keyword arguments that unpack something."""
return [keyword for keyword in self.keywords if keyword.arg is None]
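# Illustrative usage sketch (hypothetical call, not part of the library):
# starargs and kwargs pick out the unpacking arguments of a call.
#
#     import astroid
#     call = astroid.extract_node("func(1, *rest, key=2, **extra)")
#     [arg.as_string() for arg in call.starargs]   # expected: ['*rest']
#     [kw.arg for kw in call.keywords]             # expected: ['key', None]
#     [kw.arg for kw in call.kwargs]               # expected: [None] (the **extra)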
def get_children(self):
yield self.func
yield from self.args
yield from self.keywords
class Compare(NodeNG):
"""Class representing an :class:`ast.Compare` node.
A :class:`Compare` node indicates a comparison.
>>> node = astroid.extract_node('a <= b <= c')
>>> node
<Compare l.1 at 0x7f23b2e9e6d8>
>>> node.ops
[('<=', <Name.b l.1 at 0x7f23b2e9e2b0>), ('<=', <Name.c l.1 at 0x7f23b2e9e390>)]
"""
_astroid_fields = ("left", "ops")
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.left: Optional[NodeNG] = None
"""The value at the left being applied to a comparison operator."""
self.ops: typing.List[typing.Tuple[str, NodeNG]] = []
"""The remainder of the operators and their relevant right hand value."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self,
left: Optional[NodeNG] = None,
ops: Optional[typing.List[typing.Tuple[str, NodeNG]]] = None,
) -> None:
"""Do some setup after initialisation.
:param left: The value at the left being applied to a comparison
operator.
:param ops: The remainder of the operators
and their relevant right hand value.
"""
self.left = left
if ops is not None:
self.ops = ops
def get_children(self):
"""Get the child nodes below this node.
Overridden to handle the tuple fields and skip returning the operator
strings.
:returns: The children.
:rtype: iterable(NodeNG)
"""
yield self.left
for _, comparator in self.ops:
yield comparator # we don't want the 'op'
def last_child(self):
"""An optimized version of list(get_children())[-1]
:returns: The last child.
:rtype: NodeNG
"""
# XXX maybe if self.ops:
return self.ops[-1][1]
# return self.left
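# Illustrative usage sketch (hypothetical comparison): last_child returns the
# right-hand node of the final comparison operator.
#
#     import astroid
#     node = astroid.extract_node("a <= b <= c")
#     node.last_child().as_string()   # expected: 'c'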
class Comprehension(NodeNG):
"""Class representing an :class:`ast.comprehension` node.
A :class:`Comprehension` indicates the loop inside any type of
comprehension including generator expressions.
>>> node = astroid.extract_node('[x for x in some_values]')
>>> list(node.get_children())
[<Name.x l.1 at 0x7f23b2e352b0>, <Comprehension l.1 at 0x7f23b2e35320>]
>>> list(node.get_children())[1].as_string()
'for x in some_values'
"""
_astroid_fields = ("target", "iter", "ifs")
_other_fields = ("is_async",)
optional_assign = True
"""Whether this node optionally assigns a variable."""
def __init__(self, parent: Optional[NodeNG] = None) -> None:
"""
:param parent: The parent node in the syntax tree.
"""
self.target: Optional[NodeNG] = None
"""What is assigned to by the comprehension."""
self.iter: Optional[NodeNG] = None
"""What is iterated over by the comprehension."""
self.ifs: typing.List[NodeNG] = []
"""The contents of any if statements that filter the comprehension."""
self.is_async: Optional[bool] = None
"""Whether this is an asynchronous comprehension or not."""
super().__init__(parent=parent)
# pylint: disable=redefined-builtin; same name as builtin ast module.
def postinit(
self,
target: Optional[NodeNG] = None,
iter: Optional[NodeNG] = None,
ifs: Optional[typing.List[NodeNG]] = None,
is_async: Optional[bool] = None,
) -> None:
"""Do some setup after initialisation.
:param target: What is assigned to by the comprehension.
:param iter: What is iterated over by the comprehension.
:param ifs: The contents of any if statements that filter
the comprehension.
:param is_async: Whether this is an asynchronous comprehension or not.
"""
self.target = target
self.iter = iter
if ifs is not None:
self.ifs = ifs
self.is_async = is_async
def assign_type(self):
"""The type of assignment that this node performs.
:returns: The assignment type.
:rtype: NodeNG
"""
return self
def _get_filtered_stmts(self, lookup_node, node, stmts, mystmt):
"""method used in filter_stmts"""
if self is mystmt:
if isinstance(lookup_node, (Const, Name)):
return [lookup_node], True
elif self.statement() is mystmt:
# original node's statement is the assignment, only keeps
# current node (gen exp, list comp)
return [node], True
return stmts, False
def get_children(self):
yield self.target
yield self.iter
yield from self.ifs
class Const(mixins.NoChildrenMixin, NodeNG, bases.Instance):
"""Class representing any constant including num, str, bool, None, bytes.
>>> node = astroid.extract_node('(5, "This is a string.", True, None, b"bytes")')
>>> node
<Tuple.tuple l.1 at 0x7f23b2e358d0>
>>> list(node.get_children())
[<Const.int l.1 at 0x7f23b2e35940>,
<Const.str l.1 at 0x7f23b2e35978>,
<Const.bool l.1 at 0x7f23b2e359b0>,
<Const.NoneType l.1 at 0x7f23b2e359e8>,
<Const.bytes l.1 at 0x7f23b2e35a20>]
"""
_other_fields = ("value",)
def __init__(
self,
value: typing.Any,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
kind: Optional[str] = None,
) -> None:
"""
:param value: The value that the constant represents.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
:param kind: The string prefix. "u" for u-prefixed strings and ``None`` otherwise. Python 3.8+ only.
"""
self.value: typing.Any = value
"""The value that the constant represents."""
self.kind: Optional[str] = kind  # can be None
"""The string prefix. "u" for u-prefixed strings and ``None`` otherwise. Python 3.8+ only."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def __getattr__(self, name):
# This is needed because of Proxy's __getattr__ method.
# Calling object.__new__ on this class without calling
# __init__ would result in an infinite loop otherwise
# since __getattr__ is called when an attribute doesn't
# exist and self._proxied indirectly calls self.value
# and Proxy __getattr__ calls self.value
if name == "value":
raise AttributeError
return super().__getattr__(name)
def getitem(self, index, context=None):
"""Get an item from this node if subscriptable.
:param index: The node to use as a subscript index.
:type index: Const or Slice
:raises AstroidTypeError: When the given index cannot be used as a
subscript index, or if this node is not subscriptable.
"""
if isinstance(index, Const):
index_value = index.value
elif isinstance(index, Slice):
index_value = _infer_slice(index, context=context)
else:
raise AstroidTypeError(
f"Could not use type {type(index)} as subscript index"
)
try:
if isinstance(self.value, (str, bytes)):
return Const(self.value[index_value])
except IndexError as exc:
raise AstroidIndexError(
message="Index {index!r} out of range",
node=self,
index=index,
context=context,
) from exc
except TypeError as exc:
raise AstroidTypeError(
message="Type error {error!r}", node=self, index=index, context=context
) from exc
raise AstroidTypeError(f"{self!r} (value={self.value})")
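# Illustrative usage sketch (hypothetical values, not part of the library):
# Const.getitem only supports str/bytes values and wraps the result in a new
# Const; anything else raises AstroidTypeError.
#
#     from astroid import nodes
#     nodes.Const("abc").getitem(nodes.Const(1)).value   # expected: "b"
#     nodes.Const(5).getitem(nodes.Const(0))             # expected: raises AstroidTypeError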
def has_dynamic_getattr(self):
"""Check if the node has a custom __getattr__ or __getattribute__.
:returns: True if the class has a custom
__getattr__ or __getattribute__, False otherwise.
For a :class:`Const` this is always ``False``.
:rtype: bool
"""
return False
def itered(self):
"""An iterator over the elements this node contains.
:returns: The contents of this node.
:rtype: iterable(Const)
:raises TypeError: If this node does not represent something that is iterable.
"""
if isinstance(self.value, str):
return [const_factory(elem) for elem in self.value]
raise TypeError(f"Cannot iterate over type {type(self.value)!r}")
def pytype(self):
"""Get the name of the type that this node represents.
:returns: The name of the type.
:rtype: str
"""
return self._proxied.qname()
def bool_value(self, context=None):
"""Determine the boolean value of this node.
:returns: The boolean value of this node.
:rtype: bool
"""
return bool(self.value)
class Continue(mixins.NoChildrenMixin, Statement):
"""Class representing an :class:`ast.Continue` node.
>>> node = astroid.extract_node('continue')
>>> node
<Continue l.1 at 0x7f23b2e35588>
"""
class Decorators(NodeNG):
"""A node representing a list of decorators.
A :class:`Decorators` node holds the decorators that are applied to
a method or function.
>>> node = astroid.extract_node('''
@property
def my_property(self):
return 3
''')
>>> node
<FunctionDef.my_property l.2 at 0x7f23b2e35d30>
>>> list(node.get_children())[0]
<Decorators l.1 at 0x7f23b2e35d68>
"""
_astroid_fields = ("nodes",)
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.nodes: typing.List[NodeNG]
"""The decorators that this node contains.
:type: list(Name or Call) or None
"""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, nodes: typing.List[NodeNG]) -> None:
"""Do some setup after initialisation.
:param nodes: The decorators that this node contains.
:type nodes: list(Name or Call)
"""
self.nodes = nodes
def scope(self):
"""The first parent node defining a new scope.
:returns: The first parent scope node.
:rtype: Module or FunctionDef or ClassDef or Lambda or GenExpr
"""
# skip the function node to go directly to the upper level scope
return self.parent.parent.scope()
def get_children(self):
yield from self.nodes
class DelAttr(mixins.ParentAssignTypeMixin, NodeNG):
"""Variation of :class:`ast.Delete` representing deletion of an attribute.
>>> node = astroid.extract_node('del self.attr')
>>> node
<Delete l.1 at 0x7f23b2e35f60>
>>> list(node.get_children())[0]
<DelAttr.attr l.1 at 0x7f23b2e411d0>
"""
_astroid_fields = ("expr",)
_other_fields = ("attrname",)
def __init__(
self,
attrname: Optional[str] = None,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param attrname: The name of the attribute that is being deleted.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.expr: Optional[NodeNG] = None
"""The name that this node represents.
:type: Name or None
"""
self.attrname: Optional[str] = attrname
"""The name of the attribute that is being deleted."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, expr: Optional[NodeNG] = None) -> None:
"""Do some setup after initialisation.
:param expr: The name that this node represents.
:type expr: Name or None
"""
self.expr = expr
def get_children(self):
yield self.expr
class Delete(mixins.AssignTypeMixin, Statement):
"""Class representing an :class:`ast.Delete` node.
A :class:`Delete` is a ``del`` statement that deletes something.
>>> node = astroid.extract_node('del self.attr')
>>> node
<Delete l.1 at 0x7f23b2e35f60>
"""
_astroid_fields = ("targets",)
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.targets: typing.List[NodeNG] = []
"""What is being deleted."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, targets: Optional[typing.List[NodeNG]] = None) -> None:
"""Do some setup after initialisation.
:param targets: What is being deleted.
"""
if targets is not None:
self.targets = targets
def get_children(self):
yield from self.targets
class Dict(NodeNG, bases.Instance):
"""Class representing an :class:`ast.Dict` node.
A :class:`Dict` is a dictionary that is created with ``{}`` syntax.
>>> node = astroid.extract_node('{1: "1"}')
>>> node
<Dict.dict l.1 at 0x7f23b2e35cc0>
"""
_astroid_fields = ("items",)
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.items: typing.List[typing.Tuple[NodeNG, NodeNG]] = []
"""The key-value pairs contained in the dictionary."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, items: typing.List[typing.Tuple[NodeNG, NodeNG]]) -> None:
"""Do some setup after initialisation.
:param items: The key-value pairs contained in the dictionary.
"""
self.items = items
@classmethod
def from_elements(cls, items=None):
"""Create a :class:`Dict` of constants from a live dictionary.
:param items: The items to store in the node.
:type items: dict
:returns: The created dictionary node.
:rtype: Dict
"""
node = cls()
if items is None:
node.items = []
else:
node.items = [
(const_factory(k), const_factory(v) if _is_const(v) else v)
for k, v in items.items()
# The keys need to be constants
if _is_const(k)
]
return node
def pytype(self):
"""Get the name of the type that this node represents.
:returns: The name of the type.
:rtype: str
"""
return "%s.dict" % BUILTINS
def get_children(self):
"""Get the key and value nodes below this node.
Children are returned in the order that they are defined in the source
code, key first then the value.
:returns: The children.
:rtype: iterable(NodeNG)
"""
for key, value in self.items:
yield key
yield value
def last_child(self):
"""An optimized version of list(get_children())[-1]
:returns: The last child, or None if no children exist.
:rtype: NodeNG or None
"""
if self.items:
return self.items[-1][1]
return None
def itered(self):
"""An iterator over the keys this node contains.
:returns: The keys of this node.
:rtype: iterable(NodeNG)
"""
return [key for (key, _) in self.items]
def getitem(self, index, context=None):
"""Get an item from this node.
:param index: The node to use as a subscript index.
:type index: Const or Slice
:raises AstroidTypeError: When the given index cannot be used as a
subscript index, or if this node is not subscriptable.
:raises AstroidIndexError: If the given index does not exist in the
dictionary.
"""
for key, value in self.items:
# TODO(cpopa): no support for overriding yet, {1:2, **{1: 3}}.
if isinstance(key, DictUnpack):
try:
return value.getitem(index, context)
except (AstroidTypeError, AstroidIndexError):
continue
for inferredkey in key.infer(context):
if inferredkey is util.Uninferable:
continue
if isinstance(inferredkey, Const) and isinstance(index, Const):
if inferredkey.value == index.value:
return value
raise AstroidIndexError(index)
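# Illustrative usage sketch (hypothetical literal): Dict.getitem infers each key
# and compares it against the index, so constant keys can be resolved directly.
#
#     import astroid
#     node = astroid.extract_node('{1: "one", 2: "two"}')
#     node.getitem(astroid.extract_node("1")).value   # expected: "one"
#     node.getitem(astroid.extract_node("3"))         # expected: raises AstroidIndexError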
def bool_value(self, context=None):
"""Determine the boolean value of this node.
:returns: The boolean value of this node.
:rtype: bool
"""
return bool(self.items)
class Expr(Statement):
"""Class representing an :class:`ast.Expr` node.
An :class:`Expr` is any expression that does not have its value used or
stored.
>>> node = astroid.extract_node('method()')
>>> node
<Call l.1 at 0x7f23b2e352b0>
>>> node.parent
<Expr l.1 at 0x7f23b2e35278>
"""
_astroid_fields = ("value",)
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.value: Optional[NodeNG] = None
"""What the expression does."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, value: Optional[NodeNG] = None) -> None:
"""Do some setup after initialisation.
:param value: What the expression does.
"""
self.value = value
def get_children(self):
yield self.value
def _get_yield_nodes_skip_lambdas(self):
if not self.value.is_lambda:
yield from self.value._get_yield_nodes_skip_lambdas()
class Ellipsis(mixins.NoChildrenMixin, NodeNG): # pylint: disable=redefined-builtin
"""Class representing an :class:`ast.Ellipsis` node.
An :class:`Ellipsis` is the ``...`` syntax.
Deprecated since v2.6.0 - Use :class:`Const` instead.
Will be removed with the release of v2.7.0
"""
class EmptyNode(mixins.NoChildrenMixin, NodeNG):
"""Holds an arbitrary object in the :attr:`LocalsDictNodeNG.locals`."""
object = None
class ExceptHandler(mixins.MultiLineBlockMixin, mixins.AssignTypeMixin, Statement):
"""Class representing an :class:`ast.ExceptHandler` node.
An :class:`ExceptHandler` is an ``except`` block on a try-except.
>>> node = astroid.extract_node('''
try:
do_something()
except Exception as error:
print("Error!")
''')
>>> node
<TryExcept l.2 at 0x7f23b2e9d908>
>>> node.handlers
[<ExceptHandler l.4 at 0x7f23b2e9e860>]
"""
_astroid_fields = ("type", "name", "body")
_multi_line_block_fields = ("body",)
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.type: Optional[NodeNG] = None # can be None
"""The types that the block handles.
:type: Tuple or NodeNG or None
"""
self.name: Optional[AssignName] = None # can be None
"""The name that the caught exception is assigned to."""
self.body: typing.List[NodeNG] = []
"""The contents of the block."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def get_children(self):
if self.type is not None:
yield self.type
if self.name is not None:
yield self.name
yield from self.body
# pylint: disable=redefined-builtin; had to use the same name as builtin ast module.
def postinit(
self,
type: Optional[NodeNG] = None,
name: Optional[AssignName] = None,
body: Optional[typing.List[NodeNG]] = None,
) -> None:
"""Do some setup after initialisation.
:param type: The types that the block handles.
:type type: Tuple or NodeNG or None
:param name: The name that the caught exception is assigned to.
:param body: The contents of the block.
"""
self.type = type
self.name = name
if body is not None:
self.body = body
@decorators.cachedproperty
def blockstart_tolineno(self):
"""The line on which the beginning of this block ends.
:type: int
"""
if self.name:
return self.name.tolineno
if self.type:
return self.type.tolineno
return self.lineno
def catch(self, exceptions: Optional[typing.List[str]]) -> bool:
"""Check if this node handles any of the given exceptions.
:param exceptions: The names of the exceptions to check for.
"""
if self.type is None or exceptions is None:
return True
for node in self.type._get_name_nodes():
if node.name in exceptions:
return True
return False
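# Illustrative usage sketch (hypothetical snippet, building on the class
# docstring above): catch() matches the given names against the names found in
# the handler's ``type`` expression, and an empty filter always matches.
#
#     import astroid
#     handler = astroid.extract_node('''
#     try:
#         do_something()
#     except (ValueError, KeyError) as error:
#         pass
#     ''').handlers[0]
#     handler.catch(["KeyError"])    # expected: True
#     handler.catch(["TypeError"])   # expected: False
#     handler.catch(None)            # expected: True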
class ExtSlice(NodeNG):
"""Class representing an :class:`ast.ExtSlice` node.
An :class:`ExtSlice` is a complex slice expression.
Deprecated since v2.6.0 - Now part of the :class:`Subscript` node.
Will be removed with the release of v2.7.0
"""
class For(
mixins.MultiLineBlockMixin,
mixins.BlockRangeMixIn,
mixins.AssignTypeMixin,
Statement,
):
"""Class representing an :class:`ast.For` node.
>>> node = astroid.extract_node('for thing in things: print(thing)')
>>> node
<For l.1 at 0x7f23b2e8cf28>
"""
_astroid_fields = ("target", "iter", "body", "orelse")
_other_other_fields = ("type_annotation",)
_multi_line_block_fields = ("body", "orelse")
optional_assign = True
"""Whether this node optionally assigns a variable.
This is always ``True`` for :class:`For` nodes.
"""
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.target: Optional[NodeNG] = None
"""What the loop assigns to."""
self.iter: Optional[NodeNG] = None
"""What the loop iterates over."""
self.body: typing.List[NodeNG] = []
"""The contents of the body of the loop."""
self.orelse: typing.List[NodeNG] = []
"""The contents of the ``else`` block of the loop."""
self.type_annotation: Optional[NodeNG] = None # can be None
"""If present, this will contain the type annotation passed by a type comment"""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
# pylint: disable=redefined-builtin; had to use the same name as builtin ast module.
def postinit(
self,
target: Optional[NodeNG] = None,
iter: Optional[NodeNG] = None,
body: Optional[typing.List[NodeNG]] = None,
orelse: Optional[typing.List[NodeNG]] = None,
type_annotation: Optional[NodeNG] = None,
) -> None:
"""Do some setup after initialisation.
:param target: What the loop assigns to.
:param iter: What the loop iterates over.
:param body: The contents of the body of the loop.
:param orelse: The contents of the ``else`` block of the loop.
"""
self.target = target
self.iter = iter
if body is not None:
self.body = body
if orelse is not None:
self.orelse = orelse
self.type_annotation = type_annotation
@decorators.cachedproperty
def blockstart_tolineno(self):
"""The line on which the beginning of this block ends.
:type: int
"""
return self.iter.tolineno
def get_children(self):
yield self.target
yield self.iter
yield from self.body
yield from self.orelse
class AsyncFor(For):
"""Class representing an :class:`ast.AsyncFor` node.
An :class:`AsyncFor` is an asynchronous :class:`For` built with
the ``async`` keyword.
>>> node = astroid.extract_node('''
async def func(things):
async for thing in things:
print(thing)
''')
>>> node
<AsyncFunctionDef.func l.2 at 0x7f23b2e416d8>
>>> node.body[0]
<AsyncFor l.3 at 0x7f23b2e417b8>
"""
class Await(NodeNG):
"""Class representing an :class:`ast.Await` node.
An :class:`Await` is the ``await`` keyword.
>>> node = astroid.extract_node('''
async def func(things):
await other_func()
''')
>>> node
<AsyncFunctionDef.func l.2 at 0x7f23b2e41748>
>>> node.body[0]
<Expr l.3 at 0x7f23b2e419e8>
>>> list(node.body[0].get_children())[0]
<Await l.3 at 0x7f23b2e41a20>
"""
_astroid_fields = ("value",)
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.value: Optional[NodeNG] = None
"""What to wait for."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, value: Optional[NodeNG] = None) -> None:
"""Do some setup after initialisation.
:param value: What to wait for.
"""
self.value = value
def get_children(self):
yield self.value
class ImportFrom(mixins.NoChildrenMixin, mixins.ImportFromMixin, Statement):
"""Class representing an :class:`ast.ImportFrom` node.
>>> node = astroid.extract_node('from my_package import my_module')
>>> node
<ImportFrom l.1 at 0x7f23b2e415c0>
"""
_other_fields = ("modname", "names", "level")
def __init__(
self,
fromname: Optional[str],
names: typing.List[typing.Tuple[str, Optional[str]]],
level: Optional[int] = 0,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param fromname: The module that is being imported from.
:param names: What is being imported from the module.
:param level: The level of relative import.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.modname: Optional[str] = fromname # can be None
"""The module that is being imported from.
This is ``None`` for relative imports.
"""
self.names: typing.List[typing.Tuple[str, Optional[str]]] = names
"""What is being imported from the module.
Each entry is a :class:`tuple` of the name being imported,
and the alias that the name is assigned to (if any).
"""
# TODO When is 'level' None?
self.level: Optional[int] = level # can be None
"""The level of relative import.
Essentially this is the number of dots in the import.
This is always 0 for absolute imports.
"""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
class Attribute(NodeNG):
"""Class representing an :class:`ast.Attribute` node."""
_astroid_fields = ("expr",)
_other_fields = ("attrname",)
def __init__(
self,
attrname: Optional[str] = None,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param attrname: The name of the attribute.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.expr: Optional[NodeNG] = None
"""The name that this node represents.
:type: Name or None
"""
self.attrname: Optional[str] = attrname
"""The name of the attribute."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, expr: Optional[NodeNG] = None) -> None:
"""Do some setup after initialisation.
:param expr: The name that this node represents.
:type expr: Name or None
"""
self.expr = expr
def get_children(self):
yield self.expr
class Global(mixins.NoChildrenMixin, Statement):
"""Class representing an :class:`ast.Global` node.
>>> node = astroid.extract_node('global a_global')
>>> node
<Global l.1 at 0x7f23b2e9de10>
"""
_other_fields = ("names",)
def __init__(
self,
names: typing.List[str],
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param names: The names being declared as global.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.names: typing.List[str] = names
"""The names being declared as global."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def _infer_name(self, frame, name):
return name
class If(mixins.MultiLineBlockMixin, mixins.BlockRangeMixIn, Statement):
"""Class representing an :class:`ast.If` node.
>>> node = astroid.extract_node('if condition: print(True)')
>>> node
<If l.1 at 0x7f23b2e9dd30>
"""
_astroid_fields = ("test", "body", "orelse")
_multi_line_block_fields = ("body", "orelse")
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.test: Optional[NodeNG] = None
"""The condition that the statement tests."""
self.body: typing.List[NodeNG] = []
"""The contents of the block."""
self.orelse: typing.List[NodeNG] = []
"""The contents of the ``else`` block."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self,
test: Optional[NodeNG] = None,
body: Optional[typing.List[NodeNG]] = None,
orelse: Optional[typing.List[NodeNG]] = None,
) -> None:
"""Do some setup after initialisation.
:param test: The condition that the statement tests.
:param body: The contents of the block.
:param orelse: The contents of the ``else`` block.
"""
self.test = test
if body is not None:
self.body = body
if orelse is not None:
self.orelse = orelse
@decorators.cachedproperty
def blockstart_tolineno(self):
"""The line on which the beginning of this block ends.
:type: int
"""
return self.test.tolineno
def block_range(self, lineno):
"""Get a range from the given line number to where this node ends.
:param lineno: The line number to start the range at.
:type lineno: int
:returns: The range of line numbers that this node belongs to,
starting at the given line number.
:rtype: tuple(int, int)
"""
if lineno == self.body[0].fromlineno:
return lineno, lineno
if lineno <= self.body[-1].tolineno:
return lineno, self.body[-1].tolineno
return self._elsed_block_range(lineno, self.orelse, self.body[0].fromlineno - 1)
def get_children(self):
yield self.test
yield from self.body
yield from self.orelse
def has_elif_block(self):
return len(self.orelse) == 1 and isinstance(self.orelse[0], If)
def _get_yield_nodes_skip_lambdas(self):
"""An If node can contain a Yield node in the test"""
yield from self.test._get_yield_nodes_skip_lambdas()
yield from super()._get_yield_nodes_skip_lambdas()
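# --- Illustrative sketch (editor addition, not part of the original astroid source).
# It shows how the If node's elif detection might be exercised; the use of
# astroid.extract_node here is an assumption for illustration only.
def _example_if_elif_detection():
    import astroid  # imported lazily to avoid a circular import at module load time

    node = astroid.extract_node(
        """
        if condition_a:
            pass
        elif condition_b:
            pass
        """
    )
    # The elif branch is stored as a single nested If inside `orelse`.
    return node.has_elif_block()  # expected: True for this snippet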
class IfExp(NodeNG):
"""Class representing an :class:`ast.IfExp` node.
>>> node = astroid.extract_node('value if condition else other')
>>> node
<IfExp l.1 at 0x7f23b2e9dbe0>
"""
_astroid_fields = ("test", "body", "orelse")
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.test: Optional[NodeNG] = None
"""The condition that the statement tests."""
self.body: Optional[NodeNG] = None
"""The contents of the block."""
self.orelse: Optional[NodeNG] = None
"""The contents of the ``else`` block."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self,
test: Optional[NodeNG] = None,
body: Optional[NodeNG] = None,
orelse: Optional[NodeNG] = None,
) -> None:
"""Do some setup after initialisation.
:param test: The condition that the statement tests.
:param body: The contents of the block.
:param orelse: The contents of the ``else`` block.
"""
self.test = test
self.body = body
self.orelse = orelse
def get_children(self):
yield self.test
yield self.body
yield self.orelse
def op_left_associative(self):
# `1 if True else 2 if False else 3` is parsed as
# `1 if True else (2 if False else 3)`
return False
class Import(mixins.NoChildrenMixin, mixins.ImportFromMixin, Statement):
"""Class representing an :class:`ast.Import` node.
>>> node = astroid.extract_node('import astroid')
>>> node
<Import l.1 at 0x7f23b2e4e5c0>
"""
_other_fields = ("names",)
def __init__(
self,
names: Optional[typing.List[typing.Tuple[str, Optional[str]]]] = None,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param names: The names being imported.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.names: typing.List[typing.Tuple[str, Optional[str]]] = names or []
"""The names being imported.
Each entry is a :class:`tuple` of the name being imported,
and the alias that the name is assigned to (if any).
"""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
class Index(NodeNG):
"""Class representing an :class:`ast.Index` node.
An :class:`Index` is a simple subscript.
Deprecated since v2.6.0 - Now part of the :class:`Subscript` node.
Will be removed with the release of v2.7.0
"""
class Keyword(NodeNG):
"""Class representing an :class:`ast.keyword` node.
>>> node = astroid.extract_node('function(a_kwarg=True)')
>>> node
<Call l.1 at 0x7f23b2e9e320>
>>> node.keywords
[<Keyword l.1 at 0x7f23b2e9e9b0>]
"""
_astroid_fields = ("value",)
_other_fields = ("arg",)
def __init__(
self,
arg: Optional[str] = None,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param arg: The argument being assigned to.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.arg: Optional[str] = arg # can be None
"""The argument being assigned to."""
self.value: Optional[NodeNG] = None
"""The value being assigned to the keyword argument."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, value: Optional[NodeNG] = None) -> None:
"""Do some setup after initialisation.
        :param value: The value being assigned to the keyword argument.
"""
self.value = value
def get_children(self):
yield self.value
class List(_BaseContainer):
"""Class representing an :class:`ast.List` node.
>>> node = astroid.extract_node('[1, 2, 3]')
>>> node
<List.list l.1 at 0x7f23b2e9e128>
"""
_other_fields = ("ctx",)
def __init__(
self,
ctx: Optional[Context] = None,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param ctx: Whether the list is assigned to or loaded from.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.ctx: Optional[Context] = ctx
"""Whether the list is assigned to or loaded from."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def pytype(self):
"""Get the name of the type that this node represents.
:returns: The name of the type.
:rtype: str
"""
return "%s.list" % BUILTINS
def getitem(self, index, context=None):
"""Get an item from this node.
:param index: The node to use as a subscript index.
:type index: Const or Slice
"""
return _container_getitem(self, self.elts, index, context=context)
class Nonlocal(mixins.NoChildrenMixin, Statement):
"""Class representing an :class:`ast.Nonlocal` node.
>>> node = astroid.extract_node('''
def function():
nonlocal var
''')
>>> node
<FunctionDef.function l.2 at 0x7f23b2e9e208>
>>> node.body[0]
<Nonlocal l.3 at 0x7f23b2e9e908>
"""
_other_fields = ("names",)
def __init__(
self,
names: typing.List[str],
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param names: The names being declared as not local.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.names: typing.List[str] = names
"""The names being declared as not local."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def _infer_name(self, frame, name):
return name
class Pass(mixins.NoChildrenMixin, Statement):
"""Class representing an :class:`ast.Pass` node.
>>> node = astroid.extract_node('pass')
>>> node
<Pass l.1 at 0x7f23b2e9e748>
"""
class Raise(Statement):
"""Class representing an :class:`ast.Raise` node.
>>> node = astroid.extract_node('raise RuntimeError("Something bad happened!")')
>>> node
<Raise l.1 at 0x7f23b2e9e828>
"""
_astroid_fields = ("exc", "cause")
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.exc: Optional[NodeNG] = None # can be None
"""What is being raised."""
self.cause: Optional[NodeNG] = None # can be None
"""The exception being used to raise this one."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self,
exc: Optional[NodeNG] = None,
cause: Optional[NodeNG] = None,
) -> None:
"""Do some setup after initialisation.
:param exc: What is being raised.
:param cause: The exception being used to raise this one.
"""
self.exc = exc
self.cause = cause
def raises_not_implemented(self):
"""Check if this node raises a :class:`NotImplementedError`.
:returns: True if this node raises a :class:`NotImplementedError`,
False otherwise.
:rtype: bool
"""
if not self.exc:
return False
for name in self.exc._get_name_nodes():
if name.name == "NotImplementedError":
return True
return False
def get_children(self):
if self.exc is not None:
yield self.exc
if self.cause is not None:
yield self.cause
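# --- Illustrative sketch (editor addition): a minimal check of
# Raise.raises_not_implemented() on a parsed `raise NotImplementedError(...)`.
# The use of astroid.extract_node here is an assumption for illustration only.
def _example_raises_not_implemented():
    import astroid

    node = astroid.extract_node("raise NotImplementedError('todo')")
    # The name NotImplementedError appears inside `exc`, so this should be True.
    return node.raises_not_implemented()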
class Return(Statement):
"""Class representing an :class:`ast.Return` node.
>>> node = astroid.extract_node('return True')
>>> node
<Return l.1 at 0x7f23b8211908>
"""
_astroid_fields = ("value",)
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.value: Optional[NodeNG] = None # can be None
"""The value being returned."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, value: Optional[NodeNG] = None) -> None:
"""Do some setup after initialisation.
:param value: The value being returned.
"""
self.value = value
def get_children(self):
if self.value is not None:
yield self.value
def is_tuple_return(self):
return isinstance(self.value, Tuple)
def _get_return_nodes_skip_functions(self):
yield self
class Set(_BaseContainer):
"""Class representing an :class:`ast.Set` node.
>>> node = astroid.extract_node('{1, 2, 3}')
>>> node
<Set.set l.1 at 0x7f23b2e71d68>
"""
def pytype(self):
"""Get the name of the type that this node represents.
:returns: The name of the type.
:rtype: str
"""
return "%s.set" % BUILTINS
class Slice(NodeNG):
"""Class representing an :class:`ast.Slice` node.
>>> node = astroid.extract_node('things[1:3]')
>>> node
<Subscript l.1 at 0x7f23b2e71f60>
>>> node.slice
<Slice l.1 at 0x7f23b2e71e80>
"""
_astroid_fields = ("lower", "upper", "step")
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.lower: Optional[NodeNG] = None # can be None
"""The lower index in the slice."""
self.upper: Optional[NodeNG] = None # can be None
"""The upper index in the slice."""
self.step: Optional[NodeNG] = None # can be None
"""The step to take between indexes."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self,
lower: Optional[NodeNG] = None,
upper: Optional[NodeNG] = None,
step: Optional[NodeNG] = None,
) -> None:
"""Do some setup after initialisation.
:param lower: The lower index in the slice.
:param upper: The upper index in the slice.
        :param step: The step to take between indexes.
"""
self.lower = lower
self.upper = upper
self.step = step
def _wrap_attribute(self, attr):
"""Wrap the empty attributes of the Slice in a Const node."""
if not attr:
const = const_factory(attr)
const.parent = self
return const
return attr
@decorators.cachedproperty
def _proxied(self):
builtins = AstroidManager().builtins_module
return builtins.getattr("slice")[0]
def pytype(self):
"""Get the name of the type that this node represents.
:returns: The name of the type.
:rtype: str
"""
return "%s.slice" % BUILTINS
def igetattr(self, attrname, context=None):
"""Infer the possible values of the given attribute on the slice.
:param attrname: The name of the attribute to infer.
:type attrname: str
:returns: The inferred possible values.
:rtype: iterable(NodeNG)
"""
if attrname == "start":
yield self._wrap_attribute(self.lower)
elif attrname == "stop":
yield self._wrap_attribute(self.upper)
elif attrname == "step":
yield self._wrap_attribute(self.step)
else:
yield from self.getattr(attrname, context=context)
def getattr(self, attrname, context=None):
return self._proxied.getattr(attrname, context)
def get_children(self):
if self.lower is not None:
yield self.lower
if self.upper is not None:
yield self.upper
if self.step is not None:
yield self.step
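# --- Illustrative sketch (editor addition): inferring the `start` attribute of a
# slice whose lower bound is omitted.  _wrap_attribute turns the missing bound into
# a Const(None) node.  Assumes astroid.extract_node is available when called.
def _example_slice_start_inference():
    import astroid

    subscript = astroid.extract_node("things[:3]")
    slice_node = subscript.slice
    return list(slice_node.igetattr("start"))  # expected: a single Const(None) node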
class Starred(mixins.ParentAssignTypeMixin, NodeNG):
"""Class representing an :class:`ast.Starred` node.
>>> node = astroid.extract_node('*args')
>>> node
<Starred l.1 at 0x7f23b2e41978>
"""
_astroid_fields = ("value",)
_other_fields = ("ctx",)
def __init__(
self,
ctx: Optional[Context] = None,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
        :param ctx: Whether the starred item is assigned to or loaded from.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.value: Optional[NodeNG] = None
"""What is being unpacked."""
self.ctx: Optional[Context] = ctx
"""Whether the starred item is assigned to or loaded from."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, value: Optional[NodeNG] = None) -> None:
"""Do some setup after initialisation.
:param value: What is being unpacked.
"""
self.value = value
def get_children(self):
yield self.value
class Subscript(NodeNG):
"""Class representing an :class:`ast.Subscript` node.
>>> node = astroid.extract_node('things[1:3]')
>>> node
<Subscript l.1 at 0x7f23b2e71f60>
"""
_astroid_fields = ("value", "slice")
_other_fields = ("ctx",)
def __init__(
self,
ctx: Optional[Context] = None,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param ctx: Whether the subscripted item is assigned to or loaded from.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.value: Optional[NodeNG] = None
"""What is being indexed."""
self.slice: Optional[NodeNG] = None
"""The slice being used to lookup."""
self.ctx: Optional[Context] = ctx
"""Whether the subscripted item is assigned to or loaded from."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
# pylint: disable=redefined-builtin; had to use the same name as builtin ast module.
def postinit(
self, value: Optional[NodeNG] = None, slice: Optional[NodeNG] = None
) -> None:
"""Do some setup after initialisation.
:param value: What is being indexed.
:param slice: The slice being used to lookup.
"""
self.value = value
self.slice = slice
def get_children(self):
yield self.value
yield self.slice
class TryExcept(mixins.MultiLineBlockMixin, mixins.BlockRangeMixIn, Statement):
"""Class representing an :class:`ast.TryExcept` node.
>>> node = astroid.extract_node('''
try:
do_something()
except Exception as error:
print("Error!")
''')
>>> node
<TryExcept l.2 at 0x7f23b2e9d908>
"""
_astroid_fields = ("body", "handlers", "orelse")
_multi_line_block_fields = ("body", "handlers", "orelse")
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.body: typing.List[NodeNG] = []
"""The contents of the block to catch exceptions from."""
self.handlers: typing.List[ExceptHandler] = []
"""The exception handlers."""
self.orelse: typing.List[NodeNG] = []
"""The contents of the ``else`` block."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self,
body: Optional[typing.List[NodeNG]] = None,
handlers: Optional[typing.List[ExceptHandler]] = None,
orelse: Optional[typing.List[NodeNG]] = None,
) -> None:
"""Do some setup after initialisation.
:param body: The contents of the block to catch exceptions from.
:param handlers: The exception handlers.
:param orelse: The contents of the ``else`` block.
"""
if body is not None:
self.body = body
if handlers is not None:
self.handlers = handlers
if orelse is not None:
self.orelse = orelse
def _infer_name(self, frame, name):
return name
def block_range(self, lineno):
"""Get a range from the given line number to where this node ends.
:param lineno: The line number to start the range at.
:type lineno: int
:returns: The range of line numbers that this node belongs to,
starting at the given line number.
:rtype: tuple(int, int)
"""
last = None
for exhandler in self.handlers:
if exhandler.type and lineno == exhandler.type.fromlineno:
return lineno, lineno
if exhandler.body[0].fromlineno <= lineno <= exhandler.body[-1].tolineno:
return lineno, exhandler.body[-1].tolineno
if last is None:
last = exhandler.body[0].fromlineno - 1
return self._elsed_block_range(lineno, self.orelse, last)
def get_children(self):
yield from self.body
yield from self.handlers or ()
yield from self.orelse or ()
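# --- Illustrative sketch (editor addition): the children of a TryExcept come back
# in source order -- body statements, then handlers, then the ``else`` block.
# The snippet below is an assumption-based example, not original project code.
def _example_tryexcept_children():
    import astroid

    node = astroid.extract_node(
        """
        try:
            do_something()
        except ValueError as error:
            handle(error)
        else:
            cleanup()
        """
    )
    return [type(child).__name__ for child in node.get_children()]
    # expected roughly: ['Expr', 'ExceptHandler', 'Expr']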
class TryFinally(mixins.MultiLineBlockMixin, mixins.BlockRangeMixIn, Statement):
"""Class representing an :class:`ast.TryFinally` node.
>>> node = astroid.extract_node('''
try:
do_something()
except Exception as error:
print("Error!")
finally:
print("Cleanup!")
''')
>>> node
<TryFinally l.2 at 0x7f23b2e41d68>
"""
_astroid_fields = ("body", "finalbody")
_multi_line_block_fields = ("body", "finalbody")
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.body: typing.Union[typing.List[TryExcept], typing.List[NodeNG]] = []
"""The try-except that the finally is attached to."""
self.finalbody: typing.List[NodeNG] = []
"""The contents of the ``finally`` block."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self,
body: typing.Union[typing.List[TryExcept], typing.List[NodeNG], None] = None,
finalbody: Optional[typing.List[NodeNG]] = None,
) -> None:
"""Do some setup after initialisation.
:param body: The try-except that the finally is attached to.
:param finalbody: The contents of the ``finally`` block.
"""
if body is not None:
self.body = body
if finalbody is not None:
self.finalbody = finalbody
def block_range(self, lineno):
"""Get a range from the given line number to where this node ends.
:param lineno: The line number to start the range at.
:type lineno: int
:returns: The range of line numbers that this node belongs to,
starting at the given line number.
:rtype: tuple(int, int)
"""
child = self.body[0]
# py2.5 try: except: finally:
if (
isinstance(child, TryExcept)
and child.fromlineno == self.fromlineno
and child.tolineno >= lineno > self.fromlineno
):
return child.block_range(lineno)
return self._elsed_block_range(lineno, self.finalbody)
def get_children(self):
yield from self.body
yield from self.finalbody
class Tuple(_BaseContainer):
"""Class representing an :class:`ast.Tuple` node.
>>> node = astroid.extract_node('(1, 2, 3)')
>>> node
<Tuple.tuple l.1 at 0x7f23b2e41780>
"""
_other_fields = ("ctx",)
def __init__(
self,
ctx: Optional[Context] = None,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param ctx: Whether the tuple is assigned to or loaded from.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.ctx: Optional[Context] = ctx
"""Whether the tuple is assigned to or loaded from."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def pytype(self):
"""Get the name of the type that this node represents.
:returns: The name of the type.
:rtype: str
"""
return "%s.tuple" % BUILTINS
def getitem(self, index, context=None):
"""Get an item from this node.
:param index: The node to use as a subscript index.
:type index: Const or Slice
"""
return _container_getitem(self, self.elts, index, context=context)
class UnaryOp(NodeNG):
"""Class representing an :class:`ast.UnaryOp` node.
>>> node = astroid.extract_node('-5')
>>> node
<UnaryOp l.1 at 0x7f23b2e4e198>
"""
_astroid_fields = ("operand",)
_other_fields = ("op",)
def __init__(
self,
op: Optional[str] = None,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param op: The operator.
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.op: Optional[str] = op
"""The operator."""
self.operand: Optional[NodeNG] = None
"""What the unary operator is applied to."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, operand: Optional[NodeNG] = None) -> None:
"""Do some setup after initialisation.
:param operand: What the unary operator is applied to.
"""
self.operand = operand
# This is set by inference.py
def _infer_unaryop(self, context=None):
raise NotImplementedError
def type_errors(self, context=None):
"""Get a list of type errors which can occur during inference.
        Each TypeError is represented by a :class:`BadUnaryOperationMessage`,
        which holds the original exception.
        :returns: The list of possible type errors.
        :rtype: list(BadUnaryOperationMessage)
"""
try:
results = self._infer_unaryop(context=context)
return [
result
for result in results
if isinstance(result, util.BadUnaryOperationMessage)
]
except InferenceError:
return []
def get_children(self):
yield self.operand
def op_precedence(self):
if self.op == "not":
return OP_PRECEDENCE[self.op]
return super().op_precedence()
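# --- Illustrative sketch (editor addition): type_errors() collecting the failure
# message for a unary operation that cannot succeed.  The exact message objects
# returned depend on the inference machinery; this is an assumption-based example.
def _example_unaryop_type_errors():
    import astroid

    node = astroid.extract_node('-"some text"')
    return node.type_errors()  # expected: one BadUnaryOperationMessage entry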
class While(mixins.MultiLineBlockMixin, mixins.BlockRangeMixIn, Statement):
"""Class representing an :class:`ast.While` node.
>>> node = astroid.extract_node('''
while condition():
print("True")
''')
>>> node
<While l.2 at 0x7f23b2e4e390>
"""
_astroid_fields = ("test", "body", "orelse")
_multi_line_block_fields = ("body", "orelse")
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.test: Optional[NodeNG] = None
"""The condition that the loop tests."""
self.body: typing.List[NodeNG] = []
"""The contents of the loop."""
self.orelse: typing.List[NodeNG] = []
"""The contents of the ``else`` block."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self,
test: Optional[NodeNG] = None,
body: Optional[typing.List[NodeNG]] = None,
orelse: Optional[typing.List[NodeNG]] = None,
) -> None:
"""Do some setup after initialisation.
:param test: The condition that the loop tests.
:param body: The contents of the loop.
:param orelse: The contents of the ``else`` block.
"""
self.test = test
if body is not None:
self.body = body
if orelse is not None:
self.orelse = orelse
@decorators.cachedproperty
def blockstart_tolineno(self):
"""The line on which the beginning of this block ends.
:type: int
"""
return self.test.tolineno
def block_range(self, lineno):
"""Get a range from the given line number to where this node ends.
:param lineno: The line number to start the range at.
:type lineno: int
:returns: The range of line numbers that this node belongs to,
starting at the given line number.
:rtype: tuple(int, int)
"""
return self._elsed_block_range(lineno, self.orelse)
def get_children(self):
yield self.test
yield from self.body
yield from self.orelse
def _get_yield_nodes_skip_lambdas(self):
"""A While node can contain a Yield node in the test"""
yield from self.test._get_yield_nodes_skip_lambdas()
yield from super()._get_yield_nodes_skip_lambdas()
class With(
mixins.MultiLineBlockMixin,
mixins.BlockRangeMixIn,
mixins.AssignTypeMixin,
Statement,
):
"""Class representing an :class:`ast.With` node.
>>> node = astroid.extract_node('''
with open(file_path) as file_:
print(file_.read())
''')
>>> node
<With l.2 at 0x7f23b2e4e710>
"""
_astroid_fields = ("items", "body")
_other_other_fields = ("type_annotation",)
_multi_line_block_fields = ("body",)
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.items: typing.List[typing.Tuple[NodeNG, Optional[NodeNG]]] = []
"""The pairs of context managers and the names they are assigned to."""
self.body: typing.List[NodeNG] = []
"""The contents of the ``with`` block."""
self.type_annotation: Optional[NodeNG] = None # can be None
"""If present, this will contain the type annotation passed by a type comment"""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self,
items: Optional[typing.List[typing.Tuple[NodeNG, Optional[NodeNG]]]] = None,
body: Optional[typing.List[NodeNG]] = None,
type_annotation: Optional[NodeNG] = None,
) -> None:
"""Do some setup after initialisation.
:param items: The pairs of context managers and the names
they are assigned to.
:param body: The contents of the ``with`` block.
"""
if items is not None:
self.items = items
if body is not None:
self.body = body
self.type_annotation = type_annotation
@decorators.cachedproperty
def blockstart_tolineno(self):
"""The line on which the beginning of this block ends.
:type: int
"""
return self.items[-1][0].tolineno
def get_children(self):
"""Get the child nodes below this node.
:returns: The children.
:rtype: iterable(NodeNG)
"""
for expr, var in self.items:
yield expr
if var:
yield var
yield from self.body
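# --- Illustrative sketch (editor addition): each entry of With.items pairs the
# context-manager expression with the name it is bound to (or None when there is
# no ``as`` clause).  astroid.extract_node usage here is for illustration only.
def _example_with_items():
    import astroid

    node = astroid.extract_node(
        """
        with open("a.txt") as first, open("b.txt") as second:
            pass
        """
    )
    return [(type(expr).__name__, var.name if var else None) for expr, var in node.items]
    # expected roughly: [('Call', 'first'), ('Call', 'second')]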
class AsyncWith(With):
"""Asynchronous ``with`` built with the ``async`` keyword."""
class Yield(NodeNG):
"""Class representing an :class:`ast.Yield` node.
>>> node = astroid.extract_node('yield True')
>>> node
<Yield l.1 at 0x7f23b2e4e5f8>
"""
_astroid_fields = ("value",)
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.value: Optional[NodeNG] = None # can be None
"""The value to yield."""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, value: Optional[NodeNG] = None) -> None:
"""Do some setup after initialisation.
:param value: The value to yield.
"""
self.value = value
def get_children(self):
if self.value is not None:
yield self.value
def _get_yield_nodes_skip_lambdas(self):
yield self
class YieldFrom(Yield): # TODO value is required, not optional
"""Class representing an :class:`ast.YieldFrom` node."""
class DictUnpack(mixins.NoChildrenMixin, NodeNG):
"""Represents the unpacking of dicts into dicts using :pep:`448`."""
class FormattedValue(NodeNG):
"""Class representing an :class:`ast.FormattedValue` node.
Represents a :pep:`498` format string.
>>> node = astroid.extract_node('f"Format {type_}"')
>>> node
<JoinedStr l.1 at 0x7f23b2e4ed30>
>>> node.values
[<Const.str l.1 at 0x7f23b2e4eda0>, <FormattedValue l.1 at 0x7f23b2e4edd8>]
"""
_astroid_fields = ("value", "format_spec")
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.value: NodeNG
"""The value to be formatted into the string."""
self.conversion: Optional[int] = None # can be None
"""The type of formatting to be applied to the value.
.. seealso::
:class:`ast.FormattedValue`
"""
self.format_spec: Optional[NodeNG] = None # can be None
"""The formatting to be applied to the value.
.. seealso::
:class:`ast.FormattedValue`
:type: JoinedStr or None
"""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self,
value: NodeNG,
conversion: Optional[int] = None,
format_spec: Optional[NodeNG] = None,
) -> None:
"""Do some setup after initialisation.
:param value: The value to be formatted into the string.
:param conversion: The type of formatting to be applied to the value.
:param format_spec: The formatting to be applied to the value.
:type format_spec: JoinedStr or None
"""
self.value = value
self.conversion = conversion
self.format_spec = format_spec
def get_children(self):
yield self.value
if self.format_spec is not None:
yield self.format_spec
class JoinedStr(NodeNG):
"""Represents a list of string expressions to be joined.
>>> node = astroid.extract_node('f"Format {type_}"')
>>> node
<JoinedStr l.1 at 0x7f23b2e4ed30>
"""
_astroid_fields = ("values",)
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.values: typing.List[NodeNG] = []
"""The string expressions to be joined.
:type: list(FormattedValue or Const)
"""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, values: Optional[typing.List[NodeNG]] = None) -> None:
"""Do some setup after initialisation.
        :param values: The string expressions to be joined.
:type: list(FormattedValue or Const)
"""
if values is not None:
self.values = values
def get_children(self):
yield from self.values
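# --- Illustrative sketch (editor addition): an f-string is parsed into a JoinedStr
# whose values interleave literal Const parts and FormattedValue parts.
# Assumes astroid.extract_node is importable when the helper is called.
def _example_fstring_parts():
    import astroid

    node = astroid.extract_node('f"x = {x:>10}"')
    return [type(part).__name__ for part in node.values]
    # expected roughly: ['Const', 'FormattedValue']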
class NamedExpr(mixins.AssignTypeMixin, NodeNG):
"""Represents the assignment from the assignment expression
>>> module = astroid.parse('if a := 1: pass')
>>> module.body[0].test
<NamedExpr l.1 at 0x7f23b2e4ed30>
"""
_astroid_fields = ("target", "value")
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
"""
:param lineno: The line that this node appears on in the source code.
:param col_offset: The column that this node appears on in the
source code.
:param parent: The parent node in the syntax tree.
"""
self.target: NodeNG
"""The assignment target
:type: Name
"""
self.value: NodeNG
"""The value that gets assigned in the expression"""
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, target: NodeNG, value: NodeNG) -> None:
self.target = target
self.value = value
class Unknown(mixins.AssignTypeMixin, NodeNG):
"""This node represents a node in a constructed AST where
introspection is not possible. At the moment, it's only used in
the args attribute of FunctionDef nodes where function signature
introspection failed.
"""
name = "Unknown"
def qname(self):
return "Unknown"
def infer(self, context=None, **kwargs):
"""Inference on an Unknown node immediately terminates."""
yield util.Uninferable
class EvaluatedObject(NodeNG):
"""Contains an object that has already been inferred
This class is useful to pre-evaluate a particular node,
with the resulting class acting as the non-evaluated node.
"""
name = "EvaluatedObject"
_astroid_fields = ("original",)
_other_fields = ("value",)
def __init__(
self, original: NodeNG, value: typing.Union[NodeNG, util.Uninferable]
) -> None:
self.original: NodeNG = original
"""The original node that has already been evaluated"""
self.value: typing.Union[NodeNG, util.Uninferable] = value
"""The inferred value"""
super().__init__(
lineno=self.original.lineno,
col_offset=self.original.col_offset,
parent=self.original.parent,
)
def infer(self, context=None, **kwargs):
yield self.value
# Pattern matching #######################################################
class Match(Statement):
"""Class representing a :class:`ast.Match` node.
>>> node = astroid.extract_node('''
match x:
case 200:
...
case _:
...
''')
>>> node
<Match l.2 at 0x10c24e170>
"""
_astroid_fields = ("subject", "cases")
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
self.subject: NodeNG
self.cases: typing.List["MatchCase"]
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self,
*,
subject: NodeNG,
cases: typing.List["MatchCase"],
) -> None:
self.subject = subject
self.cases = cases
class Pattern(NodeNG):
"""Base class for all Pattern nodes."""
class MatchCase(mixins.MultiLineBlockMixin, NodeNG):
"""Class representing a :class:`ast.match_case` node.
>>> node = astroid.extract_node('''
match x:
case 200:
...
''')
>>> node.cases[0]
<MatchCase l.3 at 0x10c24e590>
"""
_astroid_fields = ("pattern", "guard", "body")
_multi_line_block_fields = ("body",)
def __init__(self, *, parent: Optional[NodeNG] = None) -> None:
self.pattern: Pattern
self.guard: Optional[NodeNG]
self.body: typing.List[NodeNG]
super().__init__(parent=parent)
def postinit(
self,
*,
pattern: Pattern,
guard: Optional[NodeNG],
body: typing.List[NodeNG],
) -> None:
self.pattern = pattern
self.guard = guard
self.body = body
class MatchValue(Pattern):
"""Class representing a :class:`ast.MatchValue` node.
>>> node = astroid.extract_node('''
match x:
case 200:
...
''')
>>> node.cases[0].pattern
<MatchValue l.3 at 0x10c24e200>
"""
_astroid_fields = ("value",)
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
self.value: NodeNG
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, *, value: NodeNG) -> None:
self.value = value
class MatchSingleton(Pattern):
"""Class representing a :class:`ast.MatchSingleton` node.
>>> node = astroid.extract_node('''
match x:
case True:
...
case False:
...
case None:
...
''')
>>> node.cases[0].pattern
<MatchSingleton l.3 at 0x10c2282e0>
>>> node.cases[1].pattern
<MatchSingleton l.5 at 0x10c228af0>
>>> node.cases[2].pattern
<MatchSingleton l.7 at 0x10c229f90>
"""
_other_fields = ("value",)
def __init__(
self,
*,
value: Literal[True, False, None],
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
self.value: Literal[True, False, None] = value
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
class MatchSequence(Pattern):
"""Class representing a :class:`ast.MatchSequence` node.
>>> node = astroid.extract_node('''
match x:
case [1, 2]:
...
case (1, 2, *_):
...
''')
>>> node.cases[0].pattern
<MatchSequence l.3 at 0x10ca80d00>
>>> node.cases[1].pattern
<MatchSequence l.5 at 0x10ca80b20>
"""
_astroid_fields = ("patterns",)
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
self.patterns: typing.List[Pattern]
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, *, patterns: typing.List[Pattern]) -> None:
self.patterns = patterns
class MatchMapping(mixins.AssignTypeMixin, Pattern):
"""Class representing a :class:`ast.MatchMapping` node.
>>> node = astroid.extract_node('''
match x:
case {1: "Hello", 2: "World", 3: _, **rest}:
...
''')
>>> node.cases[0].pattern
<MatchMapping l.3 at 0x10c8a8850>
"""
_astroid_fields = ("keys", "patterns", "rest")
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
self.keys: typing.List[NodeNG]
self.patterns: typing.List[Pattern]
self.rest: Optional[AssignName]
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self,
*,
keys: typing.List[NodeNG],
patterns: typing.List[Pattern],
rest: Optional[AssignName],
) -> None:
self.keys = keys
self.patterns = patterns
self.rest = rest
class MatchClass(Pattern):
"""Class representing a :class:`ast.MatchClass` node.
>>> node = astroid.extract_node('''
match x:
case Point2D(0, 0):
...
case Point3D(x=0, y=0, z=0):
...
''')
>>> node.cases[0].pattern
<MatchClass l.3 at 0x10ca83940>
>>> node.cases[1].pattern
<MatchClass l.5 at 0x10ca80880>
"""
_astroid_fields = ("cls", "patterns", "kwd_patterns")
_other_fields = ("kwd_attrs",)
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
self.cls: NodeNG
self.patterns: typing.List[Pattern]
self.kwd_attrs: typing.List[str]
self.kwd_patterns: typing.List[Pattern]
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self,
*,
cls: NodeNG,
patterns: typing.List[Pattern],
kwd_attrs: typing.List[str],
kwd_patterns: typing.List[Pattern],
) -> None:
self.cls = cls
self.patterns = patterns
self.kwd_attrs = kwd_attrs
self.kwd_patterns = kwd_patterns
class MatchStar(mixins.AssignTypeMixin, Pattern):
"""Class representing a :class:`ast.MatchStar` node.
>>> node = astroid.extract_node('''
match x:
case [1, *_]:
...
''')
>>> node.cases[0].pattern.patterns[1]
<MatchStar l.3 at 0x10ca809a0>
"""
_astroid_fields = ("name",)
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
self.name: Optional[AssignName]
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, *, name: Optional[AssignName]) -> None:
self.name = name
class MatchAs(mixins.AssignTypeMixin, Pattern):
"""Class representing a :class:`ast.MatchAs` node.
>>> node = astroid.extract_node('''
match x:
case [1, a]:
...
case {'key': b}:
...
case Point2D(0, 0) as c:
...
case d:
...
''')
>>> node.cases[0].pattern.patterns[1]
<MatchAs l.3 at 0x10d0b2da0>
>>> node.cases[1].pattern.patterns[0]
<MatchAs l.5 at 0x10d0b2920>
>>> node.cases[2].pattern
<MatchAs l.7 at 0x10d0b06a0>
>>> node.cases[3].pattern
<MatchAs l.9 at 0x10d09b880>
"""
_astroid_fields = ("pattern", "name")
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
self.pattern: Optional[Pattern]
self.name: Optional[AssignName]
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(
self,
*,
pattern: Optional[Pattern],
name: Optional[AssignName],
) -> None:
self.pattern = pattern
self.name = name
class MatchOr(Pattern):
"""Class representing a :class:`ast.MatchOr` node.
>>> node = astroid.extract_node('''
match x:
case 400 | 401 | 402:
...
''')
>>> node.cases[0].pattern
<MatchOr l.3 at 0x10d0b0b50>
"""
_astroid_fields = ("patterns",)
def __init__(
self,
lineno: Optional[int] = None,
col_offset: Optional[int] = None,
parent: Optional[NodeNG] = None,
) -> None:
self.patterns: typing.List[Pattern]
super().__init__(lineno=lineno, col_offset=col_offset, parent=parent)
def postinit(self, *, patterns: typing.List[Pattern]) -> None:
self.patterns = patterns
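# --- Illustrative sketch (editor addition): extracting the pattern node of a match
# statement.  Parsing ``match`` requires an interpreter (3.10+) and astroid version
# with PEP 634 support, so treat this purely as an assumption-based example.
def _example_match_or_pattern():
    import astroid

    node = astroid.extract_node(
        """
        match command:
            case "start" | "run":
                ...
            case _:
                ...
        """
    )
    return type(node.cases[0].pattern).__name__  # expected: 'MatchOr'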
# constants ##############################################################
CONST_CLS = {
list: List,
tuple: Tuple,
dict: Dict,
set: Set,
type(None): Const,
type(NotImplemented): Const,
type(...): Const,
}
def _update_const_classes():
"""update constant classes, so the keys of CONST_CLS can be reused"""
klasses = (bool, int, float, complex, str, bytes)
for kls in klasses:
CONST_CLS[kls] = Const
_update_const_classes()
def _two_step_initialization(cls, value):
instance = cls()
instance.postinit(value)
return instance
def _dict_initialization(cls, value):
if isinstance(value, dict):
value = tuple(value.items())
return _two_step_initialization(cls, value)
_CONST_CLS_CONSTRUCTORS = {
List: _two_step_initialization,
Tuple: _two_step_initialization,
Dict: _dict_initialization,
Set: _two_step_initialization,
Const: lambda cls, value: cls(value),
}
def const_factory(value):
"""return an astroid node for a python value"""
# XXX we should probably be stricter here and only consider stuff in
# CONST_CLS or do better treatment: in case where value is not in CONST_CLS,
# we should rather recall the builder on this value than returning an empty
# node (another option being that const_factory shouldn't be called with something
# not in CONST_CLS)
assert not isinstance(value, NodeNG)
# Hack for ignoring elements of a sequence
# or a mapping, in order to avoid transforming
# each element to an AST. This is fixed in 2.0
# and this approach is a temporary hack.
if isinstance(value, (list, set, tuple, dict)):
elts = []
else:
elts = value
try:
initializer_cls = CONST_CLS[value.__class__]
initializer = _CONST_CLS_CONSTRUCTORS[initializer_cls]
return initializer(initializer_cls, elts)
except (KeyError, AttributeError):
node = EmptyNode()
node.object = value
return node
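# --- Illustrative sketch (editor addition): how const_factory maps plain Python
# values onto node classes.  Container values are built with empty element lists
# (see the hack note above), so only the node *type* is meaningful here.
def _example_const_factory():
    assert isinstance(const_factory(42), Const)
    assert isinstance(const_factory((1, 2)), Tuple)
    assert isinstance(const_factory({"a": 1}), Dict)
    # Values of unregistered types fall back to an EmptyNode carrying the object.
    return isinstance(const_factory(object()), EmptyNode)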
def is_from_decorator(node):
"""Return True if the given node is the child of a decorator"""
parent = node.parent
while parent is not None:
if isinstance(parent, Decorators):
return True
parent = parent.parent
return False
| lgpl-2.1 | -2,911,324,988,074,595,000 | 29.885611 | 108 | 0.582056 | false |
quora/qcore | qcore/tests/test_debug.py | 1 | 2876 | # Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import qcore
from qcore.asserts import AssertRaises, assert_is_substring, assert_is, assert_eq
from qcore.debug import get_bool_by_mask, set_by_mask
from unittest import mock
def test_hang_me_does_not_throw():
qcore.debug.hang_me(0)
with mock.patch("time.sleep") as mock_sleep:
qcore.debug.hang_me(1)
mock_sleep.assert_called_once_with(1)
mock_sleep.reset_mock()
qcore.debug.hang_me()
mock_sleep.assert_called_once_with(10000)
def test_hang_me_handles_exception():
with mock.patch("time.sleep") as mock_sleep:
mock_sleep.side_effect = RuntimeError
with AssertRaises(RuntimeError):
qcore.debug.hang_me()
mock_sleep.side_effect = KeyboardInterrupt
qcore.debug.hang_me()
def test_format_stack():
def foo():
return qcore.debug.format_stack()
st = foo()
assert_is_substring("in foo\n", st)
def test_debug_counter():
counter = qcore.debug.counter("test_debug_counter")
counter_again = qcore.debug.counter("test_debug_counter")
assert_is(counter, counter_again)
counter.increment(5)
assert_eq("DebugCounter('test_debug_counter', value=5)", str(counter))
assert_eq("DebugCounter('test_debug_counter', value=5)", repr(counter))
counter.decrement(3)
assert_eq("DebugCounter('test_debug_counter', value=2)", str(counter))
assert_eq("DebugCounter('test_debug_counter', value=2)", repr(counter))
def test_bool_by_mask():
class MaskObject(object):
def __init__(self):
self.TEST_MASK_1 = False
self.TEST_MASK_2 = True
m = MaskObject()
assert_is(True, get_bool_by_mask(m, "ABC"))
assert_is(False, get_bool_by_mask(m, "TEST_MASK"))
assert_is(False, get_bool_by_mask(m, "TEST_MASK_1"))
assert_is(True, get_bool_by_mask(m, "TEST_MASK_2"))
set_by_mask(m, "TEST_", True)
assert_is(True, get_bool_by_mask(m, "TEST_MASK"))
assert_is(True, get_bool_by_mask(m, "TEST_MASK_1"))
assert_is(True, get_bool_by_mask(m, "TEST_MASK_2"))
set_by_mask(m, "TEST_MASK_2", False)
assert_is(True, get_bool_by_mask(m, "ABC"))
assert_is(False, get_bool_by_mask(m, "TEST_MASK"))
assert_is(True, get_bool_by_mask(m, "TEST_MASK_1"))
assert_is(False, get_bool_by_mask(m, "TEST_MASK_2"))
| apache-2.0 | -8,369,799,410,944,293,000 | 33.650602 | 81 | 0.66968 | false |
developerworks/horizon | horizon/dashboards/nova/instances_and_volumes/instances/views.py | 1 | 4753 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Nova instances.
"""
import logging
from django import http
from django import shortcuts
from django.core.urlresolvers import reverse
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext as _
from horizon import api
from horizon import exceptions
from horizon import forms
from horizon import tabs
from .forms import UpdateInstance
from .tabs import InstanceDetailTabs
LOG = logging.getLogger(__name__)
def console(request, instance_id):
try:
# TODO(jakedahn): clean this up once the api supports tailing.
data = api.server_console_output(request, instance_id)
except:
data = _('Unable to get log for instance "%s".') % instance_id
exceptions.handle(request, ignore=True)
response = http.HttpResponse(mimetype='text/plain')
response.write(data)
response.flush()
return response
def vnc(request, instance_id):
try:
console = api.server_vnc_console(request, instance_id)
instance = api.server_get(request, instance_id)
return shortcuts.redirect(console.url +
("&title=%s(%s)" % (instance.name, instance_id)))
except:
redirect = reverse("horizon:nova:instances_and_volumes:index")
msg = _('Unable to get VNC console for instance "%s".') % instance_id
exceptions.handle(request, msg, redirect=redirect)
class UpdateView(forms.ModalFormView):
form_class = UpdateInstance
template_name = 'nova/instances_and_volumes/instances/update.html'
context_object_name = 'instance'
def get_object(self, *args, **kwargs):
if not hasattr(self, "object"):
instance_id = self.kwargs['instance_id']
try:
self.object = api.server_get(self.request, instance_id)
except:
redirect = reverse("horizon:nova:instances_and_volumes:index")
msg = _('Unable to retrieve instance details.')
exceptions.handle(self.request, msg, redirect=redirect)
return self.object
def get_initial(self):
return {'instance': self.kwargs['instance_id'],
'tenant_id': self.request.user.tenant_id,
'name': getattr(self.object, 'name', '')}
class DetailView(tabs.TabView):
tab_group_class = InstanceDetailTabs
template_name = 'nova/instances_and_volumes/instances/detail.html'
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
context["instance"] = self.get_data()
return context
def get_data(self):
if not hasattr(self, "_instance"):
try:
instance_id = self.kwargs['instance_id']
instance = api.server_get(self.request, instance_id)
instance.volumes = api.volume_instance_list(self.request,
instance_id)
# Gather our flavors and images and correlate our instances to
# them. Exception handling happens in the parent class.
flavors = api.flavor_list(self.request)
full_flavors = SortedDict([(str(flavor.id), flavor) for \
flavor in flavors])
instance.full_flavor = full_flavors[instance.flavor["id"]]
except:
redirect = reverse('horizon:nova:instances_and_volumes:index')
exceptions.handle(self.request,
_('Unable to retrieve details for '
'instance "%s".') % instance_id,
redirect=redirect)
self._instance = instance
return self._instance
def get_tabs(self, request, *args, **kwargs):
instance = self.get_data()
return self.tab_group_class(request, instance=instance, **kwargs)
| apache-2.0 | -8,869,323,412,014,233,000 | 37.642276 | 78 | 0.632443 | false |
dr-prodigy/python-holidays | holidays/countries/spain.py | 1 | 6751 | # -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <[email protected]> (c) 2014-2017
# dr-prodigy <[email protected]> (c) 2017-2021
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd, TH, FR, MO
from holidays.constants import (
JAN,
FEB,
MAR,
APR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
)
from holidays.constants import SUN
from holidays.holiday_base import HolidayBase
class Spain(HolidayBase):
PROVINCES = [
"AN",
"AR",
"AS",
"CB",
"CM",
"CL",
"CT",
"VC",
"EX",
"GA",
"IB",
"CN",
"MD",
"MC",
"ML",
"NC",
"PV",
"RI",
]
def __init__(self, **kwargs):
self.country = "ES"
self.prov = kwargs.pop("prov", kwargs.pop("state", ""))
HolidayBase.__init__(self, **kwargs)
def _is_observed(self, date_holiday, name_holiday):
if self.observed and date_holiday.weekday() == SUN:
self[date_holiday + rd(days=+1)] = name_holiday + " (Trasladado)"
else:
self[date_holiday] = name_holiday
def _populate(self, year):
self._is_observed(date(year, JAN, 1), "Año nuevo")
self._is_observed(date(year, JAN, 6), "Epifanía del Señor")
if (
year < 2015
and self.prov
and self.prov
in [
"AR",
"CL",
"CM",
"EX",
"GA",
"MD",
"ML",
"MC",
"NC",
"PV",
"VC",
]
):
self._is_observed(date(year, MAR, 19), "San José")
elif (
year == 2015
and self.prov
and self.prov in ["CM", "MD", "ML", "MC", "NC", "PV", "VC"]
):
self._is_observed(date(year, MAR, 19), "San José")
elif (
year == 2016
and self.prov
and self.prov in ["ML", "MC", "PV", "VC"]
):
self._is_observed(date(year, MAR, 19), "San José")
elif year == 2017 and self.prov and self.prov in ["PV"]:
self._is_observed(date(year, MAR, 19), "San José")
elif (
2018 <= year <= 2019
and self.prov
and self.prov in ["GA", "MC", "NC", "PV", "VC"]
):
self._is_observed(date(year, MAR, 19), "San José")
elif (
2020 <= year <= 2025
and self.prov
and self.prov in ["CM", "GA", "MC", "NC", "PV", "VC"]
):
self._is_observed(date(year, MAR, 19), "San José")
if self.prov and self.prov not in ["CT", "VC"]:
self[easter(year) + rd(weeks=-1, weekday=TH)] = "Jueves Santo"
self[easter(year) + rd(weeks=-1, weekday=FR)] = "Viernes Santo"
if self.prov and self.prov in ["CT", "PV", "NC", "VC", "IB", "CM"]:
self[easter(year) + rd(weekday=MO)] = "Lunes de Pascua"
self._is_observed(date(year, MAY, 1), "Día del Trabajador")
if self.prov and self.prov in ["CT", "GA", "VC"]:
self._is_observed(date(year, JUN, 24), "San Juan")
self._is_observed(date(year, AUG, 15), "Asunción de la Virgen")
self._is_observed(date(year, OCT, 12), "Día de la Hispanidad")
self._is_observed(date(year, NOV, 1), "Todos los Santos")
self._is_observed(
date(year, DEC, 6), "Día de la Constitución " "Española"
)
self._is_observed(date(year, DEC, 8), "La Inmaculada Concepción")
self._is_observed(date(year, DEC, 25), "Navidad")
if self.prov and self.prov in ["CT", "IB"]:
self._is_observed(date(year, DEC, 26), "San Esteban")
# Provinces festive day
if self.prov:
if self.prov == "AN":
self._is_observed(date(year, FEB, 28), "Día de Andalucia")
elif self.prov == "AR":
self._is_observed(date(year, APR, 23), "Día de San Jorge")
elif self.prov == "AS":
self._is_observed(date(year, SEP, 8), "Día de Asturias")
elif self.prov == "CB":
self._is_observed(
date(year, JUL, 28),
"Día de las Instituci" "ones de Cantabria",
)
elif self.prov == "CM":
self._is_observed(
date(year, MAY, 31), "Día de Castilla " "La Mancha"
)
elif self.prov == "CL":
self._is_observed(
date(year, APR, 23), "Día de Castilla y " "Leon"
)
elif self.prov == "CT":
self._is_observed(
date(year, SEP, 11), "Día Nacional de " "Catalunya"
)
elif self.prov == "VC":
self._is_observed(
date(year, OCT, 9), "Día de la Comunidad " "Valenciana"
)
elif self.prov == "EX":
self._is_observed(date(year, SEP, 8), "Día de Extremadura")
elif self.prov == "GA":
self._is_observed(
date(year, JUL, 25), "Día Nacional de " "Galicia"
)
elif self.prov == "IB":
self._is_observed(
date(year, MAR, 1), "Día de las Islas " "Baleares"
)
elif self.prov == "CN":
self._is_observed(date(year, MAY, 30), "Día de Canarias")
elif self.prov == "MD":
self._is_observed(
date(year, MAY, 2), "Día de Comunidad de " "Madrid"
)
elif self.prov == "MC":
self._is_observed(
date(year, JUN, 9), "Día de la Región de " "Murcia"
)
elif self.prov == "NC":
self._is_observed(date(year, SEP, 27), "Día de Navarra")
elif self.prov == "PV":
self._is_observed(date(year, OCT, 25), "Día del Páis Vasco")
elif self.prov == "RI":
self._is_observed(date(year, JUN, 9), "Día de La Rioja")
class ES(Spain):
pass
class ESP(Spain):
pass
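# --- Illustrative usage sketch (editor addition, not part of the library).
# The year and holiday checked below are assumptions for illustration; the exact
# holiday set depends on the selected year and province.
def _example_spain_usage():
    es_holidays = Spain(years=2021)
    return date(2021, OCT, 12) in es_holidays  # Día de la Hispanidad, a Tuesday in 2021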
| mit | -6,767,827,126,775,115,000 | 32.919192 | 78 | 0.470518 | false |
koniiiik/ksp_login | ksp_login/context_processors.py | 1 | 1967 | from social_core.backends.base import BaseAuth
from social_core.utils import module_member
from .utils import setting
DEFAULT_AUTHENTICATION_PROVIDERS_BRIEF = 3
def get_login_providers(request, short=False):
"""
Returns a list of available login providers based on the
AUTHENTICATION_BACKENDS setting. Each provider is represented as a
    dictionary containing the backend name, the name of its required field
    (if any) and that field's verbose name.
"""
def extract_backend_data(klass):
"""
Helper function which extracts information useful for use in
templates from SocialAuth subclasses and returns it as a
dictionary.
"""
return {
'name': klass.name,
'required_field': klass.REQUIRED_FIELD_NAME,
'required_field_verbose': klass.REQUIRED_FIELD_VERBOSE_NAME,
}
backends = (module_member(auth_backend) for auth_backend in setting('AUTHENTICATION_BACKENDS'))
providers = [extract_backend_data(backend) for backend in backends if issubclass(backend, BaseAuth)]
if short:
return providers[:setting('AUTHENTICATION_PROVIDERS_BRIEF',
DEFAULT_AUTHENTICATION_PROVIDERS_BRIEF)]
return providers
def login_providers(request):
"""
Returns the full list of login providers as the social_auth context
variable.
"""
return {'login_providers': get_login_providers(request)}
def login_providers_short(request):
"""
Returns the short list of login providers for use in a login widget as
the social_auth context variable.
"""
return {'login_providers_short': get_login_providers(request, short=True)}
def login_providers_both(request):
"""
Returns both the short and the long list of login providers.
"""
return {
'login_providers': get_login_providers(request),
'login_providers_short': get_login_providers(request, short=True),
}
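# --- Illustrative sketch (editor addition): listing the configured provider names.
# get_login_providers() ignores the request object itself, so any placeholder works
# here; a configured Django settings module with AUTHENTICATION_BACKENDS is assumed.
def _example_provider_names():
    providers = get_login_providers(request=None)
    return [provider['name'] for provider in providers]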
| bsd-3-clause | 3,773,942,804,654,436,400 | 32.338983 | 104 | 0.68124 | false |
andredalton/bcc | 2014/MAC0242/miniep3/miniep3.py | 1 | 5396 | #! /usr/bin/env python3
import sys
import re
def uso():
"""Imprime instruções de uso do programa."""
uso = """
Este programa gera a ordem correta de inclusão em um banco de dados.
Passe o nome dos arquivos na linha de comando. Caso queira imprimir
ocorrências de referência circular ou inexistente utilize a opção -v.
"""
print(uso)
def imprime(dic):
"""Imprime um dicionário de listas num formato melhorado."""
for key in dic.keys():
print(key + ": ", end="")
print(', '.join(dic[key]))
def procura(dic, ordem, verb):
"""Procura uma ordem correta de inclusão enquanto existirem tabelas cujas todas as referências já
tenham sido processadas. Quando em modo detalhado imprime a ocorrência de referência circular ou
inexistente."""
if len(dic) == 0:
""" Busca finalizada. """
return ordem
lst = set()
""" Tabelas a serem removidas nesta iteração. """
for table in dic.keys():
if len(dic[table]) == 0:
lst.add(table)
if len(lst) == 0:
""" Caso todas as tabelas restantes possuam referências a serem processadas restaram apenas
referências inexistentes ou circulares."""
if verb:
print("\nAs tabelas a seguir possuem referência circular ou inexistente:")
imprime(dic)
print("\nO resultado obtido foi:")
return ordem
for key in lst:
ordem.append(key)
del(dic[key])
for table in dic.keys():
for key in lst:
if key in dic[table]:
dic[table].remove(key)
procura(dic, ordem, verb)
def procedencia(lst, verb):
"""Gera uma lista de procedencia para cada tabela.
Inicialmente a função iria trabalhar com o arquivo separado por linhas,
mas como o arquivo pode ser inteiro feito em apenas uma linha modifiquei
a estratégia para uma varredura de estados. Não me preocupei com erros de
sintaxe.
Lista de estados:
0: Procurando por uma instrução CREATE
1: Verificando se é uma instrução de criação de tabela TABLE
2: Procurando o nome da tabela que está sendo criada, contando que diferente de ";"
3: Procurando se é uma referência a criação de chave estrangeira FOREIGN
4: Verificando se é uma referência a criação de chave estrangeira KEY
5: Procurando as referências REFERENCES
6: Procurando o nome da tabela de referência, contando que diferente de ";"
7: Próxima palavra é o novo delimitador
final: Caso ocorra uma instrução com o delimitador encerra a criação da tabela
"""
delimitador = ";"
status = 0
"""Estado inicial do autômato."""
proc = {}
""" Dicionário de procedentes. """
tabela = ""
""" Tabela sendo montada no estado atual. """
fim = re.compile(".*" + delimitador + ".*")
""" Expressão regular que verifica a ocorrência de um delimitador ";".
Supondo que o delimitador não seja alterado. """
create = re.compile(".*[cC][rR][eE][aA][tT][eE]$")
""" Expressão regular que verifica se a palavra atual termina com CREATE. """
delim = re.compile(".*[dD][eE][lL][iI][mM][iI][tT][eE][rR]$")
""" Expressão regular que verifica se a palavra atual termina com DELIMITER. """
for p in lst:
if status == 0 and create.match(p):
status = 1
elif status == 0 and delim.match(p):
status = 7
elif status == 1:
if p.lower() == "table":
status = 2
else:
status = 0
elif status == 2 and p != delimitador and len(p.replace("`","")) > 0:
tabela = p.replace("`","")
if tabela in proc and verb:
print("TABELA " + tabela + " RECRIADA")
proc[tabela] = set()
status = 3
elif status == 3 and p.lower() == "foreign":
status = 4
elif status == 4:
if p.lower() == "key":
status = 5
else:
status = 0
elif status == 5 and p.lower() == "references":
status = 6
elif status == 6 and p != delimitador and len(p.replace("`","")) > 0:
ref = p.replace("`","")
proc[tabela].add(ref)
status = 3
elif status == 7:
delimitador = p
fim = re.compile(".*" + re.escape(delimitador) + ".*")
status = 0
elif fim.match(p):
if create.match(p):
status = 1
else:
status = 0
tabela = ""
return proc
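# Illustrative example (hypothetical table names): for a dump containing
#     CREATE TABLE a (id INT) ;
#     CREATE TABLE b (x INT, FOREIGN KEY (x) REFERENCES a (id)) ;
# procedencia() returns {'a': set(), 'b': {'a'}}, and procura() then fills the
# restore order with ['a', 'b'], i.e. "a.sql" must be loaded before "b.sql".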
def main(argv):
veb = False
if "-v" in argv:
veb = True
""" Função que trata a linha de comando e chama as funcões do programa."""
ordem = []
""" Lista que irá conter a ordem de restauração dos arquivos. """
if len(argv) > 0:
for arquivo in argv:
if arquivo == "-v":
continue
ordem = []
if len(argv) > 1:
print("\nARQUIVO: " + arquivo)
with open(arquivo, "r") as myfile:
text=myfile.read().split()
dic = procedencia(text, veb)
procura(dic, ordem, veb)
print('.sql\n'.join(ordem), end=".sql\n")
else:
uso()
if __name__ == "__main__":
main(sys.argv[1:]) | apache-2.0 | -7,787,084,893,410,947,000 | 34.825503 | 101 | 0.565111 | false |
chaincoin/chaincoin | contrib/gitian-build.py | 1 | 13919 | #!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
def setup():
global args, workdir
programs = ['ruby', 'git', 'apt-cacher-ng', 'make', 'wget']
if args.kvm:
programs += ['python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker:
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
break
if return_code != 0:
print('Cannot find any way to install docker', file=sys.stderr)
exit(1)
else:
programs += ['lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
if not os.path.isdir('gitian.sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/chaincoin/gitian.sigs.git'])
if not os.path.isdir('chaincoin-detached-sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/chaincoin/chaincoin-detached-sigs.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('chaincoin'):
subprocess.check_call(['git', 'clone', 'https://github.com/chaincoin/chaincoin.git'])
os.chdir('gitian-builder')
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
subprocess.check_call(make_image_prog)
os.chdir(workdir)
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
exit(0)
def build():
global args, workdir
os.makedirs('chaincoin-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://downloads.sourceforge.net/project/osslsigncode/osslsigncode/osslsigncode-1.7.1.tar.gz'])
subprocess.check_call(['wget', '-N', '-P', 'inputs', 'https://bitcoincore.org/cfields/osslsigncode-Backports-to-1.7.1.patch'])
subprocess.check_call(["echo 'a8c4e9cafba922f89de0df1f2152e7be286aba73f78505169bc351a7938dd911 inputs/osslsigncode-Backports-to-1.7.1.patch' | sha256sum -c"], shell=True)
subprocess.check_call(["echo 'f9a8cdb38b9c309326764ebc937cba1523a3a751a7ab05df3ecc99d18ae466c9 inputs/osslsigncode-1.7.1.tar.gz' | sha256sum -c"], shell=True)
subprocess.check_call(['make', '-C', '../chaincoin/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'chaincoin='+args.commit, '--url', 'chaincoin='+args.url, '../chaincoin/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../chaincoin/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call('mv build/out/chaincoin-*.tar.gz build/out/src/chaincoin-*.tar.gz ../chaincoin-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'chaincoin='+args.commit, '--url', 'chaincoin='+args.url, '../chaincoin/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../chaincoin/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call('mv build/out/chaincoin-*-win-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/chaincoin-*.zip build/out/chaincoin-*.exe ../chaincoin-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'chaincoin='+args.commit, '--url', 'chaincoin='+args.url, '../chaincoin/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../chaincoin/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call('mv build/out/chaincoin-*-osx-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/chaincoin-*.tar.gz build/out/chaincoin-*.dmg ../chaincoin-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
def sign():
global args, workdir
os.chdir('gitian-builder')
if args.windows:
print('\nSigning ' + args.version + ' Windows')
subprocess.check_call('cp inputs/chaincoin-' + args.version + '-win-unsigned.tar.gz inputs/chaincoin-win-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../chaincoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../chaincoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call('mv build/out/chaincoin-*win64-setup.exe ../chaincoin-binaries/'+args.version, shell=True)
subprocess.check_call('mv build/out/chaincoin-*win32-setup.exe ../chaincoin-binaries/'+args.version, shell=True)
if args.macos:
print('\nSigning ' + args.version + ' MacOS')
subprocess.check_call('cp inputs/chaincoin-' + args.version + '-osx-unsigned.tar.gz inputs/chaincoin-osx-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '-i', '--commit', 'signature='+args.commit, '../chaincoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../chaincoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call('mv build/out/chaincoin-osx-signed.dmg ../chaincoin-binaries/'+args.version+'/chaincoin-'+args.version+'-osx.dmg', shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Signed Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
os.chdir(workdir)
def verify():
global args, workdir
os.chdir('gitian-builder')
print('\nVerifying v'+args.version+' Linux\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../chaincoin/contrib/gitian-descriptors/gitian-linux.yml'])
print('\nVerifying v'+args.version+' Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../chaincoin/contrib/gitian-descriptors/gitian-win.yml'])
print('\nVerifying v'+args.version+' MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../chaincoin/contrib/gitian-descriptors/gitian-osx.yml'])
print('\nVerifying v'+args.version+' Signed Windows\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../chaincoin/contrib/gitian-descriptors/gitian-win-signer.yml'])
print('\nVerifying v'+args.version+' Signed MacOS\n')
subprocess.check_call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../chaincoin/contrib/gitian-descriptors/gitian-osx-signer.yml'])
os.chdir(workdir)
def main():
global args, workdir
parser = argparse.ArgumentParser(usage='%(prog)s [options] signer version')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/chaincoin/chaincoin', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Uses LXC. If you want to use KVM, use the --kvm option. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', help='GPG signer to sign each build assert file')
parser.add_argument('version', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if args.buildsign:
args.build=True
args.sign=True
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
# Set environment variable USE_LXC or USE_DOCKER, let gitian-builder know that we use lxc or docker
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
if not 'GITIAN_HOST_IP' in os.environ.keys():
os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
if not 'LXC_GUEST_IP' in os.environ.keys():
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.11.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
script_name = os.path.basename(sys.argv[0])
# Signer and version shouldn't be empty
if args.signer == '':
print(script_name+': Missing signer.')
print('Try '+script_name+' --help for more information')
exit(1)
if args.version == '':
print(script_name+': Missing version.')
print('Try '+script_name+' --help for more information')
exit(1)
# Add leading 'v' for tags
if args.commit and args.pull:
raise Exception('Cannot have both commit and pull')
args.commit = ('' if args.commit else 'v') + args.version
if args.setup:
setup()
os.chdir('chaincoin')
if args.pull:
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
os.chdir('../gitian-builder/inputs/chaincoin')
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
args.version = 'pull-' + args.version
print(args.commit)
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
if args.build:
build()
if args.sign:
sign()
if args.verify:
verify()
if __name__ == '__main__':
main()
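# Illustrative invocations (hypothetical signer name and version; every flag used
# here is defined in the argparse setup above):
#
#   ./gitian-build.py --setup satoshi 0.16.3        # one-time build environment setup
#   ./gitian-build.py -b -j 4 -o lw satoshi 0.16.3  # build Linux and Windows with 4 jobs
#   ./gitian-build.py --verify satoshi 0.16.3       # verify results against gitian.sigs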
| mit | -7,426,322,469,110,016,000 | 58.738197 | 233 | 0.648969 | false |
edx/edx-notifications | edx_notifications/channels/urban_airship.py | 1 | 7474 | """
NotificationChannelProvider to integrate with the Urban Airship mobile push
notification services
"""
import json
import logging
import six
import requests
from requests.auth import HTTPBasicAuth
from requests.exceptions import RequestException
from edx_notifications.channels.channel import BaseNotificationChannelProvider
# system defined constants that only we should know about
UA_API_PUSH_ENDPOINT = 'https://go.urbanairship.com/api/push/'
PUSH_REQUEST_HEADER = {
'Content-Type': 'application/json',
'Accept': 'application/vnd.urbanairship+json; version=3;'
}
log = logging.getLogger(__name__)
class UrbanAirshipNotificationChannelProvider(BaseNotificationChannelProvider):
"""
Implementation of the BaseNotificationChannelProvider abstract interface
"""
def __init__(self, name=None, display_name=None, display_description=None, link_resolvers=None):
"""
Initializer
"""
super().__init__(
name=name,
display_name=display_name,
display_description=display_description,
link_resolvers=link_resolvers
)
def dispatch_notification_to_user(self, user_id, msg, channel_context=None):
"""
Send a notification to a user. It is assumed that
'user_id' and 'msg' are valid and have already passed
all necessary validations
:param user_id:
:param msg:
:param channel_context:
:return:
"""
payload = self.create_payload(msg, user_id)
payload = json.dumps(payload)
api_credentials = channel_context.get('api_credentials') if channel_context else None
return self.call_ua_push_api(payload, api_credentials)
@staticmethod
def create_payload(msg, user_id):
"""
Creates payload for UA push request for single named user
:param msg:
:param user_id:
:return:
"""
assert msg.payload['title'], 'Notification title not available in payload'
assert user_id, 'No user id given'
obj = {
"notification": {"alert": msg.payload['title']},
"audience": {"named_user": str(user_id)},
"device_types": ["ios", "android"]
}
return obj
def call_ua_push_api(self, payload, api_credentials):
"""
Calls Urban Airship push API to send push notifications
:param payload: json payload to be passed to push notifications API
:param api_credentials: dict containing provider id and secret key
Returns: json response sent by UA
"""
resp = {}
try:
resp = requests.post(
UA_API_PUSH_ENDPOINT,
payload,
headers=PUSH_REQUEST_HEADER,
auth=HTTPBasicAuth(api_credentials["provider_key"], api_credentials["provider_secret"])
)
resp = resp.json()
if not resp['ok']:
log.error(
"Urban Airship push notifications API failed. Details: %s Error: %s",
resp.get('details'), resp.get('error')
)
except RequestException as ex:
log.error("Urban Airship push notifications API failed with error %s", str(ex))
return resp
def bulk_dispatch_notification(self, user_ids, msg, exclude_user_ids=None, channel_context=None):
"""
Perform a bulk dispatch of the notification message to
all user_ids that will be enumerated over in user_ids.
:param user_ids:
:param msg:
:param exclude_user_ids:
:param channel_context:
:return:
"""
if 'tag_group' in msg.payload:
payload = self.create_tag_group_payload(msg)
elif 'send_to_all' in msg.payload and msg.payload['send_to_all'] is True:
payload = self.create_all_user_payload(msg)
else:
exclude_user_ids = exclude_user_ids if exclude_user_ids else []
actual_user_ids = []
for user_id in user_ids:
if user_id not in exclude_user_ids:
actual_user_ids.append(user_id)
payload = self.create_bulk_user_payload(actual_user_ids, msg)
self._add_type_in_payload(msg, payload)
payload = json.dumps(payload)
api_credentials = channel_context.get('api_credentials') if channel_context else None
return self.call_ua_push_api(payload, api_credentials)
def _add_type_in_payload(self, msg, payload):
"""
Adds a notification type in payload if notification_type is present in the message
Notification types:
- courseannouncement
:param msg:
:param payload:
"""
if 'notification_type' in msg.payload:
extra = {
"extra": {
"notification_type": msg.payload['notification_type']
}
}
ios_android_extras = {
"ios": extra,
"android": extra,
}
payload['notification'].update(ios_android_extras)
@staticmethod
def create_tag_group_payload(msg):
"""
Creates payload for UA push request for tag group
:param msg:
:return:
"""
assert msg.payload['title'], 'Notification title not available in payload'
alert = msg.payload['title']
group = msg.payload.get('tag_group', 'enrollments')
tag = msg.payload.get('tag', msg.namespace)
obj = {
"notification": {
"alert": alert,
},
"device_types": "all",
"audience": {
"group": group,
"tag": tag
}
}
if 'open_url' in msg.payload:
obj["notification"]["actions"] = {
"open": {
"type": "url",
"content": msg.payload['open_url']
}
}
return obj
@staticmethod
def create_bulk_user_payload(user_ids, msg):
"""
Creates payload to send UA push notification to list of users
:param user_ids: list of user ids
:param msg:
:return:
"""
assert user_ids, 'List of user ids is empty'
assert msg.payload['title'], 'Notification title not available in payload'
obj = {
"notification": {
"alert": msg.payload['title']
},
"device_types": ["ios", "android"],
"audience": {
"named_user": [str(user_id) for user_id in user_ids]
}
}
return obj
@staticmethod
def create_all_user_payload(msg):
"""
Creates payload to send UA push notification to all users
:param msg:
:return:
"""
assert msg.payload['title'], 'Notification title not available in payload'
obj = {
"notification": {
"alert": msg.payload['title']
},
"device_types": "all",
"audience": "all"
}
return obj
def resolve_msg_link(self, msg, link_name, params, channel_context=None):
"""
Generates the appropriate link given a msg, a link_name, and params
"""
# Click through links do not apply for mobile push notifications
return None
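# Illustrative sketch (hypothetical user ids and title): for a message whose
# payload carries the title "Assignment due", create_bulk_user_payload() builds
#
#     {
#         "notification": {"alert": "Assignment due"},
#         "device_types": ["ios", "android"],
#         "audience": {"named_user": ["17", "42"]},
#     }
#
# which the dispatch methods above serialize with json.dumps() and POST to
# UA_API_PUSH_ENDPOINT, using the provider key/secret from channel_context as
# HTTP basic auth.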
| agpl-3.0 | -3,941,502,762,100,949,000 | 31.354978 | 103 | 0.563821 | false |
glenngillen/dotfiles | .vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/python/debugpy/common/stacks.py | 1 | 1686 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
"""Provides facilities to dump all stacks of all threads in the process.
"""
import os
import sys
import time
import threading
import traceback
from debugpy.common import log
def dump():
"""Dump stacks of all threads in this process, except for the current thread.
"""
tid = threading.current_thread().ident
pid = os.getpid()
log.info("Dumping stacks for process {0}...", pid)
for t_ident, frame in sys._current_frames().items():
if t_ident == tid:
continue
for t in threading.enumerate():
if t.ident == tid:
t_name = t.name
t_daemon = t.daemon
break
else:
t_name = t_daemon = "<unknown>"
stack = "".join(traceback.format_stack(frame))
log.info(
"Stack of thread {0} (tid={1}, pid={2}, daemon={3}):\n\n{4}",
t_name,
t_ident,
pid,
t_daemon,
stack,
)
log.info("Finished dumping stacks for process {0}.", pid)
def dump_after(secs):
"""Invokes dump() on a background thread after waiting for the specified time.
"""
def dumper():
time.sleep(secs)
try:
dump()
except:
log.swallow_exception()
thread = threading.Thread(target=dumper)
thread.daemon = True
thread.start()
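# Illustrative usage (hypothetical five-second delay): other debugpy code can
# schedule a non-blocking dump of every other thread's stack like so:
#
#     from debugpy.common import stacks
#     stacks.dump_after(5)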
| mit | 3,639,968,426,756,852,700 | 23.545455 | 82 | 0.559312 | false |
bskinn/sphobjinv | src/sphobjinv/fileops.py | 1 | 3152 | r"""*File I/O helpers for* ``sphobjinv``.
``sphobjinv`` is a toolkit for manipulation and inspection of
Sphinx |objects.inv| files.
**Author**
Brian Skinn ([email protected])
**File Created**
5 Nov 2017
**Copyright**
\(c) Brian Skinn 2016-2021
**Source Repository**
https://github.com/bskinn/sphobjinv
**Documentation**
https://sphobjinv.readthedocs.io/en/latest
**License**
The MIT License; see |license_txt|_ for full license terms
**Members**
"""
import json
from pathlib import Path
def readbytes(path):
"""Read file contents and return as |bytes|.
.. versionchanged:: 2.1
`path` can now be |Path| or |str|. Previously, it had to be |str|.
Parameters
----------
path
|str| or |Path| -- Path to file to be opened.
Returns
-------
b
|bytes| -- Contents of the indicated file.
"""
return Path(path).read_bytes()
def writebytes(path, contents):
"""Write indicated file contents.
Any existing file at `path` will be overwritten.
.. versionchanged:: 2.1
`path` can now be |Path| or |str|. Previously, it had to be |str|.
Parameters
----------
path
|str| or |Path| -- Path to file to be written.
contents
|bytes| -- Content to be written to file.
"""
Path(path).write_bytes(contents)
def readjson(path):
"""Create |dict| from JSON file.
No data or schema validation is performed.
.. versionchanged:: 2.1
`path` can now be |Path| or |str|. Previously, it had to be |str|.
Parameters
----------
path
|str| or |Path| -- Path to JSON file to be read.
Returns
-------
d
|dict| -- Deserialized JSON.
"""
return json.loads(Path(path).read_text())
def writejson(path, d):
"""Create JSON file from |dict|.
No data or schema validation is performed.
Any existing file at `path` will be overwritten.
.. versionchanged:: 2.1
`path` can now be |Path| or |str|. Previously, it had to be |str|.
Parameters
----------
path
|str| or |Path| -- Path to output JSON file.
d
|dict| -- Data structure to serialize.
"""
Path(path).write_text(json.dumps(d))
def urlwalk(url):
r"""Generate a series of candidate |objects.inv| URLs.
URLs are based on the seed `url` passed in. Ensure that the
path separator in `url` is the standard **forward** slash
('|cour|\ /\ |/cour|').
Parameters
----------
url
|str| -- Seed URL defining directory structure to walk through.
Yields
------
inv_url
|str| -- Candidate URL for |objects.inv| location.
"""
# Scrub any anchor, as it fouls things
url = url.partition("#")[0]
urlparts = url.rstrip("/").split("/")
# This loop condition results in the yielded values stopping at
# 'http[s]://domain.com/objects.inv', since the URL protocol
# specifier has two forward slashes
while len(urlparts) >= 3:
urlparts.append("objects.inv")
yield "/".join(urlparts)
urlparts.pop()
urlparts.pop()
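# Illustrative sketch (hypothetical seed URL): urlwalk() only manipulates the
# string, so the candidate locations can be previewed without network access.
if __name__ == "__main__":  # pragma: no cover
    for candidate in urlwalk("https://example.readthedocs.io/en/latest/api.html#anchor"):
        print(candidate)
    # Prints, in order:
    #   https://example.readthedocs.io/en/latest/api.html/objects.inv
    #   https://example.readthedocs.io/en/latest/objects.inv
    #   https://example.readthedocs.io/en/objects.inv
    #   https://example.readthedocs.io/objects.inv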
| mit | -78,736,673,134,251,550 | 18.823899 | 74 | 0.590419 | false |
saurabh6790/ON-RISAPP | clinical/doctype/patient_report/patient_report.py | 1 | 4942 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cstr
import os, json
from webnotes.utils import get_base_path
from install_erpnext import exec_in_shell
from webnotes.model.doc import Document
class DocType:
def __init__(self, d, dl):
self.doc, self.doclist = d, dl
def validate(self):
get_head_field=webnotes.conn.sql("""select field from `tabSingles` where doctype='Patient Report Setting' and value=1 and field!='show_table_border'""",as_list=1)
label_size=webnotes.conn.sql("""select value from `tabSingles` where doctype='Patient Report Setting' and field='lable_size' """)
label_font=webnotes.conn.sql("""select value from `tabSingles` where doctype='Patient Report Setting' and field='lable_font' """)
show_border=webnotes.conn.sql("""select value from `tabSingles` where doctype='Patient Report Setting' and field='show_table_border' """)
branch_id=webnotes.conn.sql("""select value from `tabSingles` where doctype='Patient Report Setting' and field='branch_id'""")
subtitle=webnotes.conn.sql("""select value from `tabSingles` where doctype='Patient Report Setting' and field='subtitle'""")
header=webnotes.conn.sql("""select ifnull(value,0) from `tabSingles` where doctype='Patient Report Setting' and field='is_header'""")
footer=webnotes.conn.sql("""select ifnull(value,0) from `tabSingles` where doctype='Patient Report Setting' and field='is_footer'""")
company=webnotes.conn.sql("select value from tabSingles where doctype = 'Global Defaults' and field = 'default_company'")
# webnotes.errprint(company)
field_list=[]
print_dic={}
field_seq_list = ['accession_number', 'institution_name', 'patient_id', 'patient_name', 'sex', 'age',
'patient_birth_date', 'patient_comment','modality','study', 'study_date', 'study_time', 'study_comment','referring_physician']
for field in field_seq_list:
if [field] in get_head_field:
# webnotes.errprint(field)
field_list.append(field)
#webnotes.errprint(["field_list")
print_dic={"head_fields":field_list,"label_size":label_size[0][0],"label_font":label_font[0][0],"show_border":show_border[0][0],"subtitle":subtitle[0][0],"company":company[0][0],"is_header":header[0][0],"is_footer":footer[0][0]}
if branch_id:
print_dic['branch_id']=branch_id[0][0]
strjson=json.dumps(print_dic)
#webnotes.errprint(strjson)
self.doc.print_details = strjson
signature_path=webnotes.conn.sql("""select signature_image from `tabProfile` where name in (select user_id from `tabEmployee` where name='%s')"""%(self.doc.technologist_id),as_list=1)
# webnotes.errprint(signature_path)
if signature_path:
self.doc.signiture_image=signature_path[0][0]
def on_update(self):
self.doc.report_status = 'New'
self.update_report_state('New')
def on_submit(self):
user=webnotes.conn.get_value("Profile", webnotes.session.user, 'concat(first_name," ",last_name)')
self.doc.reported_by = user
set_reported_by(self.doc.name, user)
set_report_status(self.doc.name)
self.update_report_state('Final')
# self.run_method('update_after_submit')
def update_report_state(self, state):
webnotes.conn.sql(""" update `tabPatient Encounter Entry`
set report_status = '%(state)s'
where name = "%(encounter)s"
"""%{'state':state, 'encounter': self.doc.accession_number})
@webnotes.whitelist()
def get_server_id():
return webnotes.conn.sql("select value from tabSingles where doctype = 'Global Defaults' and field = 'pacs_server_id'")[0][0]
# def show_images(self):pass
# from selenium import webdriver
# driver = webdriver.Ie()
# med syn 25650411/12 9881495351/2
@webnotes.whitelist()
def set_report_status(name):
webnotes.conn.sql(""" update `tabPatient Report`
set report_status = '%(state)s'
where name = "%(name)s"
"""%{'state':'Final', 'name': name })
webnotes.conn.commit()
@webnotes.whitelist()
def set_reported_by(name, reported_by):
webnotes.conn.sql(""" update `tabPatient Report`
set reported_by = '%(reported_by)s'
where name = "%(name)s"
"""%{'reported_by': reported_by, 'name': name })
webnotes.conn.commit()
@webnotes.whitelist()
def get_encounters(doctype, txt, searchfield, start, page_len, filters):
return webnotes.conn.sql("""select name, patient, patient_name from `tabPatient Encounter Entry`
where (%(key)s like "%(txt)s"
or patient_name like "%(txt)s") """%{'key': searchfield, 'txt': "%%%s%%" % txt})
@webnotes.whitelist()
def get_pee(doctype, txt, searchfield, start, page_len, filters):
return webnotes.conn.sql("""select name, patient, patient_name from `tabPatient Encounter Entry`
where (%(key)s like "%(txt)s"
or patient_name like "%(txt)s") and status='Confirmed'"""%{'key': searchfield, 'txt': "%%%s%%" % txt})
| agpl-3.0 | 6,853,336,334,213,661,000 | 43.522523 | 230 | 0.701335 | false |
jpfxgood/ped | tests/test_ssh_dialog.py | 1 | 4654 | import curses
import curses.ascii
from ped_core import keytab
from ped_test_util import read_str,validate_dialog,editor_test_suite,play_macro,screen_size,match_attr
from ped_ssh_dialog.ssh_dialog import SSHFileDialog
from ped_ssh_dialog.ssh_mod import ssh_put, ssh_del, ssh_stat
from ped_dialog import dialog
import pytest
import os
import time
@pytest.fixture(scope="function")
def sftp_testdir(request,testdir):
sftp_basepath = os.environ.get("SSH_DIALOG_BASEPATH",None)
sftp_username = os.environ.get("SSH_DIALOG_USERNAME",None)
sftp_password = os.environ.get("SSH_DIALOG_PASSWORD",None)
assert sftp_basepath and sftp_username and sftp_password,"SSH_DIALOG environment not set"
local_files = []
remote_files = []
local_file_names = []
remote_file_names = []
for i in range(0,5):
args = { "local_%d"%(i):"\n".join(["local_%d test line %d"%(i,j) for j in range(0,200)])}
local_files.append(testdir.makefile(".txt",**args))
args = { "remote_%d"%(i):"\n".join(["local_%d test line %d"%(i,j) for j in range(0,200)])}
remote_files.append(testdir.makefile(".txt",**args))
for f in remote_files:
ssh_put( str(f), sftp_basepath+str(f),lambda : { "ssh_username" : sftp_username, "ssh_password" : sftp_password}, False )
remote_file_names.append(f.basename)
f.remove()
for f in local_files:
local_file_names.append(f.basename)
def cleanup_sftp_testdir():
ssh_del( sftp_basepath+str(testdir.tmpdir.parts()[1]),True, lambda : { "ssh_username" : sftp_username, "ssh_password" : sftp_password })
request.addfinalizer(cleanup_sftp_testdir)
return {"ssh_username" : sftp_username,
"ssh_password" : sftp_password,
"ssh_basepath": sftp_basepath+str(testdir.tmpdir),
"local_path": str(testdir.tmpdir),
"local_files" : local_file_names,
"remote_files" : remote_file_names,
"testdir" : testdir }
def test_ssh_dialog(sftp_testdir,capsys):
with capsys.disabled():
def main(stdscr):
screen_size( 30, 100 )
d = SSHFileDialog(stdscr, title = "SFTP File Manager",
remote_path=sftp_testdir["ssh_basepath"],
ssh_username=sftp_testdir["ssh_username"],
ssh_password=sftp_testdir["ssh_password"],
local_path=sftp_testdir["local_path"])
d.main(False,True)
validate_dialog(d)
d.main(False,True,keytab.KEYTAB_TAB)
d.main(False,True,keytab.KEYTAB_TAB)
d.main(False,True,keytab.KEYTAB_TAB)
assert(d.focus_list[d.current][1].name == "ssh_files")
d.main(False,True,keytab.KEYTAB_DOWN)
d.main(False,True,keytab.KEYTAB_DOWN)
d.main(False,True,keytab.KEYTAB_DOWN)
(ch,values) = d.main(False,True,keytab.KEYTAB_CR)
selection,file_list = values["ssh_files"]
assert(file_list[selection] == sftp_testdir["remote_files"][2] and values["ssh_file"] == sftp_testdir["remote_files"][2] and values["local_file"] == sftp_testdir["remote_files"][2])
d.goto(d.get_button)
(ch,values) = d.main(False,True,keytab.KEYTAB_SPACE)
assert(os.path.exists(os.path.join(str(sftp_testdir["testdir"].tmpdir),sftp_testdir["remote_files"][2])))
d.goto(d.file_list)
assert(d.focus_list[d.current][1].name == "local_files")
d.main(False,True,keytab.KEYTAB_DOWN)
d.main(False,True,keytab.KEYTAB_DOWN)
d.main(False,True,keytab.KEYTAB_DOWN)
(ch,values) = d.main(False,True,keytab.KEYTAB_CR)
selection,file_list = values["local_files"]
assert(file_list[selection] == sftp_testdir["local_files"][2] and values["ssh_file"] == sftp_testdir["local_files"][2] and values["local_file"] == sftp_testdir["local_files"][2])
d.goto(d.put_button)
(ch,values) = d.main(False,True,keytab.KEYTAB_SPACE)
assert(ssh_stat( values["ssh_dir"]+"/"+values["ssh_file"],lambda : { 'ssh_username':sftp_testdir['ssh_username'], 'ssh_password':sftp_testdir['ssh_password'] }) != (-1,-1))
d.goto(d.open_button)
(ch,values) = d.main(False,True,keytab.KEYTAB_SPACE)
assert(ch == dialog.Component.CMP_KEY_OK)
d.goto(d.cancel_button)
(ch,values) = d.main(False,True,keytab.KEYTAB_SPACE)
assert(ch == dialog.Component.CMP_KEY_CANCEL)
curses.wrapper(main)
| mit | 1,378,709,122,071,100,400 | 46.979381 | 193 | 0.603567 | false |
astuanax/stopwords | setup.py | 1 | 1758 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='stopwords',
version='1.0.0',
description="Stopwords filter for 42 languages",
long_description=readme + '\n\n' + history,
author="Len Dierickx",
author_email='[email protected]',
url='https://github.com/astuanax/stopwords',
    download_url = 'https://github.com/astuanax/stopwords/tarball/0.1',
packages=[
'stopwords',
],
package_dir={'stopwords':
'stopwords'},
include_package_data=True,
package_data={
'stopwords': [
'languages/*/*.txt',
'languages/languages.json',
]
},
install_requires=requirements,
license="ISCL",
zip_safe=False,
keywords=['stopwords','language processing','nlp','filter'],
classifiers=[
        'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: ISC License (ISCL)',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Artificial Intelligence'
],
#test_suite='tests',
#tests_require=test_requirements
)
| isc | 5,546,850,083,140,206,000 | 26.904762 | 103 | 0.612059 | false |
sdkyoku/FetchImg | logger.py | 1 | 2812 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Contributor:
# sdkyoku <[email protected]>
# Specially thanks:
# GoAgent Team
__version__ = '0.1.01'
import sys
import os
import time
class MyLogger(type(sys)):
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
INFO = 20
DEBUG = 10
NOTSET = 0
LEVEL = 20
def __init__(self, *args, **kw):
self.level = self.__class__.INFO
self.__set_error_color = lambda: None
self.__set_warning_color = lambda: None
self.__set_debug_color = lambda: None
self.__reset_color = lambda: None
if hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
if os.name == 'nt':
import ctypes
set_console_text_attr = ctypes.windll.kernel32.SetConsoleTextAttribute
get_std_handle = ctypes.windll.kernel32.GetStdHandle
self.__set_error_color = lambda: set_console_text_attr(get_std_handle(-11), 0x04)
self.__set_warning_color = lambda: set_console_text_attr(get_std_handle(-11), 0x06)
self.__set_debug_color = lambda: set_console_text_attr(get_std_handle(-11), 0x002)
self.__reset_color = lambda: set_console_text_attr(get_std_handle(-11), 0x07)
elif os.name == 'posix':
self.__set_error_color = lambda: sys.stderr.write('\033[31m')
self.__set_warning_color = lambda: sys.stderr.write('\033[33m')
self.__set_debug_color = lambda: sys.stderr.write('\033[32m')
self.__reset_color = lambda: sys.stderr.write('\033[0m')
@classmethod
def get_logger(cls, *args, **kw):
return cls(*args, **kw)
def basic_config(self, *args, **kw):
self.level = int(kw.get('level', self.__class__.INFO))
if self.level > self.__class__.DEBUG:
self.debug = self.dummy
def log(self, level, fmt, *args, **kw):
sys.stderr.write('%s - [%s] %s\n' % (level, time.ctime()[4:-5], fmt % args))
def dummy(self, *args, **kw):
pass
def debug(self, fmt, *args, **kw):
if self.LEVEL == 20:
return
self.__set_debug_color()
self.log('DEBUG', fmt, *args, **kw)
self.__reset_color()
def info(self, fmt, *args, **kw):
self.log('INFO', fmt, *args, **kw)
def warning(self, fmt, *args, **kw):
self.__set_warning_color()
self.log('WARNING', fmt, *args, **kw)
self.__reset_color()
def error(self, fmt, *args, **kw):
self.__set_error_color()
self.log('ERROR', fmt, *args, **kw)
self.__reset_color()
def set_logger_level(self, level):
self.LEVEL = int(level)
mylogger = sys.modules['mylogger'] = MyLogger('mylogger') # eof
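# Illustrative usage sketch (hypothetical URL): once this file has been imported,
# the instance registered above can be imported elsewhere as "mylogger":
#
#     import logger        # registers the pseudo-module in sys.modules
#     import mylogger
#     mylogger.basic_config(level=mylogger.INFO)
#     mylogger.info('fetching %s', 'http://example.com/img.jpg')
#     mylogger.warning('retry %d for %s', 2, 'http://example.com/img.jpg')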
| lgpl-3.0 | -5,334,778,995,574,390,000 | 32.082353 | 99 | 0.55192 | false |