import datetime as dt
import logging
import os
import re
import yaml
from flask import current_app
log = logging.getLogger(__name__)
def get_prefixed_index_html():
"""
    The backend should modify the <base> element of the index.html file so
    that it matches the prefix under which the backend is configured to listen.
"""
prefix = os.path.join("/", current_app.config["PREFIX"], "")
static_dir = current_app.config["STATIC_DIR"]
log.info("Setting the <base> to reflect the prefix: %s", prefix)
with open(os.path.join(static_dir, "index.html"), "r") as f:
index_html = f.read()
index_prefixed = re.sub(
r"\<base href=\".*\".*\>", '<base href="%s">' % prefix, index_html,
)
return index_prefixed
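# Illustrative sketch (not part of the original module) of how the regex
# rewrite above behaves; the HTML snippet and prefix value are hypothetical.
# Since `.` does not match newlines, the greedy pattern stays on the <base>
# line of a normally formatted index.html:
#
#     html = '<base href="/">'
#     re.sub(r"\<base href=\".*\".*\>", '<base href="/myapp/">', html)
#     # -> '<base href="/myapp/">'
#     # (os.path.join("/", "myapp", "") yields the "/myapp/" prefix above)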
def load_yaml(f):
"""
f: file path
Load a yaml file and convert it to a python dict.
"""
c = None
try:
with open(f, "r") as yaml_file:
c = yaml_file.read()
except IOError:
log.error("Error opening: %s", f)
return None
try:
contents = yaml.safe_load(c)
if contents is None:
# YAML exists but is empty
return {}
else:
# YAML exists and is not empty
return contents
except yaml.YAMLError:
return None
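# Usage sketch (illustrative; "settings.yaml" is a hypothetical path):
#
#     settings = load_yaml("settings.yaml")
#     if settings is None:
#         ...  # file could not be opened, or contained invalid YAML
#     elif settings == {}:
#         ...  # file exists but is empty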
def load_param_yaml(f, **kwargs):
"""
f: file path
Load a yaml file and convert it to a python dict. The yaml might have some
`{var}` values which the user will have to format. For this we first read
the yaml file and replace these variables and then convert the generated
string to a dict via the yaml module.
"""
c = None
try:
with open(f, "r") as yaml_file:
c = yaml_file.read().format(**kwargs)
except IOError:
log.error("Error opening: %s", f)
return None
try:
contents = yaml.safe_load(c)
if contents is None:
# YAML exists but is empty
return {}
else:
# YAML exists and is not empty
return contents
except yaml.YAMLError:
return None
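# Usage sketch (illustrative; the file name and keys are hypothetical). Given
# a template file containing e.g. `image: {image}`, the `{image}` placeholder
# is substituted via str.format() before the YAML is parsed:
#
#     spec = load_param_yaml("pod_template.yaml", image="alpine:3.18")
#     # spec == {"image": "alpine:3.18"}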
def get_uptime(then):
"""
then: datetime instance | string
    Return a string that reports how much time has passed since the provided
    timestamp.
"""
if isinstance(then, str):
then = dt.datetime.strptime(then, "%Y-%m-%dT%H:%M:%SZ")
    # timestamps of the form "%Y-%m-%dT%H:%M:%SZ" are UTC, so diff against UTC now
    now = dt.datetime.utcnow()
diff = now - then.replace(tzinfo=None)
days = diff.days
hours = int(diff.seconds / 3600)
mins = int((diff.seconds % 3600) / 60)
age = ""
if days > 0:
if days == 1:
age = str(days) + " day"
else:
age = str(days) + " days"
else:
if hours > 0:
if hours == 1:
age = str(hours) + " hour"
else:
age = str(hours) + " hours"
else:
if mins == 0:
return "just now"
if mins == 1:
age = str(mins) + " min"
else:
age = str(mins) + " mins"
    return age + " ago"

# ---- zoracloud / crud_backend/helpers.py ----

import functools
import logging
from kubernetes import client
from kubernetes import config as k8s_config
from kubernetes.client.rest import ApiException
from kubernetes.config import ConfigException
from werkzeug.exceptions import Forbidden, Unauthorized
from . import authn, config, settings
log = logging.getLogger(__name__)
try:
# Load configuration inside the Pod
k8s_config.load_incluster_config()
except ConfigException:
# Load configuration for testing
k8s_config.load_kube_config()
# The API object for submitting SubjectAccessReviews
authz_api = client.AuthorizationV1Api()
def create_subject_access_review(user, verb, namespace, group, version,
resource, subresource):
"""
    Create the SubjectAccessReview object which we will use to determine if
    the user is authorized.
"""
return client.V1SubjectAccessReview(
spec=client.V1SubjectAccessReviewSpec(
user=user,
resource_attributes=client.V1ResourceAttributes(
group=group,
namespace=namespace,
verb=verb,
resource=resource,
version=version,
subresource=subresource,
),
)
)
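# Illustrative sketch (all values are hypothetical): building a review that
# asks whether a user may list notebooks in a namespace.
#
#     sar = create_subject_access_review(
#         user="jane", verb="list", namespace="team-a",
#         group="kubeflow.org", version="v1", resource="notebooks",
#         subresource=None,
#     )
#     # authz_api.create_subject_access_review(sar) then submits it to the
#     # Kubernetes API, as is_authorized() below does.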
def is_authorized(user, verb, group, version, resource, namespace=None,
subresource=None):
"""
Create a SubjectAccessReview to the K8s API to determine if the user is
authorized to perform a specific verb on a resource.
"""
# Skip authz check if in dev mode
if config.dev_mode_enabled():
log.debug("Skipping authorization check in development mode")
return True
# Skip authz check if admin explicitly requested it
if settings.DISABLE_AUTH:
log.info("APP_DISABLE_AUTH set to True. Skipping authorization check")
return True
if user is None:
log.warning("No user credentials were found! Make sure you have"
" correctly set the USERID_HEADER in the Web App's"
" deployment.")
raise Unauthorized(description="No user credentials were found!")
sar = create_subject_access_review(user, verb, namespace, group, version,
resource, subresource)
try:
obj = authz_api.create_subject_access_review(sar)
except ApiException as e:
log.error("Error submitting SubjecAccessReview: %s, %s", sar, e)
raise e
if obj.status is not None:
return obj.status.allowed
else:
log.error("SubjectAccessReview doesn't have status.")
return False
def generate_unauthorized_message(user, verb, group, version, resource,
subresource=None, namespace=None):
msg = "User '%s' is not authorized to %s" % (user, verb)
if group == "":
msg += " %s/%s" % (version, resource)
else:
msg += " %s/%s/%s" % (group, version, resource)
if subresource is not None:
msg += "/%s" % subresource
if namespace is not None:
msg += " in namespace '%s'" % namespace
return msg
def ensure_authorized(verb, group, version, resource, namespace=None,
subresource=None):
user = authn.get_username()
if not is_authorized(user, verb, group, version, resource,
namespace=namespace, subresource=subresource):
msg = generate_unauthorized_message(user, verb, group, version,
resource, subresource=subresource,
namespace=namespace)
raise Forbidden(description=msg)
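# Usage sketch (illustrative; the handler name and resource values are
# hypothetical): a Flask view can call ensure_authorized() before touching
# the cluster, letting the Forbidden exception propagate to the error handler.
#
#     def get_notebooks(namespace):
#         ensure_authorized("list", "kubeflow.org", "v1", "notebooks",
#                           namespace=namespace)
#         ...  # proceed with the actual API call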
def needs_authorization(verb, group, version, resource, namespace=None,
subresource=None):
"""
This function will serve as a decorator. It will be used to make sure that
the decorated function is authorized to perform the corresponding k8s api
verb on a specific resource.
"""
def wrapper(func):
@functools.wraps(func)
def runner(*args, **kwargs):
# Run the decorated function only if the user is authorized
ensure_authorized(verb, group, version, resource,
namespace=namespace, subresource=subresource)
return func(*args, **kwargs)
return runner
    return wrapper

# ---- zoracloud / crud_backend/authz.py ----

import time
import datetime
import json
import csv
from math import log10
__doc__ = """
You have imported module: {file}
This module collects functions commonly used for data processing in PySpark;
they can be registered and used as UDF functions.
Auth: chenzhongrun
Mail: [email protected]
ReleaseDate: 2019-04-17
Usage:
from pyspark.sql import SparkSession
spark = SparkSession.builder \\
.appName("bonc_model")\\
.enableHiveSupport()\\
.getOrCreate()
spark.sparkContext.addPyFile('file:///tmp/geodata/udf_funcs.py')
import udf_funcs
e.g:
    spark.udf.register('stripLeftZero', udf_funcs.strip_left_zero_and_86)  # strip all leading zeros and the 86 prefix from the peer number to normalize its format
query = \"\"\"select distinct opp_nbr
,stripLeftZero(opp_nbr) opp_none_86
from source_zjdw.NET_CDR_VS_O
where input_day = "20190401" \"\"\"
df = spark.sql(query)
df.filter('opp_nbr <> opp_none_86').show()
+-------------+-----------+
| opp_nbr|opp_none_86|
+-------------+-----------+
| 057188661027|57188661027|
| 057182191628|57182191628|
|8618758881103|18758881103|
| 057186789250|57186789250|
| 057128154833|57128154833|
| 057182208591|57182208591|
|8615027490666|15027490666|
+-------------+-----------+
These are the funcs you can import and register:
{funcs}
"""
funcs = dict()
funcs_names = dict()
def udf_funcs(func):
func_name = func.__name__
func_name_upper = ''.join([w.capitalize() for w in func_name.split('_')])
funcs_names[func_name_upper] = func_name
funcs[func_name_upper] = func
return func
##################################################################
# The following functions Geohash-encode and decode coordinates #
##################################################################
# Note: the alphabet in geohash differs from the common base32
# alphabet described in IETF's RFC 4648
# (http://tools.ietf.org/html/rfc4648)
__base32 = '0123456789bcdefghjkmnpqrstuvwxyz'
__decodemap = {}
for i in range(len(__base32)):
__decodemap[__base32[i]] = i
del i
def decode_exactly(geohash):
"""
Decode the geohash to its exact values, including the error
margins of the result. Returns four float values: latitude,
longitude, the plus/minus error for latitude (as a positive
number) and the plus/minus error for longitude (as a positive
number).
"""
lat_interval, lon_interval = (-90.0, 90.0), (-180.0, 180.0)
lat_err, lon_err = 90.0, 180.0
is_even = True
for c in geohash:
cd = __decodemap[c]
for mask in [16, 8, 4, 2, 1]:
if is_even: # adds longitude info
lon_err /= 2
if cd & mask:
lon_interval = ((lon_interval[0]+lon_interval[1])/2, lon_interval[1])
else:
lon_interval = (lon_interval[0], (lon_interval[0]+lon_interval[1])/2)
else: # adds latitude info
lat_err /= 2
if cd & mask:
lat_interval = ((lat_interval[0]+lat_interval[1])/2, lat_interval[1])
else:
lat_interval = (lat_interval[0], (lat_interval[0]+lat_interval[1])/2)
is_even = not is_even
lat = (lat_interval[0] + lat_interval[1]) / 2
lon = (lon_interval[0] + lon_interval[1]) / 2
return lat, lon, lat_err, lon_err
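# Illustrative check using the classic Wikipedia test vector: geohash 'ezs42'
# (5 characters = 25 bits) decodes to roughly (42.605, -5.603):
#
#     lat, lon, lat_err, lon_err = decode_exactly('ezs42')
#     # lat ~ 42.60498, lon ~ -5.60303, lat_err ~ 0.02197, lon_err ~ 0.02197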
@udf_funcs
def decode(geohash):
"""
Decode geohash, returning two strings with latitude and longitude
containing only relevant digits and with trailing zeroes removed.
"""
lat, lon, lat_err, lon_err = decode_exactly(geohash)
# Format to the number of decimals that are known
lats = "%.*f" % (max(1, int(round(-log10(lat_err)))) - 1, lat)
lons = "%.*f" % (max(1, int(round(-log10(lon_err)))) - 1, lon)
if '.' in lats: lats = lats.rstrip('0')
if '.' in lons: lons = lons.rstrip('0')
return lats, lons
def encode(latitude, longitude, precision=12):
"""
Encode a position given in float arguments latitude, longitude to
a geohash which will have the character count precision.
"""
lat_interval, lon_interval = (-90.0, 90.0), (-180.0, 180.0)
geohash = []
bits = [ 16, 8, 4, 2, 1 ]
bit = 0
ch = 0
even = True
while len(geohash) < precision:
if even:
mid = (lon_interval[0] + lon_interval[1]) / 2
if longitude > mid:
ch |= bits[bit]
lon_interval = (mid, lon_interval[1])
else:
lon_interval = (lon_interval[0], mid)
else:
mid = (lat_interval[0] + lat_interval[1]) / 2
if latitude > mid:
ch |= bits[bit]
lat_interval = (mid, lat_interval[1])
else:
lat_interval = (lat_interval[0], mid)
even = not even
if bit < 4:
bit += 1
else:
geohash += __base32[ch]
bit = 0
ch = 0
return ''.join(geohash)
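# Illustrative round trip, using the same Wikipedia test vector as above:
#
#     encode(42.6, -5.6, precision=5)  # -> 'ezs42'
#     decode_exactly('ezs42')          # -> center within 0.022 of the input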
@udf_funcs
def geohash_encode(latitude, longitude, precision=12):
try:
latitude = float(latitude)
longitude = float(longitude)
except Exception:
return '0' * precision
return encode(latitude, longitude, precision)
##################################################################
# The following functions tag timestamps, e.g. traffic peak     #
# periods versus off-peak periods                                #
##################################################################
@udf_funcs
def peak(start_time):
    # 7:00-9:30 morning peak, 17:00-20:00 evening peak, others non-peak
    t = time.strptime(start_time, "%Y%m%d%H%M%S")
    if t.tm_hour in [7, 8]:
        return 'mor_peak'
    elif t.tm_hour == 9 and t.tm_min < 30:
        return 'mor_peak'
    elif t.tm_hour in [17, 18, 19]:
        return 'even_peak'
    else:
        return ''
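# Illustrative sketch (timestamps are hypothetical):
#
#     peak('20190401083000')  # -> 'mor_peak'  (08:30)
#     peak('20190401174500')  # -> 'even_peak' (17:45)
#     peak('20190401120000')  # -> ''          (off-peak)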
@udf_funcs
def weekday(start_time):
week_day = [0, 1, 2, 3, 4]
if time.strptime(start_time, "%Y%m%d%H%M%S").tm_wday in week_day:
return 'weekday'
else:
return 'weekend'
@udf_funcs
def daytime(start_time):
    # 7:00-20:00 daytime, 23:00-7:00 night, otherwise neither
day_time = ['07', '08', '09', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19']
# night: from 23 o'clock to 7 o'clock
night = ['23', '00', '01', '02', '03', '04', '05', '06']
if start_time[8: 10] in day_time:
return 'daytime'
elif start_time[8: 10] in night:
return 'night'
else:
return ''
@udf_funcs
def day_of_week(starttime, fm='%Y-%m-%d %H:%M:%S'):
"""
    Return the day of the week for starttime:
    0 corresponds to Monday, 6 to Sunday.
"""
if not starttime or not isinstance(starttime, str):
return 0
week_day = datetime.datetime.strptime(starttime, fm).weekday()
return week_day
@udf_funcs
def is_weekend(starttime, fm='%Y-%m-%d %H:%M:%S'):
"""
    Return whether starttime falls on a weekend (Saturday or Sunday).
"""
return int(day_of_week(starttime, fm) >= 5)
##################################################################
# The following functions process strings                       #
##################################################################
@udf_funcs
def strip_left_zero_and_86(phone_num):
"""
    Strip all leading zeros and a leading 86 country prefix from the number,
    normalizing its format.
    """
    # note: lstrip('86') strips the character set {'8', '6'}, not the prefix,
    # so remove the "86" prefix explicitly
    phone_num = phone_num.lstrip('0')
    if phone_num.startswith('86'):
        phone_num = phone_num[2:]
    return phone_num
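# Illustrative sketch (numbers taken from the module docstring above):
#
#     strip_left_zero_and_86('057188661027')   # -> '57188661027'
#     strip_left_zero_and_86('8618758881103')  # -> '18758881103'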
@udf_funcs
def trans_latin_to_utf8(s):
"""
    Re-decode latin-1 encoded characters as utf-8.
"""
return s.encode('latin').decode('utf8')
class AppLogger(object):
def __init__(self, file, level='DEBUG', role='root', p=True):
self.file = file
self.levels = {
'ERROR': 900,
'WARN': 700,
'INFO': 500,
'DEBUG': 300,
}
self.level = level.upper()
self.p = p
self.role = role
def log(self, level, message):
        # compare numeric level values; comparing the int against the level
        # *name* string would raise a TypeError on Python 3
        if self.levels.get(level.upper(), 0) < self.levels.get(self.level, 0):
            return
        log_time = time.strftime('%Y-%m-%d %H:%M:%S')
        content = '{} {} {}'.format(log_time, self.role, message)
        if self.p:
            print(content)
        with open(self.file, mode='a+') as f:
            f.write(content + '\n')
def open_csv(file):
with open(file, mode='r') as f:
f_csv = csv.reader(f)
rows = [row for row in f_csv]
return rows
def create_df_from_csv(file, spark):
    rows = open_csv(file)
rdd = spark.sparkContext.parallelize(rows)
df = spark.createDataFrame(rdd)
return df
__doc__ = __doc__.format(file=__file__, funcs=json.dumps(funcs_names, ensure_ascii=False, indent=2))
print(__doc__)

# ---- zoran-tools / zoran_tools-0.2.2.tar.gz/zoran_tools-0.2.2/zoran_tools/udf_funcs.py ----

import csv
import os
import sqlite3
from zoran_tools.path_tools import create_father_dir
from zoran_tools.csv_tools import readcsv
class CSV2DB(object):
def __init__(self, name=None, memory=':memory:'):
if name:
self.name = name
self.db = sqlite3.connect(memory)
self.table_dict = dict()
def create_table(self, name, columns):
"""
        Create a table in the database from a table name and a column definition.
        :param name: table name
        :param columns: column definition string
:return:
"""
if name not in self.tables:
sql = 'create table {}({});'.format(name, columns)
self.db.execute(sql)
return True
else:
return False
def get_table(self, name=None, filename=None, content=None, fields=None, encoding='utf8'):
"""
        Create a _Table object from a file or a list, and create the corresponding table in the database.
"""
table = _Table(db=self, name=name, filename=filename, content=content, fields=fields, encoding=encoding)
table.create()
self.table_dict[table.name] = table
return self
def table(self, name):
return self.table_dict[name]
    @property
    def tables(self):
        """
        List the names of all the tables in the database.
        """
        sql = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;"
        # fetchall() returns 1-tuples; flatten them so `name in self.tables`
        # works (the dead `printit` parameter is dropped: a property getter
        # cannot receive extra arguments)
        return [row[0] for row in self.runsql(sql).fetchall()]
def runsql(self, sql):
"""
        Run a SQL statement.
:param sql:
:return:
"""
return self.db.execute(sql)
def sql(self, sql):
"""
        Run a SQL statement; identical to self.runsql(). This duplicate exists to keep the interface naming consistent with Spark.
"""
return self.runsql(sql)
def run_insert(self, name, content):
"""
        Insert rows into a table.
        :param name: table name
        :param content: list of rows to insert; each element is one row
"""
self.db.executemany(
'INSERT INTO {} VALUES ({})'.format(name, ','.join(['?']*len(content[0]))),
[tuple(r) for r in content]
)
return len(content)
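# Usage sketch (illustrative; the CSV path, table name and query are
# hypothetical):
#
#     db = CSV2DB()
#     db.get_table(name='calls', filename='calls.csv')
#     rows = db.sql('select * from calls limit 10;').fetchall()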
class _Table(object):
def __init__(self, db, name=None, filename=None, content=None, fields=None, encoding='utf8'):
"""
        Table object.
        :param db: the database this table belongs to
        :param name: table name
        :param filename: CSV file the table content comes from
        :param content: list the table content comes from
        :param fields: table fields
        :param encoding:
"""
if (not name and not filename) or (not filename and not content):
raise ValueError
self.db = db
self._name = name
self._filename = filename
self._content = content
self._fields = fields
self.encoding = encoding
@property
def name(self):
"""
        Table name.
"""
if self._name:
return self._name
else:
return os.path.basename(os.path.splitext(self._filename)[0])
@property
def fields(self):
"""
        Fields.
"""
if self._fields:
return self._fields
else:
            pat = '_{} varchar(600)'  # keep the field names consistent with those generated by Spark
row_length = len(self.content[0])
if row_length == 0:
raise ValueError
indexes = list(range(1, row_length + 1))
fields = ','.join([pat.format(i) for i in indexes])
return fields
@property
def content(self):
if self._content:
return self._content
else:
return readcsv(filename=self._filename, encoding=self.encoding)
def collect(self):
        return self.content  # duplicated to keep the interface consistent with Spark
def _create(self):
self.db.create_table(name=self.name, columns=self.fields)
def _insert(self):
self.db.run_insert(name=self.name, content=self.content)
def create(self):
self._create()
self._insert()
def select_all(self):
sql = 'select * from {};'.format(self.name)
        return self.db.runsql(sql).fetchall()

# ---- zoran-tools / zoran_tools-0.2.2.tar.gz/zoran_tools-0.2.2/zoran_tools/csvdb.py ----

import sys
import math
import time
import codecs
from collections.abc import Iterable  # `collections.Iterable` was removed in Python 3.10
def readfile(file, encoding='utf8'):
"""
    Read a text file and return its content.
    :param file: file name to read
    :param encoding: file encoding, utf8 by default
    :return: the file content
"""
with codecs.open(file, mode='r', encoding=encoding) as f:
return f.read()
def writefile(file, content, mode='w', encoding='utf8'):
"""
    Write text content to a file.
    :param file: file name to write to
    :param content: content to write
    :param mode: write mode, 'w' (overwrite) by default
    :param encoding: encoding for writing, utf8 by default
    :return:
"""
with codecs.open(file, mode=mode, encoding=encoding) as f:
f.write(content)
def transcode(from_file, to_file, from_code='utf8', to_code='GBK'):
"""
    Convert a text file from one encoding to another.
    :param from_file: file name to convert
    :param to_file: file name after conversion
    :param from_code: encoding before conversion
    :param to_code: encoding after conversion
    :return:
"""
content = readfile(from_file, encoding=from_code)
writefile(to_file, content, encoding=to_code)
return '{} ====> {}'.format(from_file, to_file)
def split_list(li, num=8):
"""
    Split a list into several parts.
    :param li: list to split
    :param num: number of parts to split into
    :return: the split result
"""
return split_list_by_len(li, math.ceil(len(li) / num))
def split_list_by_len(li, n):
return [li[i: i + n] for i in range(0, len(li), n)]
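# Illustrative sketch:
#
#     split_list(list(range(10)), num=3)
#     # -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]  (chunks of ceil(10/3) = 4)
#     split_list_by_len(list(range(10)), n=4)
#     # -> the same chunks of length 4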
class WithProgressBar(object):
"""
    Prints a progress bar on the console. Usage (wraps a for loop):
    # progress bar only
    for i in WithProgressBar(range(1000)):
        pass
    ##############################################----98%
    # print the progress bar and also capture a preview of part of the data
    preview_data = PreviewDataInLoop()
    for i in WithProgressBar(range(1000), preview_data=preview_data):
        pass
    ##################################################100%
    print(preview_data.preview)
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,... ...199]
"""
def __init__(self, iterable: Iterable, sign: str='#', preview_data=None, condition=lambda obj: True):
"""
        Takes an iterable `iterable` and a condition `condition`, and yields
        the elements that satisfy the condition. While iterating, a progress
        bar is printed on the console, drawn with the `sign` character.
        If preview_data (a PreviewDataInLoop instance) is passed, a slice of
        the elements is captured as preview data.
:param iterable:
:param sign:
:param preview_data:
:param condition:
"""
if isinstance(iterable, Iterable):
self.iterable = iterable
elif isinstance(iterable, int):
self.iterable = range(iterable)
else:
raise TypeError
self.sign = sign
self.preview_data = preview_data
self.count = 0
self.condition = condition
def __len__(self):
return len(self.iterable)
def __iter__(self):
i = 0
for i, obj in enumerate(self.iterable, 1):
if self.condition(obj):
if isinstance(self.preview_data, PreviewDataInLoop):
self.preview_data.append(obj)
# self.preview_data.count += 1
# if self.preview_data.count <= self.preview_data.limit:
# self.preview_data.preview.append(obj)
yield obj
percentage = i * 100 / self.__len__()
time_now = time.strftime('%Y-%m-%d %H:%M:%S>')
writing = time_now + self.sign * int(percentage / 2) + '-' * (50 - int(percentage / 2)) + '%d%%\r' % (percentage)
sys.stdout.write(writing)
sys.stdout.flush()
sys.stdout.write(time_now + self.sign * 50 + '100%\r\n')
self.count = i
class PreviewDataInLoop(object):
"""
    Captures preview data inside a for loop. Usage is shown in the
    WithProgressBar docstring, and also below:
    preview_data = PreviewDataInLoop()
    for i in preview_data.with_progress_bar(range(1000)):
        time.sleep(0.1)
    ##################################################100%
    print(preview_data.preview)
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,... ...199]
    Why capture a slice of preview data inside a for loop? This is useful
    when fetching data from a database.
    How to apply it to a while loop? There is no particularly good way:
    preview_data = PreviewDataInLoop()
    while some_condition(obj):
        preview_data.append(obj)
"""
def __init__(self, limit=200):
self.preview = []
self.limit = limit
self.count = 0
def append(self, obj):
self.count += 1
if self.count <= self.limit:
self.preview.append(obj)
def with_progress_bar(self, iterable, sign='#', condition=lambda obj: True):
        return WithProgressBar(iterable=iterable, sign=sign, preview_data=self, condition=condition)

# ---- zoran-tools / zoran_tools-0.2.2.tar.gz/zoran_tools-0.2.2/zoran_tools/zoran_tools.py ----

import csv
from zoran_tools.path_tools import create_father_dir
def readcsv(filename: str, encoding: str='utf8', delimiter: str=',', quotechar: str=None, li: bool=True):
"""
接收一个CSV文件名, 返回其内容, 可能返回<list>也可能返回<generator>
:param filename: 要读取的CSV路径
:param encoding:
:param delimiter:
:param quotechar:
:param li: 指定返回的数据格式, 为True时返回列表, 为False时返回生成器
:return:
"""
with open(file=filename, mode='r', encoding=encoding, newline='') as f:
if quotechar:
f_csv = csv.reader(f, delimiter=delimiter, quotechar=quotechar)
else:
f_csv = csv.reader(f, delimiter=delimiter)
if li:
return list(f_csv)
else:
for row in f_csv:
yield row
def write_csv(filename: str, row_or_rows: list, mode: str='a+', encoding: str='utf8', newline: str='',
delimiter: str=',', quotechar: str='"', quoting: str='all'):
"""
    Takes a file name (path) and a list, and writes the list to the file.
    :param filename: <str> file name to write to
    :param row_or_rows: <list> list to write; either a single row or a list of rows
    :param mode:
    :param encoding:
    :param newline:
    :param delimiter:
    :param quotechar:
    :param quoting: <csv.QUOTE_MINIMAL, csv.QUOTE_ALL, csv.QUOTE_NONNUMERIC, csv.QUOTE_NONE>
        csv.QUOTE_MINIMAL quotes a field only when it contains the delimiter,
        csv.QUOTE_ALL quotes every field unconditionally,
        csv.QUOTE_NONNUMERIC quotes non-numeric fields and leaves numbers unquoted,
        csv.QUOTE_NONE quotes nothing
"""
create_father_dir(filename)
quoting_dict = {
'minimal': csv.QUOTE_MINIMAL,
'nonnumeric': csv.QUOTE_NONNUMERIC,
'all': csv.QUOTE_ALL,
'none': csv.QUOTE_NONE,
}
quoting = quoting_dict.get(quoting.lower(), csv.QUOTE_NONE)
with open(file=filename, mode=mode, encoding=encoding, newline=newline) as f:
sp = csv.writer(f, delimiter=delimiter, quotechar=quotechar, quoting=quoting)
if all([isinstance(e, (list, tuple)) for e in row_or_rows]):
sp.writerows(row_or_rows)
else:
sp.writerow(row_or_rows)
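# Usage sketch (illustrative; the path is hypothetical). The default mode is
# 'a+', so repeated calls append:
#
#     write_csv('out.csv', [['a', 1], ['b', 2]], quoting='all')
#     # writes two fully quoted rows; pass a flat list to write a single row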
def write_csv_row(file: str, row: list, mode: str='a+', encoding: str='utf8', newline: str=''):
with open(file, mode=mode, encoding=encoding, newline=newline) as f:
        csv.writer(f).writerow(row)

# ---- zoran-tools / zoran_tools-0.2.2.tar.gz/zoran_tools-0.2.2/zoran_tools/csv_tools.py ----

import logging
import time
import json
class Logger(object):
levels = {
None: -1,
'DEBUG': 100,
'INFO': 200,
'WARN': 300,
'ERROR': 400,
}
time_format = '%y-%m-%d %H:%M:%S'
def __init__(self, obj='root', level='DEBUG'):
self._level = level
self._levels = Logger.levels
self._message = ''
self.role = obj.__repr__()
self._message_bak = obj.__str__()
self._time_format = Logger.time_format
self._print_time: str = ''
self._print = '{level}: {role} {time}\n\t{message}'
@property
def level(self):
return self._level
@level.setter
def level(self, level):
if level in self._levels:
self._level = level
else:
raise ValueError
@property
def time_fmt(self):
return self._time_format
@time_fmt.setter
def time_fmt(self, fmt):
self._time_format = fmt
def time(self):
self._print_time = time.strftime(self._time_format)
return self
def json(self, dictionary):
self._message += '\n' + json.dumps(dictionary, indent=2, ensure_ascii=False)
return self
def message(self, message):
self._message += message.__str__()
return self
def print_message(self, message, level):
if Logger.levels.get(level) >= Logger.levels.get(self._level):
self.message(message)
print(self._print.format(level=level, role=self.role, time=self._print_time, message=self._message))
self._print_time = ''
self._message = ''
return self
def info(self, message):
return self.print_message(message, 'INFO')
def debug(self, message):
return self.print_message(message, 'DEBUG')
def warn(self, message):
return self.print_message(message, 'WARN')
def error(self, message):
        return self.print_message(message, 'ERROR')

# ---- zoran-tools / zoran_tools-0.2.2.tar.gz/zoran_tools-0.2.2/zoran_tools/log.py ----

import os
import tkinter as tk
from tkinter import filedialog
from pathlib import Path
filetypes=(
('text file', '*.md *.txt'),
('Word file', '*.doc *.docx'),
('CSV file', '*.csv'),
('SQL file', '*.sql *.hql'),
('Python file', '*.py'),
('Java file', '*.jar *.java *.class'),
('Go file', '*.go'),
('All file', '*'),
)
class ZPath(object):
def __init__(self, path):
self._path = path
def __str__(self):
return '<zoran_tools.Path>\n{}'.format(self.path)
def __repr__(self):
return self.__str__()
@property
def path(self):
return self._path
@path.setter
def path(self, path):
self._path = path
@property
def basename(self):
return os.path.basename(self.path)
@property
def abspath(self):
return os.path.abspath(self.path)
@property
def dirname(self):
return os.path.dirname(self.abspath)
def join(self, *paths, split=True, new=False):
if split:
dir_name = os.path.splitext(self.abspath)[0]
else:
dir_name = self.path
if new:
return ZPath(os.path.join(dir_name, *paths))
else:
return os.path.join(dir_name, *paths)
def make(self, split=True, new=False):
if split:
dir_name = os.path.splitext(self.abspath)[0]
else:
dir_name = self.path
if not os.path.exists(dir_name) or not os.path.isdir(dir_name):
os.makedirs(dir_name)
if new:
return ZPath(dir_name)
else:
return dir_name
def isdir(self):
return os.path.isdir(self.path)
def isfile(self):
return os.path.isfile(self.path)
def exist(self):
return os.path.exists(self.abspath)
def children(self):
return {self.basename: os.listdir(self.path)}
def tree(self, maxlevel=3):
if maxlevel == 0:
return self.basename
try:
return {
self.basename: [
self.join(e, new=True).tree(maxlevel=maxlevel-1)
if self.join(e, new=True).isdir() else self.join(e, new=True).basename
for e in os.listdir(self.path)
]
}
except PermissionError:
return self.basename
def plot_tree(self, maxlevel=3):
def _plot(node, plot='', level=0):
if isinstance(node, str):
plot += (' ' * 2) * (level + 1) + '|-- ' + node + '\n'
elif isinstance(node, dict):
for k in node:
v = node[k]
plot = _plot(plot=plot, node=k, level=level)
plot = _plot(plot=plot, node=v, level=level + 1)
elif isinstance(node, list):
node = sorted(node, key=lambda e: not isinstance(e, str))
for i, e in enumerate(node):
plot = _plot(plot=plot, node=e, level=level + 1)
return plot
return _plot(node=self.tree(maxlevel=maxlevel))
class File(ZPath):
pass
class Directory(ZPath):
pass
def plot_tree(path=None, maxlevel=3):
if path is None:
path = os.getcwd()
return ZPath(path).plot_tree(maxlevel=maxlevel)
def list_files(directory=None, fm=None, return_abs=False, mode=None):
"""
    Return all files under a directory as a <list>.
    :param directory: <str> directory path;
        if given, the files under that directory are returned;
        otherwise the files under the current working directory are returned
    :param fm: <str, list> file format(s);
        if given, only files with the given extension(s) are returned;
        otherwise all files are returned
    :param return_abs: <bool> return absolute paths when True, base names when False
"""
if not directory:
directory = os.getcwd()
    # join the directory so the isfile/isdir/abspath checks below work even
    # when `directory` is not the current working directory
    files = [os.path.join(directory, f) for f in os.listdir(directory)]
if isinstance(mode, File):
files = [file for file in files if os.path.isfile(file)]
elif isinstance(mode, Directory):
files = [file for file in files if os.path.isdir(file)]
else:
pass
if isinstance(fm, str):
fm = [fm]
if isinstance(fm, (list, tuple)):
fm = ['.{}'.format(e) for e in fm]
files = [file for file in files if os.path.splitext(file)[-1] in fm]
if return_abs:
files = [os.path.abspath(file) for file in files]
else:
files = [os.path.basename(file) for file in files]
return files
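# Usage sketch (illustrative; the directory and extensions are hypothetical):
#
#     list_files('/tmp/data', fm=['csv', 'txt'])           # base names only
#     list_files('/tmp/data', fm='csv', return_abs=True)   # absolute paths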
def create_father_dir(filename):
"""
    Takes a file name; if its parent directory does not exist, creates it.
    :param filename: <str> file path, may be relative
"""
abs_filename = os.path.abspath(filename)
father_dir = os.path.dirname(abs_filename)
if not os.path.exists(father_dir) or not os.path.isdir(father_dir):
os.makedirs(father_dir)
return father_dir
def create_dir_same_as_filename(filename):
"""
    Create a directory named after the file (extension stripped).
    :param filename: <str> file path, relative or absolute
"""
abs_filename = os.path.abspath(filename)
split_extension_abs_filename = os.path.splitext(abs_filename)[0]
if os.path.exists(split_extension_abs_filename) and os.path.isdir(split_extension_abs_filename):
return split_extension_abs_filename
else:
os.makedirs(split_extension_abs_filename)
return split_extension_abs_filename
# tkinter.filedialog.asksaveasfile(): choose a file name to save as; creates the file and returns a file object
# tkinter.filedialog.askopenfile(): choose a file to open; returns an IO object
# tkinter.filedialog.askopenfiles(): choose several files to open; returns a list of IO objects
def get_goal_by_dialog_box(goal='file', filetype=None):
"""
    Open a dialog box to select a file or a directory, depending on the goal parameter.
    :param goal:
    :param filetype:
    :return: the selected file name or directory name
"""
root = tk.Tk()
root.withdraw()
goal_dict = {
'file': filedialog.askopenfilename,
'files': filedialog.askopenfilenames,
'directory': filedialog.askdirectory,
'dir': filedialog.askdirectory,
'saveas': filedialog.asksaveasfilename,
'save_as': filedialog.asksaveasfilename,
}
goal_func = goal_dict.get(goal)
    # the tkinter file dialogs take the option name `filetypes`, not `filetype`
    goal_name = goal_func(filetypes=filetype) if isinstance(filetype, tuple) else goal_func()
root.destroy()
return goal_name
def ask_file(filetype=None):
"""
    Open a dialog box to select a file; returns the file path.
    Built on the tkinter framework.
    Example:
    a = ask_file(filetype=(
        ('text file', '*.md *.txt'),
        ('word file', '*.doc *.docx'),
        ('all file', '*'),
        )
    )
    :return: the absolute path of the selected file
"""
return get_goal_by_dialog_box(goal='file', filetype=filetype)
def ask_files(filetype=None):
"""
    Open a dialog box to select several files; returns a list of file names.
:return:
"""
return get_goal_by_dialog_box(goal='files', filetype=filetype)
def ask_dir():
"""
    Open a dialog box to select a directory; returns the directory name.
:return:
"""
return get_goal_by_dialog_box(goal='directory')
def ask_save_as():
"""
    Open a dialog box to choose a file name; if the file already exists, asks whether to overwrite it.
:return:
"""
return get_goal_by_dialog_box(goal='save_as')
def ask_chdir():
"""
    Change the console working directory.
:return:
"""
return os.chdir(ask_dir())
def get_size(dir_or_file, unit='m'):
"""
    Compute the size of a file or directory.
    :param dir_or_file: file or directory path
    :param unit: unit of the returned value, MB by default
:return:
"""
size = 0
if os.path.isfile(dir_or_file):
size = os.path.getsize(dir_or_file)
elif os.path.isdir(dir_or_file):
for root, dirs, files in os.walk(dir_or_file):
size += sum(map(lambda e: os.stat(os.path.join(root, e)).st_size, files))
else:
return 0
unit_dict = {
'k': lambda x: x / 1024,
'kb': lambda x: x / 1024,
'm': lambda x: x / 1024 / 1024,
'mb': lambda x: x / 1024 / 1024,
'g': lambda x: x / 1024 / 1024 / 1024,
'gb': lambda x: x / 1024 / 1024 / 1024,
}
    return '{:.2f}'.format(unit_dict.get(unit.lower(), lambda x: x)(size))

# ---- zoran-tools / zoran_tools-0.2.2.tar.gz/zoran_tools-0.2.2/zoran_tools/path_tools.py ----

import re
import csv
from .exception import LengthException
from tools.prettytable import PrettyTable
class Square(object):
def __init__(self, length=0, width=0):
self.length = length
self.width = width
def __str__(self):
return 'size: {} lines × {} columns'.format(self.length, self.width)
def __repr__(self):
return self.__str__()
class FakeFrame(object):
def __init__(self):
        self._content = [Row(fields='_1', row=[''])]  # this design wastes quite a bit of memory
self.name = None
def __str__(self):
        return '<frame.sql.FakeFrame>, it has {} lines,\nits first row fields are {}' \
            .format(self.length, self.dtypes)
    def __repr__(self):
        return '<frame.sql.FakeFrame>, it has {} lines'.format(self.length)
def __len__(self):
return self._content.__len__()
def __getitem__(self, item):
        # slicing a FakeFrame returns a new, sliced FakeFrame
if isinstance(item, slice):
fakeframe = FakeFrame()
fakeframe._content = self._content.__getitem__(item)
return fakeframe
        # indexing a FakeFrame returns one of its elements, i.e. a Row object
elif isinstance(item, int):
return self._content.__getitem__(item)
def __setitem__(self, key, value):
return self._content.__setitem__(key, value)
def __add__(self, other):
        fakeframe = FakeFrame()
        fakeframe._content = self.collection + other.collection
        return fakeframe
@property
def length(self):
"""
        :return: the length, i.e. the number of rows
"""
return self.__len__()
@property
def width(self):
"""
        :return: the width (field count) of the widest row
"""
return max([len(row) for row in self])
@property
def square(self):
"""
        :return: an object describing the length and width of this frame
"""
return Square(length=self.length, width=self.width)
@property
def dtypes(self):
"""
        :return: the (field, type) pairs of the first row
"""
return [(f, type(e)) for f, e in zip(self.first().fields, self.first().collection)]
@property
def collection(self):
"""
        :return: a list whose every element is a Row object
"""
return self._content
def pure_content(self):
"""
        :return: a list whose every element is a plain list
"""
return [row.values for row in self]
def first_row_fields(self):
"""
        :return: the fields of the first row
"""
return self.first().fields
def take(self, num=1):
"""
:param num:
        :return: row num when num is an int; the corresponding slice when num is a slice
"""
return self[num]
def show(self, num=None, printt=True):
"""
        Pretty-print a preview of the content.
:param num:
:param printt:
:return:
"""
if isinstance(num, (int, slice)):
fakeframe = self[:num] if isinstance(num, int) else self[num]
x = PrettyTable()
x._set_field_names(fakeframe.first_row_fields())
for row in fakeframe:
x.add_row(row=row.collection)
if printt:
print(x)
else:
return x.__str__()
else:
print('show limit 10 lines')
return self.show(num=10)
def count(self):
"""
        :return: the number of rows
"""
return self.length
def first(self):
"""
        :return: the first row
"""
return self[0]
def lengthies(self):
"""
        :return: the width of each row
"""
return [len(row) for row in self]
def neat(self):
"""判断对象每一行宽度是否相等, 相等返回True,否则返回False"""
return len(set(self.lengthies())) == 1
def select(self, *items, new=False):
"""
        Select columns.
        :param items: fields to select; a list, several strings, or a single
            string containing several comma-separated fields
        :param new:
        :return: a FakeFrame object
"""
content = [row.select(items) for row in self]
if new:
fakeframe = FakeFrame()
fakeframe._content = content
return fakeframe
else:
self._content = content
return self
def _update(self, content, new=False):
"""重新生成一个有着全新_content的FakeFrame对象"""
if new:
fakeframe = FakeFrame()
fakeframe._content = content
return fakeframe
else:
self._content = content
return self
def delete(self, condition, new=False):
"""
        Delete the rows that satisfy the condition.
        :param condition: the condition, a function returning a truth value
        :param new: whether to build a new FakeFrame or update this one in place
:return:
"""
content = [row for row in self if not row.filter(condition)]
return self._update(content, new)
def remove(self, row):
"""移除指定的行,可能会产生未知的问题"""
return self._content.remove(row)
def filter(self, condition, new=False):
"""
        Keep the rows that satisfy the condition; see the delete method.
:param condition:
:param new:
:return:
"""
content = [row for row in self if row.filter(condition)]
return self._update(content, new)
def left_join(self, other_df, on_condition):
pass
def pop(self, index=None):
"""从对象中删除最后一行,并返回这一行"""
if index:
row = self._content.pop(index)
else:
row = self._content.pop()
return row
def set_fields(self, fields):
"""重设所有行的字段"""
for row in self:
row.fields = fields
def with_incid(self, position=0, start=1):
"""为对象增加一个自增列"""
for i, row in enumerate(self, start):
row.with_element(key='id', value=i, position=position)
def with_column(self, key, value, position=0):
"""
        Add a column.
        :param key: field name of the new column
        :param value: value of the new column; either a function or a fixed value
        :param position: position of the new column, starting from 0
:return:
"""
for row in self:
row.with_element(key=key, value=value, position=position)
def union(self, other):
"""把另一个FakeFrame对象追加到本对象后面"""
self._content += other.collection
def set(self, key):
"""
        Return the set of values in a column.
        :param key: column name
:return:
"""
return set([row.values[0] for row in self.select(key, new=True)])
def count_key(self, key):
"""
        Return each value in a column together with its frequency.
        :param key: column name
:return:
"""
values_set = set([row.values[0] for row in self.select(key, new=True)])
ck = []
for v in values_set:
count = self.filter(condition=lambda row: row[key] == '{}'.format(v), new=True).count()
ck.append((v, count))
return ck
def update_row_value(self, update, condition):
"""
        Update the values of certain rows.
        :param update: a function that modifies a row's element values
        :param condition: a function that selects the rows to modify
:return:
"""
pass
def to_csv(self, file, head=False, encoding='utf8', quoting=csv.QUOTE_ALL, mode='append'):
"""
        Write the frame content to a CSV file.
        :param file: file name to write to
        :param head: whether to write the column names as a header row
:param encoding:
:param quoting:
:param mode:
:return:
"""
mode_dict = {'a+': 'a+', 'append': 'a+', 'write': 'w', 'overwrite': 'w'}
mode = mode_dict.get(mode)
with open(file, mode=mode, encoding=encoding, newline='') as f:
f_csv = csv.writer(f, quoting=quoting)
if head:
f_csv.writerow(self.first_row_fields())
f_csv.writerows(self.pure_content())
def sort(self, key, new=False):
new_content = sorted(self._content, key=key)
if new:
fakeframe = FakeFrame()
fakeframe._content = new_content
return fakeframe
else:
self._content = new_content
return self
class Row(object):
def __init__(self, fields=None, row=None):
self._fields = fields
self._row = row
def __iter__(self):
return self._row.__iter__()
def __setitem__(self, key, value):
for i, f in enumerate(self._fields):
if f == key:
self._row[i] = value
def __str__(self):
return 'Row{}'.format(['"{}": {}'.format(f, e) for f, e in zip(self._fields, self._row)].__str__())
def __repr__(self):
return self.__str__()
def __getitem__(self, item):
if isinstance(item, str) and item in self._fields:
item = self._fields.index(item)
if isinstance(item, (int, slice)):
return self._row.__getitem__(item)
else:
raise TypeError
def __len__(self):
return self._row.__len__()
def __add__(self, other):
fields = ['left' + field for field in self.fields] + ['right' + field for field in other.fields]
row = self.collection + other.collection
return Row(fields=fields, row=row)
def filter(self, condition):
return condition(self)
@property
def fields(self):
return self._fields
@fields.setter
def fields(self, fields):
if isinstance(fields, (list, tuple)):
if len(fields) == len(self.fields):
self._fields = fields
else:
raise LengthException
else:
raise TypeError
@property
def values(self):
return self.collect()
def collect(self):
return self._row
@property
def collection(self):
return self.collect()
def select(self, items):
if isinstance(items, tuple):
if len(items) == 1:
items = items[0]
else:
items = list(items)
if isinstance(items, str):
items = re.sub('\s', '', items).split(',')
if not isinstance(items, list):
raise TypeError
if set(items).issubset(self._fields):
row = [self[e] for e in items]
return Row(fields=items, row=row)
else:
print(items)
raise IndexError
def dict(self):
d = dict()
for f, v in zip(self.fields, self.collection):
d[f] = v
return d
def with_element(self, key, value='', position=0):
if hasattr(value, '__call__'):
value = value(self)
self._fields.insert(position, key)
self._row.insert(position, value)
def update_row_value(self, update):
        pass

# ---- zoran-tools / zoran_tools-0.2.2.tar.gz/zoran_tools-0.2.2/zoran_tools/frame/frame/sql.py ----

__version__ = "TRUNK"
import copy
import csv
import random
import sys
import textwrap
import itertools
import unicodedata
py3k = sys.version_info[0] >= 3
if py3k:
unicode = str
basestring = str
itermap = map
iterzip = zip
uni_chr = chr
else:
itermap = itertools.imap
iterzip = itertools.izip
uni_chr = unichr
if py3k and sys.version_info[1] >= 2:
from html import escape
else:
from cgi import escape
# hrule styles
FRAME = 0
ALL = 1
NONE = 2
# Table styles
DEFAULT = 10
MSWORD_FRIENDLY = 11
PLAIN_COLUMNS = 12
RANDOM = 20
def _get_size(text):
lines = text.split("\n")
height = len(lines)
width = max([_str_block_width(line) for line in lines])
return (width, height)
class PrettyTable(object):
def __init__(self, field_names=None, **kwargs):
"""Return a new PrettyTable instance
Arguments:
encoding - Unicode encoding scheme used to decode any encoded input
field_names - list or tuple of field names
fields - list or tuple of field names to include in displays
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
header_style - stylisation to apply to field names in header ("cap", "title", "upper", "lower" or None)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
vertical_char - single character string used to draw vertical lines
horizontal_char - single character string used to draw horizontal lines
junction_char - single character string used to draw line junctions
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
reversesort - True or False to sort in descending or ascending order"""
if "encoding" in kwargs:
self.encoding = kwargs["encoding"]
else:
self.encoding = "UTF-8"
# Data
self._field_names = []
self._align = {}
self._max_width = {}
self._rows = []
if field_names:
self.field_names = field_names
else:
self._widths = []
self._rows = []
# Options
self._options = "start end fields header border sortby reversesort sort_key attributes format hrules".split()
self._options.extend("int_format float_format padding_width left_padding_width right_padding_width".split())
self._options.extend("vertical_char horizontal_char junction_char header_style".split())
for option in self._options:
if option in kwargs:
self._validate_option(option, kwargs[option])
else:
kwargs[option] = None
self._start = kwargs["start"] or 0
self._end = kwargs["end"] or None
self._fields = kwargs["fields"] or None
self._header = kwargs["header"] or True
self._header_style = kwargs["header_style"] or None
self._border = kwargs["border"] or True
self._hrules = kwargs["hrules"] or FRAME
self._sortby = kwargs["sortby"] or None
self._reversesort = kwargs["reversesort"] or False
self._sort_key = kwargs["sort_key"] or (lambda x: x)
self._int_format = kwargs["int_format"] or {}
self._float_format = kwargs["float_format"] or {}
self._padding_width = kwargs["padding_width"] or 1
self._left_padding_width = kwargs["left_padding_width"] or None
self._right_padding_width = kwargs["right_padding_width"] or None
self._vertical_char = kwargs["vertical_char"] or self._unicode("|")
self._horizontal_char = kwargs["horizontal_char"] or self._unicode("-")
self._junction_char = kwargs["junction_char"] or self._unicode("+")
self._format = kwargs["format"] or False
self._attributes = kwargs["attributes"] or {}
def _unicode(self, value):
if not isinstance(value, basestring):
value = str(value)
if not isinstance(value, unicode):
value = unicode(value, self.encoding, "strict")
return value
def _justify(self, text, width, align):
excess = width - _str_block_width(text)
if align == "l":
return text + excess * " "
elif align == "r":
return excess * " " + text
else:
if excess % 2:
# Uneven padding
# Put more space on right if text is of odd length...
if _str_block_width(text) % 2:
return (excess // 2) * " " + text + (excess // 2 + 1) * " "
# and more space on left if text is of even length
else:
return (excess // 2 + 1) * " " + text + (excess // 2) * " "
# Why distribute extra space this way? To match the behaviour of
# the inbuilt str.center() method.
else:
# Equal padding on either side
return (excess // 2) * " " + text + (excess // 2) * " "
def __getattr__(self, name):
if name == "rowcount":
return len(self._rows)
elif name == "colcount":
if self._field_names:
return len(self._field_names)
elif self._rows:
return len(self._rows[0])
else:
return 0
else:
raise AttributeError(name)
def __getitem__(self, index):
newtable = copy.deepcopy(self)
if isinstance(index, slice):
newtable._rows = self._rows[index]
elif isinstance(index, int):
newtable._rows = [self._rows[index], ]
else:
raise Exception("Index %s is invalid, must be an integer or slice" % str(index))
return newtable
if py3k:
def __str__(self):
return self.__unicode__()
else:
def __str__(self):
return self.__unicode__().encode(self.encoding)
def __unicode__(self):
return self.get_string()
##############################
# ATTRIBUTE VALIDATORS #
##############################
# The method _validate_option is all that should be used elsewhere in the code base to validate options.
# It will call the appropriate validation method for that option. The individual validation methods should
# never need to be called directly (although nothing bad will happen if they *are*).
# Validation happens in TWO places.
# Firstly, in the property setters defined in the ATTRIBUTE MANAGMENT section.
# Secondly, in the _get_options method, where keyword arguments are mixed with persistent settings
def _validate_option(self, option, val):
if option in ("field_names"):
self._validate_field_names(val)
elif option in (
"start", "end", "max_width", "padding_width", "left_padding_width", "right_padding_width", "format"):
self._validate_nonnegative_int(option, val)
elif option in ("sortby"):
self._validate_field_name(option, val)
elif option in ("sort_key"):
self._validate_function(option, val)
elif option in ("hrules"):
self._validate_hrules(option, val)
elif option in ("fields"):
self._validate_all_field_names(option, val)
elif option in ("header", "border", "reversesort"):
self._validate_true_or_false(option, val)
elif option in ("header_style"):
self._validate_header_style(val)
# elif option in ("int_format"):
# self._validate_int_format(option, val)
# elif option in ("float_format"):
# self._validate_float_format(option, val)
elif option in ("vertical_char", "horizontal_char", "junction_char"):
self._validate_single_char(option, val)
elif option in ("attributes"):
self._validate_attributes(option, val)
else:
raise Exception("Unrecognised option: %s!" % option)
def _validate_field_names(self, val):
# Check for appropriate length
if self._field_names:
try:
assert len(val) == len(self._field_names)
except AssertionError:
raise Exception("Field name list has incorrect number of values, (actual) %d!=%d (expected)" % (
len(val), len(self._field_names)))
if self._rows:
try:
assert len(val) == len(self._rows[0])
except AssertionError:
raise Exception("Field name list has incorrect number of values, (actual) %d!=%d (expected)" % (
len(val), len(self._rows[0])))
# Check for uniqueness
try:
assert len(val) == len(set(val))
except AssertionError:
raise Exception("Field names must be unique!")
def _validate_header_style(self, val):
try:
assert val in ("cap", "title", "upper", "lower", None)
except AssertionError:
raise Exception("Invalid header style, use cap, title, upper, lower or None!")
def _validate_align(self, val):
try:
assert val in ["l", "c", "r"]
except AssertionError:
raise Exception("Alignment %s is invalid, use l, c or r!" % val)
def _validate_nonnegative_int(self, name, val):
try:
assert int(val) >= 0
except AssertionError:
raise Exception("Invalid value for %s: %s!" % (name, self._unicode(val)))
def _validate_true_or_false(self, name, val):
try:
assert val in (True, False)
except AssertionError:
raise Exception("Invalid value for %s! Must be True or False." % name)
def _validate_int_format(self, name, val):
if val == "":
return
try:
assert type(val) in (str, unicode)
assert val.isdigit()
except AssertionError:
raise Exception("Invalid value for %s! Must be an integer format string." % name)
def _validate_float_format(self, name, val):
if val == "":
return
try:
assert type(val) in (str, unicode)
assert "." in val
bits = val.split(".")
assert len(bits) <= 2
assert bits[0] == "" or bits[0].isdigit()
assert bits[1] == "" or bits[1].isdigit()
except AssertionError:
raise Exception("Invalid value for %s! Must be a float format string." % name)
def _validate_function(self, name, val):
try:
assert hasattr(val, "__call__")
except AssertionError:
raise Exception("Invalid value for %s! Must be a function." % name)
def _validate_hrules(self, name, val):
try:
assert val in (ALL, FRAME, NONE)
except AssertionError:
raise Exception("Invalid value for %s! Must be ALL, FRAME or NONE." % name)
def _validate_field_name(self, name, val):
try:
assert val in self._field_names
except AssertionError:
raise Exception("Invalid field name: %s!" % val)
def _validate_all_field_names(self, name, val):
try:
for x in val:
self._validate_field_name(name, x)
except AssertionError:
raise Exception("fields must be a sequence of field names!")
def _validate_single_char(self, name, val):
try:
assert _str_block_width(val) == 1
except AssertionError:
raise Exception("Invalid value for %s! Must be a string of length 1." % name)
def _validate_attributes(self, name, val):
try:
assert isinstance(val, dict)
except AssertionError:
raise Exception("attributes must be a dictionary of name/value pairs!")
##############################
# ATTRIBUTE MANAGEMENT #
##############################
    def _get_field_names(self):
        """The names of the fields
        Arguments:
        fields - list or tuple of field names"""
        return self._field_names
def _set_field_names(self, val):
val = [self._unicode(x) for x in val]
self._validate_option("field_names", val)
if self._field_names:
old_names = self._field_names[:]
self._field_names = val
if self._align and old_names:
for old_name, new_name in zip(old_names, val):
self._align[new_name] = self._align[old_name]
for old_name in old_names:
self._align.pop(old_name)
else:
for field in self._field_names:
self._align[field] = "c"
field_names = property(_get_field_names, _set_field_names)
def _get_align(self):
return self._align
def _set_align(self, val):
self._validate_align(val)
for field in self._field_names:
self._align[field] = val
align = property(_get_align, _set_align)
def _get_max_width(self):
return self._max_width
def _set_max_width(self, val):
self._validate_option("max_width", val)
for field in self._field_names:
self._max_width[field] = val
max_width = property(_get_max_width, _set_max_width)
def _get_start(self):
"""Start index of the range of rows to print
Arguments:
start - index of first data row to include in output"""
return self._start
def _set_start(self, val):
self._validate_option("start", val)
self._start = val
start = property(_get_start, _set_start)
def _get_end(self):
"""End index of the range of rows to print
Arguments:
end - index of last data row to include in output PLUS ONE (list slice style)"""
return self._end
def _set_end(self, val):
self._validate_option("end", val)
self._end = val
end = property(_get_end, _set_end)
def _get_sortby(self):
"""Name of field by which to sort rows
Arguments:
sortby - field name to sort by"""
return self._sortby
def _set_sortby(self, val):
self._validate_option("sortby", val)
self._sortby = val
sortby = property(_get_sortby, _set_sortby)
def _get_reversesort(self):
"""Controls direction of sorting (ascending vs descending)
Arguments:
reveresort - set to True to sort by descending order, or False to sort by ascending order"""
return self._reversesort
def _set_reversesort(self, val):
self._validate_option("reversesort", val)
self._reversesort = val
reversesort = property(_get_reversesort, _set_reversesort)
def _get_sort_key(self):
"""Sorting key function, applied to data points before sorting
Arguments:
sort_key - a function which takes one argument and returns something to be sorted"""
return self._sort_key
def _set_sort_key(self, val):
self._validate_option("sort_key", val)
self._sort_key = val
sort_key = property(_get_sort_key, _set_sort_key)
def _get_header(self):
"""Controls printing of table header with field names
Arguments:
header - print a header showing field names (True or False)"""
return self._header
def _set_header(self, val):
self._validate_option("header", val)
self._header = val
header = property(_get_header, _set_header)
def _get_header_style(self):
"""Controls stylisation applied to field names in header
Arguments:
header_style - stylisation to apply to field names in header ("cap", "title", "upper", "lower" or None)"""
return self._header_style
def _set_header_style(self, val):
self._validate_header_style(val)
self._header_style = val
header_style = property(_get_header_style, _set_header_style)
def _get_border(self):
"""Controls printing of border around table
Arguments:
border - print a border around the table (True or False)"""
return self._border
def _set_border(self, val):
self._validate_option("border", val)
self._border = val
border = property(_get_border, _set_border)
def _get_hrules(self):
"""Controls printing of horizontal rules after rows
Arguments:
hrules - horizontal rules style. Allowed values: FRAME, ALL, NONE"""
return self._hrules
def _set_hrules(self, val):
self._validate_option("hrules", val)
self._hrules = val
hrules = property(_get_hrules, _set_hrules)
def _get_int_format(self):
"""Controls formatting of integer data
Arguments:
int_format - integer format string"""
return self._int_format
def _set_int_format(self, val):
# self._validate_option("int_format", val)
for field in self._field_names:
self._int_format[field] = val
int_format = property(_get_int_format, _set_int_format)
def _get_float_format(self):
"""Controls formatting of floating point data
Arguments:
float_format - floating point format string"""
return self._float_format
def _set_float_format(self, val):
# self._validate_option("float_format", val)
for field in self._field_names:
self._float_format[field] = val
float_format = property(_get_float_format, _set_float_format)
def _get_padding_width(self):
"""The number of empty spaces between a column's edge and its content
Arguments:
padding_width - number of spaces, must be a positive integer"""
return self._padding_width
def _set_padding_width(self, val):
self._validate_option("padding_width", val)
self._padding_width = val
padding_width = property(_get_padding_width, _set_padding_width)
def _get_left_padding_width(self):
"""The number of empty spaces between a column's left edge and its content
Arguments:
left_padding - number of spaces, must be a positive integer"""
return self._left_padding_width
def _set_left_padding_width(self, val):
self._validate_option("left_padding_width", val)
self._left_padding_width = val
left_padding_width = property(_get_left_padding_width, _set_left_padding_width)
def _get_right_padding_width(self):
"""The number of empty spaces between a column's right edge and its content
Arguments:
right_padding - number of spaces, must be a positive integer"""
return self._right_padding_width
def _set_right_padding_width(self, val):
self._validate_option("right_padding_width", val)
self._right_padding_width = val
right_padding_width = property(_get_right_padding_width, _set_right_padding_width)
def _get_vertical_char(self):
"""The charcter used when printing table borders to draw vertical lines
Arguments:
vertical_char - single character string used to draw vertical lines"""
return self._vertical_char
def _set_vertical_char(self, val):
val = self._unicode(val)
self._validate_option("vertical_char", val)
self._vertical_char = val
vertical_char = property(_get_vertical_char, _set_vertical_char)
def _get_horizontal_char(self):
"""The charcter used when printing table borders to draw horizontal lines
Arguments:
horizontal_char - single character string used to draw horizontal lines"""
return self._horizontal_char
def _set_horizontal_char(self, val):
val = self._unicode(val)
self._validate_option("horizontal_char", val)
self._horizontal_char = val
horizontal_char = property(_get_horizontal_char, _set_horizontal_char)
def _get_junction_char(self):
"""The charcter used when printing table borders to draw line junctions
Arguments:
junction_char - single character string used to draw line junctions"""
return self._junction_char
def _set_junction_char(self, val):
val = self._unicode(val)
self._validate_option("vertical_char", val)
self._junction_char = val
junction_char = property(_get_junction_char, _set_junction_char)
def _get_format(self):
"""Controls whether or not HTML tables are formatted to match styling options
Arguments:
format - True or False"""
return self._format
def _set_format(self, val):
self._validate_option("format", val)
self._format = val
format = property(_get_format, _set_format)
def _get_attributes(self):
"""A dictionary of HTML attribute name/value pairs to be included in the <table> tag when printing HTML
Arguments:
attributes - dictionary of attributes"""
return self._attributes
def _set_attributes(self, val):
self._validate_option("attributes", val)
self._attributes = val
attributes = property(_get_attributes, _set_attributes)
##############################
# OPTION MIXER #
##############################
def _get_options(self, kwargs):
options = {}
for option in self._options:
if option in kwargs:
self._validate_option(option, kwargs[option])
options[option] = kwargs[option]
else:
options[option] = getattr(self, "_" + option)
return options
##############################
# PRESET STYLE LOGIC #
##############################
def set_style(self, style):
if style == DEFAULT:
self._set_default_style()
elif style == MSWORD_FRIENDLY:
self._set_msword_style()
elif style == PLAIN_COLUMNS:
self._set_columns_style()
elif style == RANDOM:
self._set_random_style()
else:
raise Exception("Invalid pre-set style!")
def _set_default_style(self):
self.header = True
self.border = True
self._hrules = FRAME
self.padding_width = 1
self.left_padding_width = 1
self.right_padding_width = 1
self.vertical_char = "|"
self.horizontal_char = "-"
self.junction_char = "+"
def _set_msword_style(self):
self.header = True
self.border = True
self._hrules = NONE
self.padding_width = 1
self.left_padding_width = 1
self.right_padding_width = 1
self.vertical_char = "|"
def _set_columns_style(self):
self.header = True
self.border = False
self.padding_width = 1
self.left_padding_width = 0
self.right_padding_width = 8
def _set_random_style(self):
# Just for fun!
self.header = random.choice((True, False))
self.border = random.choice((True, False))
self._hrules = random.choice((ALL, FRAME, NONE))
self.left_padding_width = random.randint(0, 5)
self.right_padding_width = random.randint(0, 5)
self.vertical_char = random.choice("~!@#$%^&*()_+|-={}[];':\",./;<>?")
self.horizontal_char = random.choice("~!@#$%^&*()_+|-={}[];':\",./;<>?")
self.junction_char = random.choice("~!@#$%^&*()_+|-={}[];':\",./;<>?")
##############################
# DATA INPUT METHODS #
##############################
def add_row(self, row):
"""Add a row to the table
Arguments:
row - row of data, should be a list with as many elements as the table
has fields"""
if self._field_names and len(row) != len(self._field_names):
raise Exception(
"Row has incorrect number of values, (actual) %d!=%d (expected)" % (len(row), len(self._field_names)))
if not self._field_names:
self.field_names = [("Field %d" % (n + 1)) for n in range(0, len(row))]
self._rows.append(list(row))
def del_row(self, row_index):
"""Delete a row to the table
Arguments:
row_index - The index of the row you want to delete. Indexing starts at 0."""
if row_index > len(self._rows) - 1:
raise Exception("Cant delete row at index %d, table only has %d rows!" % (row_index, len(self._rows)))
del self._rows[row_index]
def add_column(self, fieldname, column, align="c"):
"""Add a column to the table.
Arguments:
fieldname - name of the field to contain the new column of data
column - column of data, should be a list with as many elements as the
table has rows
align - desired alignment for this column - "l" for left, "c" for centre and "r" for right"""
if len(self._rows) in (0, len(column)):
self._validate_align(align)
self._field_names.append(fieldname)
self._align[fieldname] = align
for i in range(0, len(column)):
if len(self._rows) < i + 1:
self._rows.append([])
self._rows[i].append(column[i])
else:
raise Exception("Column length %d does not match number of rows %d!" % (len(column), len(self._rows)))
def clear_rows(self):
"""Delete all rows from the table but keep the current field names"""
self._rows = []
def clear(self):
"""Delete all rows and field names from the table, maintaining nothing but styling options"""
self._rows = []
self._field_names = []
self._widths = []
##############################
# MISC PUBLIC METHODS #
##############################
def copy(self):
return copy.deepcopy(self)
##############################
# MISC PRIVATE METHODS #
##############################
def _format_value(self, field, value):
if isinstance(value, int) and field in self._int_format:
value = self._unicode(("{0:" + self._int_format[field] + "}").format(value))
elif isinstance(value, float) and field in self._float_format:
value = self._unicode(("{0:" + self._float_format[field] + "}").format(value))
return self._unicode(value)
def _compute_widths(self, rows, options):
if options["header"]:
widths = [_get_size(field)[0] for field in self._field_names]
else:
widths = len(self.field_names) * [0]
for row in rows:
for index, value in enumerate(row):
fieldname = self.field_names[index]
if fieldname in self.max_width:
widths[index] = max(widths[index], min(_get_size(value)[0], self.max_width[fieldname]))
else:
widths[index] = max(widths[index], _get_size(value)[0])
self._widths = widths
def _get_padding_widths(self, options):
if options["left_padding_width"] is not None:
lpad = options["left_padding_width"]
else:
lpad = options["padding_width"]
if options["right_padding_width"] is not None:
rpad = options["right_padding_width"]
else:
rpad = options["padding_width"]
return lpad, rpad
def _get_rows(self, options):
"""Return only those data rows that should be printed, based on slicing and sorting.
Arguments:
options - dictionary of option settings."""
# Make a copy of only those rows in the slice range
rows = copy.deepcopy(self._rows[options["start"]:options["end"]])
# Sort if necessary
if options["sortby"]:
sortindex = self._field_names.index(options["sortby"])
# Decorate
rows = [[row[sortindex]] + row for row in rows]
# Sort
rows.sort(reverse=options["reversesort"], key=options["sort_key"])
# Undecorate
rows = [row[1:] for row in rows]
return rows
def _format_row(self, row, options):
return [self._format_value(field, value) for (field, value) in zip(self._field_names, row)]
def _format_rows(self, rows, options):
return [self._format_row(row, options) for row in rows]
##############################
# PLAIN TEXT STRING METHODS #
##############################
def get_string(self, **kwargs):
"""Return string representation of table in current state.
Arguments:
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
vertical_char - single character string used to draw vertical lines
horizontal_char - single character string used to draw horizontal lines
junction_char - single character string used to draw line junctions
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
reversesort - True or False to sort in descending or ascending order"""
options = self._get_options(kwargs)
lines = []
# Don't think too hard about an empty table
# Is this the desired behaviour? Maybe we should still print the header?
if self.rowcount == 0:
return ""
# Get the rows we need to print, taking into account slicing, sorting, etc.
rows = self._get_rows(options)
# Turn all data in all rows into Unicode, formatted as desired
formatted_rows = self._format_rows(rows, options)
# Compute column widths
self._compute_widths(formatted_rows, options)
# Add header or top of border
self._hrule = self._stringify_hrule(options)
if options["header"]:
lines.append(self._stringify_header(options))
elif options["border"] and options["hrules"] != NONE:
lines.append(self._hrule)
# Add rows
for row in formatted_rows:
lines.append(self._stringify_row(row, options))
# Add bottom of border
if options["border"] and not options["hrules"]:
lines.append(self._hrule)
return self._unicode("\n").join(lines)
def _stringify_hrule(self, options):
if not options["border"]:
return ""
lpad, rpad = self._get_padding_widths(options)
bits = [options["junction_char"]]
for field, width in zip(self._field_names, self._widths):
if options["fields"] and field not in options["fields"]:
continue
bits.append((width + lpad + rpad) * options["horizontal_char"])
bits.append(options["junction_char"])
return "".join(bits)
def _stringify_header(self, options):
bits = []
lpad, rpad = self._get_padding_widths(options)
if options["border"]:
if options["hrules"] != NONE:
bits.append(self._hrule)
bits.append("\n")
bits.append(options["vertical_char"])
for field, width, in zip(self._field_names, self._widths):
if options["fields"] and field not in options["fields"]:
continue
if self._header_style == "cap":
fieldname = field.capitalize()
elif self._header_style == "title":
fieldname = field.title()
elif self._header_style == "upper":
fieldname = field.upper()
elif self._header_style == "lower":
fieldname = field.lower()
else:
fieldname = field
bits.append(" " * lpad + self._justify(fieldname, width, self._align[field]) + " " * rpad)
if options["border"]:
bits.append(options["vertical_char"])
if options["border"] and options["hrules"] != NONE:
bits.append("\n")
bits.append(self._hrule)
return "".join(bits)
def _stringify_row(self, row, options):
for index, field, value, width, in zip(range(0, len(row)), self._field_names, row, self._widths):
# Enforce max widths
lines = value.split("\n")
new_lines = []
for line in lines:
if _str_block_width(line) > width:
line = textwrap.fill(line, width)
new_lines.append(line)
lines = new_lines
value = "\n".join(lines)
row[index] = value
row_height = 0
for c in row:
h = _get_size(c)[1]
if h > row_height:
row_height = h
bits = []
lpad, rpad = self._get_padding_widths(options)
for y in range(0, row_height):
bits.append([])
if options["border"]:
bits[y].append(self.vertical_char)
for field, value, width, in zip(self._field_names, row, self._widths):
lines = value.split("\n")
if len(lines) < row_height:
lines = lines + ([""] * (row_height - len(lines)))
y = 0
for l in lines:
if options["fields"] and field not in options["fields"]:
continue
bits[y].append(" " * lpad + self._justify(l, width, self._align[field]) + " " * rpad)
if options["border"]:
bits[y].append(self.vertical_char)
y += 1
if options["border"] and options["hrules"] == ALL:
bits[row_height - 1].append("\n")
bits[row_height - 1].append(self._hrule)
for y in range(0, row_height):
bits[y] = "".join(bits[y])
return "\n".join(bits)
##############################
# HTML STRING METHODS #
##############################
def get_html_string(self, **kwargs):
"""Return string representation of HTML formatted version of table in current state.
Arguments:
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
attributes - dictionary of name/value pairs to include as HTML attributes in the <table> tag"""
options = self._get_options(kwargs)
if options["format"]:
string = self._get_formatted_html_string(options)
else:
string = self._get_simple_html_string(options)
return string
def _get_simple_html_string(self, options):
lines = []
open_tag = []
open_tag.append("<table")
if options["border"]:
open_tag.append(" border=\"1\"")
if options["attributes"]:
for attr_name in options["attributes"]:
open_tag.append(" %s=\"%s\"" % (attr_name, options["attributes"][attr_name]))
open_tag.append(">")
lines.append("".join(open_tag))
# Headers
if options["header"]:
lines.append(" <tr>")
for field in self._field_names:
if options["fields"] and field not in options["fields"]:
continue
lines.append(" <th>%s</th>" % escape(field).replace("\n", "<br />"))
lines.append(" </tr>")
# Data
rows = self._get_rows(options)
formatted_rows = self._format_rows(rows, options)
for row in formatted_rows:
lines.append(" <tr>")
for field, datum in zip(self._field_names, row):
if options["fields"] and field not in options["fields"]:
continue
lines.append(" <td>%s</td>" % escape(datum).replace("\n", "<br />"))
lines.append(" </tr>")
lines.append("</table>")
return self._unicode("\n").join(lines)
def _get_formatted_html_string(self, options):
lines = []
lpad, rpad = self._get_padding_widths(options)
open_tag = []
open_tag.append("<table")
if options["border"]:
open_tag.append(" border=\"1\"")
if options["hrules"] == NONE:
open_tag.append(" frame=\"vsides\" rules=\"cols\"")
if options["attributes"]:
for attr_name in options["attributes"]:
open_tag.append(" %s=\"%s\"" % (attr_name, options["attributes"][attr_name]))
open_tag.append(">")
lines.append("".join(open_tag))
# Headers
if options["header"]:
lines.append(" <tr>")
for field in self._field_names:
if options["fields"] and field not in options["fields"]:
continue
lines.append(
" <th style=\"padding-left: %dem; padding-right: %dem; text-align: center\">%s</th>" % (
lpad, rpad, escape(field).replace("\n", "<br />")))
lines.append(" </tr>")
# Data
rows = self._get_rows(options)
formatted_rows = self._format_rows(rows, options)
aligns = []
for field in self._field_names:
aligns.append({"l": "left", "r": "right", "c": "center"}[self._align[field]])
for row in formatted_rows:
lines.append(" <tr>")
for field, datum, align in zip(self._field_names, row, aligns):
if options["fields"] and field not in options["fields"]:
continue
lines.append(" <td style=\"padding-left: %dem; padding-right: %dem; text-align: %s\">%s</td>" % (
lpad, rpad, align, escape(datum).replace("\n", "<br />")))
lines.append(" </tr>")
lines.append("</table>")
return self._unicode("\n").join(lines)
##############################
# UNICODE WIDTH FUNCTIONS #
##############################
def _char_block_width(char):
# Basic Latin, which is probably the most common case
# if char in xrange(0x0021, 0x007e):
# if char >= 0x0021 and char <= 0x007e:
if 0x0021 <= char <= 0x007e:
return 1
# Chinese, Japanese, Korean (common)
if 0x4e00 <= char <= 0x9fff:
return 2
# Hangul
if 0xac00 <= char <= 0xd7af:
return 2
# Combining?
if unicodedata.combining(uni_chr(char)):
return 0
# Hiragana and Katakana
if 0x3040 <= char <= 0x309f or 0x30a0 <= char <= 0x30ff:
return 2
# Full-width Latin characters
if 0xff01 <= char <= 0xff60:
return 2
# CJK punctuation
if 0x3000 <= char <= 0x303e:
return 2
# Backspace and delete
if char in (0x0008, 0x007f):
return -1
# Other control characters
elif 0x0000 <= char <= 0x001f:
return 0
# Take a guess
return 1
def _str_block_width(val):
return sum(itermap(_char_block_width, itermap(ord, val)))
##############################
# TABLE FACTORIES #
##############################
def from_csv(fp, field_names=None):
dialect = csv.Sniffer().sniff(fp.read(1024))
fp.seek(0)
reader = csv.reader(fp, dialect)
table = PrettyTable()
if field_names:
table.field_names = field_names
else:
table.field_names = [x.strip() for x in next(reader)]
for row in reader:
table.add_row([x.strip() for x in row])
return table
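# Hedged usage sketch (assumes a "cities.csv" file exists):
#   with open("cities.csv") as fp:
#       table = from_csv(fp)
#       print(table.get_string())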
def from_db_cursor(cursor):
table = PrettyTable()
table.field_names = [col[0] for col in cursor.description]
for row in cursor.fetchall():
table.add_row(row)
return table
##############################
# MAIN (TEST FUNCTION) #
##############################
def main():
x = PrettyTable(["City name", "Area", "Population", "Annual Rainfall"])
x.sortby = "Population"
x.reversesort = True
x.int_format["Area"] = "04d"
x.float_format = "6.1f"
x.align["City name"] = "l" # Left align city names
x.add_row(["Adelaide", 1295, 1158259, 600.5])
x.add_row(["Brisbane", 5905, 1857594, 1146.4])
x.add_row(["Darwin", 112, 120900, 1714.7])
x.add_row(["Hobart", 1357, 205556, 619.5])
x.add_row(["Sydney", 2058, 4336374, 1214.8])
x.add_row(["Melbourne", 1566, 3806092, 646.9])
x.add_row(["Perth", 5386, 1554769, 869.4])
print(x)
if __name__ == "__main__":
main() | zoran-tools | /zoran_tools-0.2.2.tar.gz/zoran_tools-0.2.2/zoran_tools/frame/tools/prettytable.py | prettytable.py |
import math
import matplotlib.pyplot as plt
import numpy as np
from typing import List, Dict
__all__ = ['Barrier', ]
class Barrier(object):
"""
A barrier object: an ordered sequence of points joined into a closed ring
"""
def __init__(self, points: List, meta: Dict = None):
"""
:param points: a list like [(x, y), (x, y), ...] where each element is a longitude/latitude pair
:param meta: metadata describing the barrier (e.g. its name)
"""
self.meta = meta
if points[0] != points[-1]:
points.append(points[0])  # close the ring by repeating the first point
self.points = [dict(x=point[0], y=point[1]) for point in points]
self.maxx = max([p[0] for p in points])
self.lines = [(s, e) for s, e in zip(self.points[:-1], self.points[1:])]
self.xs = [point['x'] for point in self.points]
self.ys = [point['y'] for point in self.points]
@staticmethod
def intersect(A, B, P, Q):
"""
Determine whether segments AB and PQ intersect (merely touching does not count as intersecting).
:param A: dict, start point of segment AB
:param B: dict, end point of segment AB
:param P: dict, the target point whose containment is being tested; start point of PQ
:param Q: dict, end point of the horizontal segment drawn from P to the grid boundary (at the maximum x coordinate)
:return: a boolean
"""
if (
(A['y'] > P['y'] and B['y'] > P['y']) or  # A and B are both above PQ
(A['x'] < P['x'] and B['x'] < P['x']) or  # A and B are both to the left of P
(A['y'] < P['y'] and B['y'] < P['y'])  # A and B are both below PQ
):
return False
x = (P['y'] - A['y']) * (B['x'] - A['x']) / (B['y'] - A['y']) + A['x']
if P['x'] < x <= Q['x']:  # the intersection's x coordinate lies between P and Q
print('Intersection found between P and Q', A, B)
return True
else:
return False
def point_in_net(self, P):
"""
Determine whether point P lies inside the barrier represented by self.
:param P: the point to test
"""
Q = dict(x=self.maxx, y=P[1])
P = dict(x=P[0], y=P[1])
# Count the number of intersections; if P lies on an edge of the barrier, treat it as inside and return True immediately
count = 0
for line in self.lines:
A, B = line
if Barrier.point_in_line(A, B, P):
print('Point lies on an edge')
return True
if Barrier.intersect(A, B, P, Q):
count += 1
# If PQ passes through a vertex of the barrier it is counted as two intersections, so subtract one for each such vertex
for point in self.points:
if point['y'] == P['y'] and (point['x'] - P['x']) * (point['x'] - Q['x']) <= 0:
count -= 1
if count % 2 == 1:
return True
else:
return False
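# Hedged usage sketch with made-up coordinates:
#   square = Barrier([(0, 0), (4, 0), (4, 4), (0, 4)])
#   square.point_in_net((2, 2))  # True: the point is inside the ring
#   square.point_in_net((9, 9))  # False: the point is outside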
@staticmethod
def point_in_line(A, B, P):
"""
Determine whether point P lies on segment AB
"""
if A == P or B == P:
return True
if Barrier.slope(A, B) == Barrier.slope(A, P) and (A['x'] - P['x']) * (B['x'] - P['x']) <= 0:
return True
return False
@staticmethod
def slope(A, B):
"""
Compute the slope of segment AB
"""
if A['x'] == B['x']:
return None
elif A['y'] == B['y']:
return 0
else:
return (A['y'] - B['y']) / (A['x'] - B['x'])
def plot(self, P):
"""
Plot the barrier and point P
"""
plt.plot(self.xs, self.ys, 'r-o')
plt.plot([P[0], self.maxx], [P[1], P[1]])
is_in = self.point_in_net(P)
if is_in:
plt.title("点在网格中", fontproperties='SimHei')
else:
plt.title("点不在网格中", fontproperties='SimHei')
plt.show()
return is_in | zoran-tools | /zoran_tools-0.2.2.tar.gz/zoran_tools-0.2.2/zoran_tools/calculate/barrier.py | barrier.py |
# ZorbPy
*Python library for integrating with the [Somatic Zorb Engine](https://zorbtouch.com)*
[](https://github.com/SomaticLabs/ZorbPy/blob/master/LICENSE)
[](http://twitter.com/SomaticLabs)
## Installation
First install the [Adafruit BluefruitLE library](https://github.com/adafruit/Adafruit_Python_BluefruitLE).
Please note that this library currently supports only macOS and Linux, as Windows is not supported by the [underlying BLE](https://github.com/adafruit/Adafruit_Python_BluefruitLE) package that it depends on.
After installing the BluefruitLE library, installation of ZorbPy using [pip](https://pypi.org/project/pip/) is simple:
```sh
pip install zorb
```
## Library Usage
For a quick example on how to use the ZorbPy library, please reference [example.py](https://github.com/SomaticLabs/ZorbPy/blob/master/example.py).
To use the ZorbPy library, you must wrap the functionality of your program in a function that is passed to the `zorb.run()` function call.
Any use of the functions provided by this library outside of the process started by `zorb.run()` will result in errors.
The ZorbPy library provides three main functionalities:
- connecting to advertising Zorb devices
- triggering presets on the Zorb device
- directly controlling actuator intensity on the Zorb device
To connect to an advertising Zorb device:
```python
zorb.connect()
```
To trigger one of the available presets:
```python
zorb.triggerPattern(zorb.POINT_LEFT)
```
*Note that haptic presets exist for the following emojis:*
🎊, 👈, 👉, 🤛, 🤜, ⏮️, ⏭️, 🙌, 👋, 😯, 😳, 😬, 😊, 😄, 🤣
To directly set the actuator values:
```python
duration = 100
top_left = 0
top_right = 0
bottom_left = 25
bottom_right = 25
zorb.writeActuators(duration, top_left, top_right, bottom_left, bottom_right)
```
Below is a more comprehensive example of a simple program that connects to a Zorb device, plays a confetti pattern upon successful connection, and then updates actuator values based on some hypothetical sensor output.
```python
import zorb
def mainloop():
# perform initial connection to Zorb device
zorb.connect()
# trigger confetti effect upon successful connection
zorb.triggerPattern(zorb.CONFETTI)
# enter infinite loop for updating Zorb device
while True:
top_left = hypothetical_sensor_1.val()
top_right = hypothetical_sensor_2.val()
bottom_left = hypothetical_sensor_3.val()
bottom_right = hypothetical_sensor_4.val()
zorb.writeActuators(10, top_left, top_right, bottom_left, bottom_right)
time.sleep(0.01)
def main():
zorb.run(mainloop)
if __name__ == '__main__':
main()
```
## Style Guide
Contributions to this project should conform to this [Python Style Guide](https://www.python.org/dev/peps/pep-0008/).
## License
ZorbPy is released under the [MIT license](https://github.com/SomaticLabs/ZorbPy/blob/master/LICENSE).
| zorb | /zorb-0.1.5.tar.gz/zorb-0.1.5/README.md | README.md |
# zoResearch
## A notes and annotation manager built on top of [Zotero](http://zotero.com/)
### Uses
***Zotero* is a fantastic resource for keeping track of source metadata and citations. But when it comes to note-taking, it's lacking.** **ZoResearch** takes the sources in your Zotero library, extracts any annotations from associated PDFs and displays them in an easy-to-use interface. For a given project, keep all your notes in an accessible place that updates alongside your research.
- Organize sources by Zotero collection
- Automatically extract annotations from source PDFs
- Add notes for each source
---
### Installation
```
pip install zoresearch
```
---
### How to use
```python
import zoresearch
zoresearch.open(zotero_folder_location, zotero_collection_name=all)
```
- **zotero_folder_location**
- Filepath for Zotero folder installed on your system.
- Ex: C:\\Users\\Username\\Zotero
- **zotero_collection_name, default all**
- Name of Zotero collection for start-up. Defaults to sources in **all** collections. Multiple words permitted. Case agnostic.
- Ex: My Research Project
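For example, a start-up script might look like this (the folder path and collection name below are placeholders for your own values):
```python
import zoresearch
zoresearch.open('C:\\Users\\Username\\Zotero', 'My Research Project')
```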
---
### Interface
Zotero sources are displayed on a scrollable sidebar. The main window displays the selected source with user-generated notes as well as citations from the source's associated PDF (if any).
- Use dropdown menus to select different Zotero collections and to filter sources by either 'Starred' or 'Unread'.
- Click 'Show PDF' for thumbnails.

---
### Source annotations
Highlighting and adding comments to PDFs are convenient ways to take notes. But extracting them can be a pain! zoResearch extracts all these annotations for easy access.
- Annotations are labeled with their absolute page number in the associated PDF and type (highlight, sticky note).
- New annotations are added at zoResearch startup.

---
### Adding notes
In addition to PDF annotations, users can add additional notes directly to the viewer.
- zoResearch automatically saves these notes.

---
### Starring a source
Want to designate some important sources? Simply 'Star' a source so that you can easily find it using the dropdown menu.

| zoresearch | /zoresearch-0.1.0.tar.gz/zoresearch-0.1.0/README.md | README.md |
# Zorg Edison
The `zorg-edison` module wraps the mraa library created by Intel for interacting
with IO pins on the Edison microcontroller. While the Python bindings for mraa
work great on their own, the benefit of using Zorg is
that we have already created many of the drivers you
need for using sensors and output devices. Zorg also
implements multiprocessing so that tasks such as reading from and
writing to sensors are non-blocking, allowing you to take full
advantage of multi-processor boards like the Intel Edison.
**Zorg also creates a REST API for you :)**
## Installation
```
pip install zorg-edison
```
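## Example
Below is a minimal blink sketch, assuming the companion `zorg` and `zorg-gpio` packages are also installed (the name and timing values are illustrative):
```python
import time
import zorg
def work(my):
    while True:
        my.led.toggle()  # toggle the on-board LED
        time.sleep(0.5)
robot = zorg.robot({
    "name": "blinker",
    "connections": {
        "edison": {
            "adaptor": "zorg_edison.Edison",
        },
    },
    "devices": {
        "led": {
            "connection": "edison",
            "driver": "zorg_gpio.Led",
            "pin": 13,  # 13 is the on-board LED
        },
    },
    "work": work,
})
robot.start()
```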
## [Documentation](http://zorg-edison.readthedocs.org/)
| zorg-edison | /zorg-edison-0.0.2.tar.gz/zorg-edison-0.0.2/README.md | README.md |
from zorg.adaptor import Adaptor
import sys
try:
import mraa
except ImportError:
sys.stderr.write("Could not load the Python bindings for libmraa\n")
mraa = None
MIN_PULSE_WIDTH = 600
MAX_PULSE_WIDTH = 2600
MAX_PERIOD = 7968
class Edison(Adaptor):
def __init__(self, options):
super(Edison, self).__init__(options)
self.pins = {
"digital": {},
"analog": {},
"pwm": {},
"i2c": None,
}
def servo_write(self, pin, degrees):
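# Map degrees (0-180) linearly onto the servo pulse-width range,
# MIN_PULSE_WIDTH to MAX_PULSE_WIDTH microseconds.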
pulse_width = MIN_PULSE_WIDTH + \
(degrees / 180.0) * (MAX_PULSE_WIDTH - MIN_PULSE_WIDTH)
self.pwm_write(pin, int(pulse_width), MAX_PERIOD)
def pwm_write(self, pin_number, value, period):
if not pin_number in self.pins["pwm"]:
pin = mraa.Pwm(pin_number)
self.pins["pwm"][pin_number] = pin
else:
pin = self.pins["pwm"][pin_number]
pin.period_us(period)
# The pin needs to be enabled if it wasn't already
pin.enable(True)
pin.pulsewidth_us(value)
def digital_write(self, pin_number, value):
if not pin_number in self.pins["digital"]:
pin = mraa.Gpio(pin_number)
self.pins["digital"][pin_number] = pin
else:
pin = self.pins["digital"][pin_number]
pin.dir(mraa.DIR_OUT)
pin.write(value)
def digital_read(self, pin_number):
if not pin_number in self.pins["digital"]:
pin = mraa.Gpio(pin_number)
self.pins["digital"][pin_number] = pin
else:
pin = self.pins["digital"][pin_number]
pin.dir(mraa.DIR_IN)
return pin.read()
def analog_read(self, pin_number):
if not pin_number in self.pins["analog"]:
pin = mraa.Aio(pin_number)
self.pins["analog"][pin_number] = pin
else:
pin = self.pins["analog"][pin_number]
return pin.read()
def i2c_write(self, pin_number, address, register, data):
"""
Requires a pin number, device address, a register to write to,
and the data to write to the register.
"""
if not self.pins["i2c"]:
bus = mraa.I2c(pin_number)
self.pins["i2c"] = bus
else:
bus = self.pins["i2c"]
bus.address(address)
bus.writeReg(register, data) | zorg-edison | /zorg-edison-0.0.2.tar.gz/zorg-edison-0.0.2/zorg_edison/adaptor.py | adaptor.py |
# zorg-emic
[](https://gitter.im/zorg/zorg?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[](https://pypi.python.org/pypi/zorg-emic/)
[](https://travis-ci.org/zorg/zorg-emic)
[](http://zorg-emic.readthedocs.org/en/latest/?badge=latest)
This is a python driver for controlling the [Emic2](https://www.sparkfun.com/products/11711) text to speech module with Zorg. Zorg (https://zorg.github.io/) is a Python framework for robotics and physical computing.
## Description
This driver allows commands to be sent to the Emic 2 text to speech module using a serial connection.
The driver automatically queues commands and asynchronously transmits them to the text to speech module when it is ready to process them.
More information can be found in the project [documentation](http://zorg-emic.rtfd.org).
## Examples
See examples for using this library in the [examples](https://github.com/zorg/zorg-emic/tree/master/examples) directory.
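The sketch below shows the general shape of a robot definition for this driver; the serial adaptor name and port are hypothetical placeholders, so refer to the examples directory for working configurations:
```python
import zorg
def work(my):
    my.emic.speak("Hello from Zorg")
robot = zorg.robot({
    "name": "speaker",
    "connections": {
        "serial": {
            "adaptor": "zorg_serial.Serial",  # hypothetical adaptor name
            "port": "/dev/ttyUSB0",  # hypothetical serial port
        },
    },
    "devices": {
        "emic": {
            "connection": "serial",
            "driver": "zorg_emic.Emic2",
        },
    },
    "work": work,
})
robot.start()
```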
## Getting Started
Install the module with: `pip install zorg-emic`
## License
[Copyright (c) 2015 Team Zorg](https://github.com/zorg/zorg-emic/blob/master/LICENSE.md)
| zorg-emic | /zorg-emic-0.0.3.tar.gz/zorg-emic-0.0.3/README.md | README.md |
from zorg.driver import Driver
from multiprocessing import Queue
from threading import Thread
import time
class Emic2(Driver):
def __init__(self, options, connection):
super(Emic2, self).__init__(options, connection)
self.currentAction = 'idle'
self.queue = Queue()
self.thread = Thread(target=self.watch, args=())
self.thread.daemon = True
self.commands += [
"speak", "set_voice", "set_language",
"set_volume", "set_rate", "set_parser",
"pause", "stop"
]
def watch(self):
while True:
waiting = True
# Wait if the queue is empty
if self.queue.empty():
time.sleep(0.5)
continue
while waiting:
self.connection.serial.write("\n")
time.sleep(0.3)
data = self.connection.serial_read()
# The Emic 2 transmits a ":" when ready to receive commands
if data == ':':
value = self.queue.get()
self.connection.serial_write("%s\n" % (value))
waiting = False
time.sleep(0.5)
self.connection.disconnect()
def start(self):
self.connection.connect()
# Setup involves writing a new line to initialize the board
self.connection.serial_write('\n')
# Pause for 50 milliseconds
time.sleep(0.05)
# Start a background thread to process items in the queue
self.thread.start()
def is_valid_string(self, text):
"""
The Emic 2 expects characters that conform to the ISO-8859-1 Latin
character set. This method returns False if the string contains
characters outside the 7-bit ASCII subset that this driver accepts.
"""
return all(ord(character) < 128 for character in text)
def word_wrap(self, text, width=1023):
"""
A simple greedy word-wrapping algorithm that puts
as many words on a single line as possible.
"""
substrings = []
string = text
while len(string) > width:
index = width - 1
while not string[index].isspace():
index = index - 1
line = string[0:index]
substrings.append(line)
string = string[index + 1:]
substrings.append(string)
return substrings
def speak(self, text):
"""
The main function to convert text into speech.
"""
if not self.is_valid_string(text):
raise Exception("%s is not ISO-8859-1 compatible." % (text))
# Maximum allowable 1023 characters per message
if len(text) > 1023:
lines = self.word_wrap(text, width=1023)
for line in lines:
self.queue.put("S%s" % (line))
else:
self.queue.put("S%s" % (text))
def set_voice(self, voice):
"""
Change between 9 available voices on the Emic2.
0: Perfect Paul (Paulo)
1: Huge Harry (Francisco)
2: Beautiful Betty
3: Uppity Ursula
4: Doctor Dennis (Enrique)
5: Kit the Kid
6: Frail Frank
7: Rough Rita
8: Whispering Wendy (Beatriz)
"""
self.currentAction = 'setting voice'
self.queue.put('N%d' % (voice))
def set_language(self, language, dialect=None):
"""
Set the language used for TTS.
en: English
es: Spanish | [ lan: latino or ca: castilian ]
"""
self.currentAction = 'setting language'
l = 0
if language == 'en':
l = 0
elif language == 'es':
l = 1
if dialect == 'ca':
l = 2
self.queue.put('l%s' % (l))
def set_volume(self, volume):
"""
Set the volume of the Emic 2.
Volume range [-48 to 18]
-48 (softest) to 18 (loudest)
"""
self.currentAction = 'setting volume'
self.queue.put('V%d' % (volume))
def set_rate(self, rate):
"""
Set the speaking rate in words per minute.
From 75 (slowest) to 600 (fastest).
Default value: 200.
"""
self.currentAction = 'setting rate'
self.queue.put('W%d' % (rate))
def set_parser(self, parser):
"""
Select either the Epson or DECtalk text parsing engine.
0 DECtalk
1 Epson (default)
"""
self.queue.put('P%d' % (parser))
def pause(self):
"""
Immediately pause current message.
"""
self.currentAction = 'paused'
self.queue.put('Z')
def stop(self):
"""
Immediately stop the current message from being spoken.
This command is only valid while a message is playing.
"""
self.currentAction = 'stopped'
self.queue.put('X')
def reset(self):
"""
Reset the current message being spoken.
"""
self.currentAction = 'resetting'
self.queue.put('R') | zorg-emic | /zorg-emic-0.0.3.tar.gz/zorg-emic-0.0.3/zorg_emic/emic2.py | emic2.py |
# Zorg Firmata
[](https://gitter.im/zorg/zorg?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[](https://pypi.python.org/pypi/zorg-firmata/)
[](https://requires.io/github/zorg/zorg-firmata/requirements/?branch=master)
[](https://travis-ci.org/zorg/zorg-firmata)
[](https://codeclimate.com/github/zorg/zorg-firmata)
[](https://coveralls.io/github/zorg/zorg-firmata?branch=master)
## What is Firmata?
> Firmata is a protocol for communicating with microcontrollers from software
> on a computer (or smartphone/tablet, etc). The protocol can be implemented
> in firmware on any microcontroller architecture as well as software on any
> computer software package (see list of client libraries below).
> ~ [Firmata Protocol Documentation](https://github.com/firmata/protocol)
## Installation
You can install this library on your machine using PIP.
```
pip install zorg-firmata
```
## Setup
To use this library with your microcontroller, you will need to load the
Standard Firmata software onto it first. See [Uploading StandardFirmata To Arduino](https://github.com/MrYsLab/pymata-aio/wiki/Uploading-StandardFirmata-To-Arduino) for an example of how to do this.
## Examples
Several examples for using the `zorg-firmata` module are available on GitHub.
https://github.com/zorg/zorg-firmata/tree/master/examples
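A minimal sketch, assuming a board running StandardFirmata on `/dev/ttyACM0` and the companion `zorg` and `zorg-gpio` packages (adjust the port for your setup):
```python
import time
import zorg
def work(my):
    while True:
        my.led.toggle()
        time.sleep(0.5)
robot = zorg.robot({
    "name": "firmata_blink",
    "connections": {
        "firmata": {
            "adaptor": "zorg_firmata.Firmata",
            "port": "/dev/ttyACM0",  # assumption: adjust for your board
        },
    },
    "devices": {
        "led": {
            "connection": "firmata",
            "driver": "zorg_gpio.Led",
            "pin": 13,
        },
    },
    "work": work,
})
robot.start()
```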
## Notes
This module wraps the [PyMata](https://github.com/MrYsLab/PyMata) library to
provide Firmata support within the Zorg robotics framework.
| zorg-firmata | /zorg-firmata-0.0.4.tar.gz/zorg-firmata-0.0.4/README.md | README.md |
from zorg.adaptor import Adaptor
from PyMata.pymata import PyMata
import time
import sys
import signal
class Firmata(Adaptor):
def __init__(self, options):
super(Firmata, self).__init__(options)
if 'port' not in options:
raise self.ParameterRequired(
'A port must be specified for Firmata connection.'
)
self.port = options.get('port')
self.board = PyMata(self.port, verbose=True)
signal.signal(signal.SIGINT, self.signal_handler)
self.pins = {
'digital': [],
'analog': [],
'pwm': [],
'servo': [],
'i2c': [],
}
def analog_write(self, pin_number, value):
if pin_number not in self.pins['analog']:
self.pins['analog'].append(pin_number)
self.board.set_pin_mode(
pin_number,
self.board.OUTPUT,
self.board.ANALOG
)
self.board.analog_write(pin_number, value)
def analog_read(self, pin_number):
if pin_number not in self.pins['analog']:
self.pins['analog'].append(pin_number)
self.board.set_pin_mode(
pin_number,
self.board.INPUT,
self.board.ANALOG
)
return self.board.analog_read(pin_number)
def digital_write(self, pin_number, value):
if pin_number not in self.pins['digital']:
self.pins['digital'].append(pin_number)
self.board.set_pin_mode(
pin_number,
self.board.OUTPUT,
self.board.DIGITAL
)
self.board.digital_write(pin_number, value)
def digital_read(self, pin_number):
if pin_number not in self.pins['digital']:
self.pins['digital'].append(pin_number)
self.board.set_pin_mode(
pin_number,
self.board.INPUT,
self.board.DIGITAL
)
return self.board.digital_read(pin_number)
def pwm_write(self, pin_number, value):
if pin_number not in self.pins['pwm']:
self.pins['pwm'].append(pin_number)
self.board.set_pin_mode(
pin_number,
self.board.PWM,
self.board.DIGITAL
)
return self.board.analog_write(pin_number, value)
def pwm_read(self, pin_number):
if pin_number not in self.pins['pwm']:
self.pins['pwm'].append(pin_number)
self.board.set_pin_mode(
pin_number,
self.board.PWM,
self.board.DIGITAL
)
return self.board.analog_read(pin_number)
def servo_write(self, pin_number, value):
if pin_number not in self.pins['servo']:
self.pins['servo'].append(pin_number)
self.board.servo_config(pin_number)
self.board.analog_write(pin_number, value)
def disconnect(self):
# Close the firmata interface down cleanly
self.board.close()
def signal_handler(self, sig, frame):
print('Ctrl+C pressed')
if self.board is not None:
self.board.reset()
sys.exit(0)
class ParameterRequired(Exception):
def __init__(self, message='A required parameter was not provided.'):
super(Firmata.ParameterRequired, self).__init__(message)
def __str__(self):
return self.message | zorg-firmata | /zorg-firmata-0.0.4.tar.gz/zorg-firmata-0.0.4/zorg_firmata/adaptor.py | adaptor.py |
zorg-gpio
=========
|Join the chat at https://gitter.im/zorg/zorg| |Package Version|
|Requirements Status| |Build Status| |Code Climate| |Coverage Status|
Zorg (https://zorg.github.io/) is a Python framework for robotics and
physical computing.
This module provides drivers for `General Purpose Input/Output
(GPIO) <https://en.wikipedia.org/wiki/General_Purpose_Input/Output>`__
devices. Typically, this library is registered by an adaptor class such
as ```zorg-edison`` <https://github.com/zorg/zorg-edison>`__ that
supports the needed interfaces for GPIO devices.
Getting Started
---------------
Install the module with: ``pip install zorg zorg-gpio``
`Documentation <http://zorg-gpio.readthedocs.org/>`__
-----------------------------------------------------
Example
-------
.. code:: python
import time
import zorg
def blink_led(my):
while True:
my.led.toggle()
time.sleep(100)
robot = zorg.robot({
"name": "Test",
"connections": {
"edison": {
"adaptor": "zorg_edison.Edison",
},
},
"devices": {
"led": {
"connection": "edison",
"driver": "zorg_gpio.Led",
"pin": 4, # Digital pin 4
},
},
"work": blink_led,
})
robot.start()
Hardware Support
----------------
Zorg has an extensible system for connecting to hardware devices. The
following GPIO devices are currently supported:
- `Light sensor <docs/light_sensor.md>`__
- `Button <docs/button.md>`__
- `Analog Sensor <docs/analog_sensor.md>`__
- `Digital Sensor <docs/digital_sensor.md>`__
- `LED <docs/led.md>`__
- `Relay <docs/relay.md>`__
- `Buzzer <docs/buzzer.md>`__
`Open a new issue <https://github.com/zorg/zorg-gpio/issues/new>`__ to
request support for additional components.
License
-------
`Copyright (c) 2015 Team
Zorg <https://github.com/zorg/zorg/blob/master/LICENSE.md>`__
.. |Join the chat at https://gitter.im/zorg/zorg| image:: https://badges.gitter.im/Join%20Chat.svg
:target: https://gitter.im/zorg/zorg?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
.. |Package Version| image:: https://img.shields.io/pypi/v/zorg-gpio.svg
:target: https://pypi.python.org/pypi/zorg-gpio/
.. |Requirements Status| image:: https://requires.io/github/zorg/zorg-gpio/requirements.svg?branch=master
:target: https://requires.io/github/zorg/zorg-gpio/requirements/?branch=master
.. |Build Status| image:: https://travis-ci.org/zorg/zorg-gpio.svg?branch=master
:target: https://travis-ci.org/zorg/zorg-gpio
.. |Code Climate| image:: https://codeclimate.com/github/zorg/zorg-gpio/badges/gpa.svg
:target: https://codeclimate.com/github/zorg/zorg-gpio
.. |Coverage Status| image:: https://coveralls.io/repos/github/zorg/zorg-gpio/badge.svg?branch=master
:target: https://coveralls.io/github/zorg/zorg-gpio?branch=master
| zorg-gpio | /zorg-gpio-0.0.7.tar.gz/zorg-gpio-0.0.7/README.rst | README.rst |
# zorg-gpio
[](https://gitter.im/zorg/zorg?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[](https://pypi.python.org/pypi/zorg-gpio/)
[](https://requires.io/github/zorg/zorg-gpio/requirements/?branch=master)
[](https://travis-ci.org/zorg/zorg-gpio)
[](https://codeclimate.com/github/zorg/zorg-gpio)
[](https://coveralls.io/github/zorg/zorg-gpio?branch=master)
Zorg (https://zorg.github.io/) is a Python
framework for robotics and physical computing.
This module provides drivers for [General Purpose Input/Output (GPIO)](https://en.wikipedia.org/wiki/General_Purpose_Input/Output) devices. Typically, this library is registered by an adaptor class such as [`zorg-edison`](https://github.com/zorg/zorg-edison) that supports the needed interfaces for GPIO devices.
## Getting Started
Install the module with: `pip install zorg zorg-gpio`
## [Documentation](http://zorg-gpio.readthedocs.org/)
## Example
```python
import time
import zorg
def blink_led(my):
while True:
my.led.toggle()
time.sleep(100)
robot = zorg.robot({
"name": "Test",
"connections": {
"edison": {
"adaptor": "zorg_edison.Edison",
},
},
"devices": {
"led": {
"connection": "edison",
"driver": "zorg_gpio.Led",
"pin": 4, # Digital pin 4
},
},
"work": blink_led,
})
robot.start()
```
## Hardware Support
Zorg has an extensible system for connecting to hardware devices.
The following GPIO devices are currently supported:
- [Light sensor](docs/light_sensor.md)
- [Button](docs/button.md)
- [Analog Sensor](docs/analog_sensor.md)
- [Digital Sensor](docs/digital_sensor.md)
- [LED](docs/led.md)
- [Relay](docs/relay.md)
- [Buzzer](docs/buzzer.md)
[Open a new issue](https://github.com/zorg/zorg-gpio/issues/new) to request support for additional components.
## License
[Copyright (c) 2015 Team Zorg](https://github.com/zorg/zorg/blob/master/LICENSE.md)
| zorg-gpio | /zorg-gpio-0.0.7.tar.gz/zorg-gpio-0.0.7/README.md | README.md |
zorg-grove
==========
|Documentation Status| |Join the chat at
https://gitter.im/zorg-framework/zorg|
This module implements drivers for controling devices using the
`Zorg <https://github.com/zorg/zorg>`__ framework for robotics and
physical computing.
`Documentation <http://zorg-grove.readthedocs.org/>`__
------------------------------------------------------
Hardware Support
================
Zorg has an extensible system for connecting to hardware devices. The
following Grove sensors and devices are currently supported:
- `LCD screen <docs/LCD.rst>`__
- `Temperature sensor <docs/temperature_sensor.rst>`__
- `Microphone <docs/microphone.rst>`__
- `Rotary Angle Sensor <docs/rotary_angle_sensor.rst>`__
- `Servo <docs/servo.rst>`__
`Open a new issue <https://github.com/zorg/zorg-grove/issues/new>`__ to
request support for additional components.
.. |Documentation Status| image:: https://readthedocs.org/projects/zorg-grove/badge/?version=latest
:target: http://zorg-grove.readthedocs.org
.. |Join the chat at https://gitter.im/zorg-framework/zorg| image:: https://badges.gitter.im/Join%20Chat.svg
:target: https://gitter.im/zorg-framework/zorg?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
| zorg-grove | /zorg-grove-0.0.4.tar.gz/zorg-grove-0.0.4/README.rst | README.rst |
# zorg-grove
[](https://travis-ci.org/zorg/zorg-grove)
[](http://zorg-grove.readthedocs.org)
[](https://gitter.im/zorg-framework/zorg?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
This module implements drivers for controling devices using the
[Zorg](https://github.com/zorg/zorg) framework for robotics
and physical computing.
## [Documentation](http://zorg-grove.readthedocs.org/)
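## Example
A minimal sketch, assuming an Intel Edison with the `zorg` and `zorg-edison` packages installed; it writes a message to the Grove LCD (the device name is illustrative):
```python
import zorg
def work(my):
    my.screen.print_string("Hello Zorg")
robot = zorg.robot({
    "name": "greeter",
    "connections": {
        "edison": {
            "adaptor": "zorg_edison.Edison",
        },
    },
    "devices": {
        "screen": {
            "connection": "edison",
            "driver": "zorg_grove.LCD",
        },
    },
    "work": work,
})
robot.start()
```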
# Hardware Support
Zorg has an extensible system for connecting to hardware devices.
The following Grove sensors and devices are currently supported:
- [LCD screen](docs/LCD.rst)
- [Temperature sensor](docs/temperature_sensor.rst)
- [Microphone](docs/microphone.rst)
- [Rotary Angle Sensor](docs/rotary_angle_sensor.rst)
- [Servo](docs/servo.rst)
[Open a new issue](https://github.com/zorg/zorg-grove/issues/new) to request support for additional components.
| zorg-grove | /zorg-grove-0.0.4.tar.gz/zorg-grove-0.0.4/README.md | README.md |
from zorg.driver import Driver
from time import sleep
# i2c commands
CLEARDISPLAY = 0x01
RETURNHOME = 0x02
ENTRYMODESET = 0x04
DISPLAYCONTROL = 0x08
CURSORSHIFT = 0x10
FUNCTIONSET = 0x20
SETCGRAMADDR = 0x40
SETDDRAMADDR = 0x80
# Flags for display entry mode
ENTRYRIGHT = 0x00
ENTRYLEFT = 0x02
ENTRYSHIFTINCREMENT = 0x01
ENTRYSHIFTDECREMENT = 0x00
# Flags for display on/off control
DISPLAYON = 0x04
DISPLAYOFF = 0x00
CURSORON = 0x02
CURSOROFF = 0x00
BLINKON = 0x01
BLINKOFF = 0x00
# Flags for display/cursor shift
DISPLAYMOVE = 0x08
CURSORMOVE = 0x00
MOVERIGHT = 0x04
MOVELEFT = 0x00
# Flags for function set
EIGHTBITMODE = 0x10
FOURBITMODE = 0x00
TWOLINE = 0x08
ONELINE = 0x00
FIVExTENDOTS = 0x04
FIVExEIGHTDOTS = 0x00
En = 0x04 # Enable bit
Rw = 0x02 # Read/Write bit
Rs = 0x01 # Register select bit
# I2C addresses for LCD and RGB backlight
DISPLAY_COLOR_ADDRESS = 0x62
DISPLAY_TEXT_ADDRESS = 0x3e
class LCD(Driver):
def __init__(self, options, connection):
super(LCD, self).__init__(options, connection)
self._displayfunction = FOURBITMODE | TWOLINE | FIVExEIGHTDOTS
self._displaycontrol = DISPLAYON | CURSOROFF | BLINKOFF
self._displaymode = ENTRYLEFT | ENTRYSHIFTDECREMENT
self.bus = options.get("bus", 0)
self.commands += [
"clear", "home", "set_cursor", "display_off",
"display_on", "cursor_off", "cursor_on", "blink_off",
"blink_on", "backlight_off", "backlight_on", "print_string"
]
def start(self):
# Initialize backlight, set to black (off)
self.connection.i2c_write(self.bus, DISPLAY_COLOR_ADDRESS, 0x00, 0)
self.connection.i2c_write(self.bus, DISPLAY_COLOR_ADDRESS, 0x01, 0)
# Currently magic values, but needed to write to rgb.
# Pulled from https://github.com/DexterInd/GrovePi/blob/master/Software/Python/grove_rgb_lcd/grove_rgb_lcd.py
self.connection.i2c_write(self.bus, DISPLAY_COLOR_ADDRESS, 0x08, 0xaa)
# 0x04 = R, 0x03 = G, 0x02 = B
self.connection.i2c_write(self.bus, DISPLAY_COLOR_ADDRESS, 0x04, 0)
self.connection.i2c_write(self.bus, DISPLAY_COLOR_ADDRESS, 0x03, 0)
self.connection.i2c_write(self.bus, DISPLAY_COLOR_ADDRESS, 0x02, 0)
sleep(0.04)
self._write4bits(0x03 << 4)
sleep(0.04)
self._write4bits(0x03 << 4)
sleep(0.04)
self._write4bits(0x03 << 4)
self._write4bits(0x02 << 4)
self._sendCommand(FUNCTIONSET | self._displayfunction)
self.display_on()
self.clear()
# Initialize to default text direction (for roman languages), set entry mode
self._sendCommand(ENTRYMODESET | self._displaymode)
self.home()
def clear(self):
"""
Clears display and returns cursor to the home position (address 0).
"""
self._sendCommand(CLEARDISPLAY)
sleep(0.05)
def home(self):
"""
Returns cursor to home position.
"""
self._sendCommand(RETURNHOME)
sleep(0.05)
def set_cursor(self, col, row):
"""
Sets cursor position.
"""
row_offsets = [0x00, 0x40, 0x14, 0x54]
self._sendCommand(SETDDRAMADDR | (col + row_offsets[row]))
def display_off(self):
"""
Sets Off of all display (D), cursor Off (C) and
blink of cursor position character (B).
"""
self._displaycontrol &= ~DISPLAYON
self._sendCommand(DISPLAYCONTROL | self._displaycontrol)
def display_on(self):
"""
Sets On of all display (D), cursor On (C) and
blink of cursor position character (B).
"""
self._displaycontrol |= DISPLAYON
self._sendCommand(DISPLAYCONTROL | self._displaycontrol)
def cursor_off(self):
"""
Turns off the cursor.
"""
self._displaycontrol &= ~CURSORON
self._sendCommand(DISPLAYCONTROL | self._displaycontrol)
def cursor_on(self):
"""
Turns on the cursor.
"""
self._displaycontrol |= CURSORON
self._sendCommand(DISPLAYCONTROL | self._displaycontrol)
def blink_off(self):
"""
Turns off the cursor blinking character.
"""
self._displaycontrol &= ~BLINKON
self._sendCommand(DISPLAYCONTROL | self._displaycontrol)
def blink_on(self):
"""
Turns on the cursor blinking character.
"""
self._displaycontrol |= BLINKON
self._sendCommand(DISPLAYCONTROL | self._displaycontrol)
def backlight_off(self):
"""
Turns off the back light. Does so by setting the
color to black.
"""
self.backlight_color(0, 0, 0)
def backlight_on(self):
"""
Turns on the back light. Does so by setting the
color to white
"""
self.backlight_color(255, 255, 255)
def backlight_color(self, red, green, blue):
"""
Set RGB color for the back light.
"""
self.connection.i2c_write(self.bus, DISPLAY_COLOR_ADDRESS, 0x04, red)
self.connection.i2c_write(self.bus, DISPLAY_COLOR_ADDRESS, 0x03, green)
self.connection.i2c_write(self.bus, DISPLAY_COLOR_ADDRESS, 0x02, blue)
def print_string(self, characters):
"""
Prints characters on the LCD.
Automatically wraps text to fit 16 character wide display.
"""
# Clear the display
self.clear()
self._sendCommand(0x08|0x04) # display on, no cursor
self._sendCommand(0x28) # 2 lines
sleep(0.05)
count = 0
row = 0
for c in characters:
if c == '\n':
count = 0
row = 1
self._sendCommand(0xc0)
continue
if count == 16 and row == 0:
self._sendCommand(0xc0)
row += 1
count += 1
self.connection.i2c_write(self.bus, DISPLAY_TEXT_ADDRESS, 0x40, ord(c))
def _write4bits(self, val):
self._pulseEnable(val)
def _pulseEnable(self, data):
a = data | En
sleep(0.0001)
b = data & ~En
sleep(0.05)
def _sendCommand(self, value):
self.connection.i2c_write(self.bus, DISPLAY_TEXT_ADDRESS, 0x80, value)
def _writeData(self, value):
self._sendData(value, Rs)
def _sendData(self, val, mode):
highnib = val & 0xf0
lownib = (val << 4) & 0xf0
self._write4bits(highnib | mode)
self._write4bits(lownib | mode) | zorg-grove | /zorg-grove-0.0.4.tar.gz/zorg-grove-0.0.4/zorg_grove/lcd.py | lcd.py |
# Zorg Network Camera
[](https://pypi.python.org/pypi/zorg-network-camera/)
[](https://travis-ci.org/zorg/zorg-network-camera)
[](https://coveralls.io/github/zorg/zorg-network-camera?branch=master)
This module contains device adaptors and drivers that make it possible
to connect network cameras to your robot.
## Installation
```
pip install zorg-network-camera
```
## Network Camera Adaptor
This module's network camera adaptor handles the basic functions of
retrieving images from the remote camera when necessary. Because this
module is intended to run on devices such as the Intel Edison or the
Raspberry Pi, the adaptor has been designed with additional features
for efficiency. Mainly, images are cached until the image on
the remote camera has been updated. This ensures that an image will
not be unnecessarily downloaded onto the device.
### Camera Feed Driver
The `Feed` module provides a driver for accessing the url to of the
remote camera's image. This is a convenience method intended to be
used to load the image into your application.
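As a rough sketch, a robot definition using the adaptor and this driver might look like the following; the device layout mirrors the other Zorg modules, but treat the driver method name as a hypothetical placeholder:
```python
import zorg
def work(my):
    print(my.feed.get_url())  # hypothetical accessor for the image URL
robot = zorg.robot({
    "name": "watcher",
    "connections": {
        "camera": {
            "adaptor": "zorg_network_camera.Camera",
            "url": "http://example.com/camera.jpg",  # placeholder camera URL
        },
    },
    "devices": {
        "feed": {
            "connection": "camera",
            "driver": "zorg_network_camera.Feed",
        },
    },
    "work": work,
})
robot.start()
```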
### Light Sensor Driver
The `LightSensor` module allows you to use the camera as a light sensor.
This driver provides the ability to get the average lighting level from
the camera.
### OCR Driver
Optical character recognition (OCR) is the process of programmatically
converting images of typed, handwritten or printed text into machine-encoded
text. The `OCR` driver module provided in this package provides a utility that
makes it possible for your robot to process written text that it sees.
The OCR driver achieves the process of extracting text from an image through
the use of the open source [Tesseract OCR](https://github.com/tesseract-ocr/tesseract)
module. Tesseract is an optical character recognition engine originally
developed by Hewlett-Packard. Development of Tesseract has been
[sponsored by Google since 2006](http://googlecode.blogspot.com/2006/08/announcing-tesseract-ocr.html).
| zorg-network-camera | /zorg-network-camera-0.0.3.tar.gz/zorg-network-camera-0.0.3/README.md | README.md |
from zorg.adaptor import Adaptor
import os
# Check urllib for Python 2 or 3 compatibility
try:
from urllib.parse import urlsplit
from urllib import request as urllib_request
except ImportError:
from urlparse import urlsplit
import urllib2 as urllib_request
class Camera(Adaptor):
def __init__(self, options):
super(Camera, self).__init__(options)
self.url = options.get("url", "")
self.cache_directory = options.get("cache_directory", ".cache")
self.image_last_modified = None
def download_image(self):
"""
Download the image and return the
local path to the image file.
"""
split = urlsplit(self.url)
filename = split.path.split("/")[-1]
# Ensure the directory to store the image cache exists
if not os.path.exists(self.cache_directory):
os.makedirs(self.cache_directory)
filepath = os.path.join(self.cache_directory, filename)
data = urllib_request.urlopen(self.url)
with open(filepath, "wb") as image:
image.write(data.read())
return filepath
def has_changed(self):
"""
Method to check if an image has changed
since it was last downloaded. By making
a head request, this check can be done
quicker that downloading and processing
the whole file.
"""
request = urllib_request.Request(self.url)
request.get_method = lambda: 'HEAD'
response = urllib_request.urlopen(request)
information = response.info()
if 'Last-Modified' in information:
last_modified = information['Last-Modified']
# Return False if the image has not been modified
if last_modified == self.image_last_modified:
return False
self.image_last_modified = last_modified
# Return True if the image has been modified
# or if the image has no last-modified header
return True | zorg-network-camera | /zorg-network-camera-0.0.3.tar.gz/zorg-network-camera-0.0.3/zorg_network_camera/adaptor.py | adaptor.py |
from zorg.driver import Driver
class MotorShield(Driver):
"""
Driver for the Seeed Motor Shield.
"""
def __init__(self, options, connection):
super(MotorShield, self).__init__(options, connection)
self.pins = {
'right_motor_speed': 10,
'right_motor': [12, 13],
'left_motor_speed': 9,
'left_motor': [8, 11]
}
self.right_motor_speed = 0
self.left_motor_speed = 0
self.commands += [
'set_right_motor_speed',
'get_right_motor_speed',
'set_right_motor_direction',
'set_left_motor_speed',
'get_left_motor_speed',
'set_left_motor_direction'
]
def set_right_motor_speed(self, speed):
"""
Set the speed of the right side motor.
Speed should be an integer 0 to 255.
"""
self.right_motor_speed = speed
self.connection.pwm_write(
self.pins['right_motor_speed'],
speed
)
def get_right_motor_speed(self):
return self.right_motor_speed
def set_right_motor_direction(self, direction):
"""
Takes a 0 or a 1 value to set the motor direction
clockwise or counter-clockwise.
"""
if direction == 0:
# Set the motor direction clockwise
self.connection.digital_write(self.pins['right_motor'][0], 0)
self.connection.digital_write(self.pins['right_motor'][1], 1)
elif direction == 1:
# Set the motor direction counter-clockwise
self.connection.digital_write(self.pins['right_motor'][0], 1)
self.connection.digital_write(self.pins['right_motor'][1], 0)
else:
# Stop the motor
self.connection.digital_write(self.pins['right_motor'][0], 0)
self.connection.digital_write(self.pins['right_motor'][1], 0)
def set_left_motor_speed(self, speed):
"""
Set the speed of the left side motor.
Speed should be an integer 0 to 255.
"""
self.left_motor_speed = speed
self.connection.pwm_write(
self.pins['left_motor_speed'],
speed
)
def get_left_motor_speed(self):
return self.left_motor_speed
def set_left_motor_direction(self, direction):
"""
Takes a 0 or a 1 value to set the motor direction
clockwise or counter-clockwise.
"""
if direction == 0:
# Set the motor direction clockwise
self.connection.digital_write(self.pins['left_motor'][0], 0)
self.connection.digital_write(self.pins['left_motor'][1], 1)
elif direction == 1:
# Set the motor direction counter-clockwise
self.connection.digital_write(self.pins['left_motor'][0], 1)
self.connection.digital_write(self.pins['left_motor'][1], 0)
else:
# Stop the motor
self.connection.digital_write(self.pins['left_motor'][0], 0)
self.connection.digital_write(self.pins['left_motor'][1], 0) | zorg-seeed | /zorg-seeed-0.0.1.tar.gz/zorg-seeed-0.0.1/zorg_seeed/motor_shield.py | motor_shield.py |
Zorg
====
|Join the chat at https://gitter.im/zorg-framework/zorg|
Zorg is a Python framework for robotics and physical computing. It is
based on `Cylon.js <https://github.com/hybridgroup/cylon/>`__, a
JavaScript framework for robotics.
Getting started
---------------
Installation
~~~~~~~~~~~~
All you need to get Zorg up and running is the ``zorg`` package:
::
pip install zorg
*You may need to `copy the
source <https://github.com/gunthercox/zorg/archive/master.zip>`__ if
your device does not support `pip <https://pip.pypa.io/en/stable/>`__.*
You should also install the packages for the hardware you are looking to
support. In our examples, we will be using the `Intel
Edison <https://www-ssl.intel.com/content/www/us/en/do-it-yourself/edison.html>`__
and an LED, so we need the ``edison`` and ``gpio`` packages:
::
pip install zorg-gpio zorg-edison
Examples
--------
Intel Edison and an LED
~~~~~~~~~~~~~~~~~~~~~~~
This example controls an LED connected to the Intel Edison and blinks it
once every 500 milliseconds. This program should be run on the Intel
Edison itself.
.. code:: python
import time
import zorg
def work(my):
while True:
# Toggle the LED
my.led.toggle()
# Wait 100ms before doing it again
time.sleep(0.1)
robot = zorg.robot({
"connections": {
"edison": {
"adaptor": "zorg_edison.Edison",
},
},
"devices": {
"led": {
"connection": "edison",
"driver": "zorg_gpio.Led",
"pin": 13, # 13 is the on-board LED
}
},
"name": "example", # Give your robot a unique name
"work": work, # The method (on the main level) where the work will be done
})
.. |Join the chat at https://gitter.im/zorg-framework/zorg| image:: https://badges.gitter.im/Join%20Chat.svg
:target: https://gitter.im/zorg-framework/zorg?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
| zorg | /zorg-0.0.5.tar.gz/zorg-0.0.5/README.rst | README.rst |
Copyright (c) 2016 Team Zorg (Kevin Brown, Gunther Cox, Tyler Redzko, and Owen Apile)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
| zorg | /zorg-0.0.5.tar.gz/zorg-0.0.5/LICENSE.md | LICENSE.md |
# Zorg
[](https://gitter.im/zorg-framework/zorg?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
Zorg is a Python framework for robotics and physical computing. It is based on
[Cylon.js][cylon.js], a JavaScript framework for robotics.
## Getting started
### Installation
All you need to get Zorg up and running is the `zorg` package:
```
pip install zorg
```
_You may need to [copy the source][zorg-source] if your device does not
support [pip][pip]._
You should also install the packages for the hardware you are looking to support. In our examples, we will be using the [Intel Edison][intel-edison] and an LED, so we need the `edison` and `gpio` packages:
```
pip install zorg-gpio zorg-edison
```
## Examples
### Intel Edison and an LED
This example controls an LED connected to the Intel Edison, toggling it once every 100 milliseconds. This program should be run on the Intel Edison itself.
```python
import time

import zorg


def work(my):
    while True:
        # Toggle the LED
        my.led.toggle()

        # Wait 100ms before doing it again
        time.sleep(0.1)

robot = zorg.robot({
    "connections": {
        "edison": {
            "adaptor": "zorg_edison.Edison",
        },
    },
    "devices": {
        "led": {
            "connection": "edison",
            "driver": "zorg_gpio.Led",
            "pin": 13,  # 13 is the on-board LED
        }
    },
    "name": "example",  # Give your robot a unique name
    "work": work,  # The method (on the main level) where the work will be done
})
```
[cylon.js]: https://github.com/hybridgroup/cylon/
[intel-edison]: https://www-ssl.intel.com/content/www/us/en/do-it-yourself/edison.html
[pip]: https://pip.pypa.io/en/stable/
[zorg-source]: https://github.com/gunthercox/zorg/archive/master.zip
| zorg | /zorg-0.0.5.tar.gz/zorg-0.0.5/README.md | README.md |
# Zorglangue Traductor
A Python module that lets you translate the fake language Zorglangue from the Spirou comic strips. It works from a Romance language (originally French) to Zorglangue and vice versa. Eviv Bulgroz !
## Installation
```
py -m pip install zorglangue-traductor
```
## How to use it
Simply import the module into your Python 3 program, and you will be able to use the function `zorglonde` to translate your string. The function takes a string as a parameter and returns a string.
```
# Zorglangue Program
# by Raphaël DENNI
# Import
import zorglangue_traductor as zt
# Code
print("--- Zorglangue program ---\n--- by Raphaël DENNI ---")
while True:
string = input("\nEnter your string : ")
zorg_string = zt.zorglonde(string)
print(f"\nEviv Bulgroz : {zorg_string})
input("\nPress enter for an another phrase or shutdown the program.")
```
## Results example
 | zorglangue-traductor | /zorglangue_traductor-2.0.0.tar.gz/zorglangue_traductor-2.0.0/README.md | README.md |
# Imports functions from shift.py and lower_upper.py
from src.zorglangue_traductor.utils.shift import shift
from src.zorglangue_traductor.utils.lower_upper import lower_upper
# Function that translates a string into Zorglangue
# Fun fact: Zorglonde is the name of the waves that transform people into Zorglhommes who speak Zorglangue
def zorglonde(string):
string = string.split(" ") # Split the string with spaces as separators to be able to reverse each word separately
zorg_string = []
punctuation_list = [",", ";", ":", ".", "?", "!"]
# The next lines reverse each word of the string individually because they need to stay in the same order
    for i in range(len(string)):
temp_list = list(string[i])
if len(temp_list) > 1: # The next lines are not executed if the word is only one letter long to avoid errors
temp_list.reverse()
            # The next lines shift common punctuation characters to the right after reversing a word,
# because the function reverse() reverses these characters in a string
# while they must remain at the same place.
# This is necessary to correspond to the punctuation of the Zorglangue language (see shift.py)
for j in punctuation_list:
shift(temp_list, j)
# The following case is specific to the apostrophe character
# because it is the only character that is shifted to the left with another character
# (e.g. with this, "l'appareil" becomes "l'lierappa" and not "lierappa'l")
shift(temp_list, "'", 0, True)
# The next line makes the letters lowercase and if needed capitalizes the first letter of the new string,
# because the function reverse() places the first letter, which is commonly capitalized,
# at the end of the word/string.
# This is necessary to correspond to the letter capitalization of the Zorglangue language (see lower_upper.py)
lower_upper(temp_list)
zorg_string.extend(temp_list)
zorg_string.append(" ")
return "".join(zorg_string)
"""
MIT License
Copyright (c) 2023 Raphaël Denni
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
""" | zorglangue-traductor | /zorglangue_traductor-2.0.0.tar.gz/zorglangue_traductor-2.0.0/src/zorglangue_traductor/main.py | main.py |
# zōritori 草履取り
yet another tool to help Japanese language learners read text in video games
## features
* annotate kanji with furigana
* color code proper nouns (like NHK News Web Easy)
* look up words on mouse hover, or open Jisho or Wikipedia
* automatically collect vocabulary with context
* (optional) English subtitles via machine translation

This is a work in progress and is rough around the edges.
## requirements:
* Windows, Linux, or Mac (tested on Windows 10, Ubuntu 22.04, and macOS Monterey)
* Python 3.10.x (tested with 3.10.9)
* either Tesseract or a Google Cloud Vision API account
* *(optional) DeepL API account for machine translated subtitles*
* *(Linux only) scrot, python3-tk, python3-dev. X11 only for now, Wayland may not work*
## installation:
on Windows, the following worked:
* install Python 3.10.x
* install `zoritori` via pip (optionally via pipx)
on Mac and Linux, I ran into numerous issues installing via pip/pipx, so for now use the development process:
* install Python 3.10.9 (recommended via pyenv)
* install [Poetry](https://python-poetry.org/)
* clone this repo
* install dependencies via `poetry install`
* when running `zoritori`, use `poetry run python -m zoritori ...` (see below for command line args)
for all platforms:
* download the example config file from [here](https://github.com/okonomichiyaki/zoritori/blob/main/config.ini)
* if using Tesseract, [follow these instructions](https://github.com/tesseract-ocr/tesseract) to install it, then configure it by specifying the path to the `tesseract` binary in `config.ini`
* if using Google Cloud Vision, [follow these steps](https://cloud.google.com/vision/docs/detect-labels-image-client-libraries) to create a project and download a credentials JSON file. then add that as an environment variable. Windows: `$env:GOOGLE_APPLICATION_CREDENTIALS="C:\path\to\json"`, Mac/Linux: `export GOOGLE_APPLICATION_CREDENTIALS=/path/to/json`
## usage
* start: `zoritori -e <tesseract|google> -c /path/to/config.ini`
* an invisible window (with title "zoritori") should appear. make sure this window has focus
* identify the region of the screen containing text you want to read
* using your mouse, (left) click and drag a rectangle around the text
* after a moment, you should see furigana over any kanji in the region, and proper nouns highlighted (blue, orange, and green boxes). hovering over words inside the region should display a dictionary result, if one is found
### keyboard shortcuts
| Key | Description |
| ----------- | ----------- |
| T | toggle translation |
| C | manual refresh |
| J | open Jisho search for word under cursor |
| W | open Japanese Wikipedia search for word under cursor |
| E | open English Wikipedia search for word under cursor |
| R + mouse-drag | select main region when in click through mode |
| Q + mouse-drag | select one time lookup when in click through mode |
## more options/etc
### secondary clipping
After selecting a region, `zoritori` will watch that area for changes, and refresh if any are detected. If you want to select a new region, just click and drag again. If you want to keep your original region, but want to do a one-time look up a word outside the region, right click and drag around the word.
### click through mode
By default, the transparent overlay won't send clicks through to underlying applications, including your game. It will steal focus if you click anywhere on the screen. On Windows only (for now) you can enable click through mode in the `config.ini` file or command-line parameters. On Mac and Linux, this is not supported at the moment.
When click through mode is enabled, use R (without mouse clicking) to drag select a region, and use Q to select a region for a one-time lookup.
### comparing OCR engines
Tesseract is free, open source, and works offline. Unfortunately, in my experience it has less accurate recognition, and sometimes returns very messy bounding box data, making it difficult to accurately place furigana.
Google Cloud Vision has [per usage costs](https://cloud.google.com/vision/pricing), but should be free for low usage, and is closed source and requires an Internet connection (the selected region is sent as an image to Google for processing)
### saving vocabulary
By default nothing is saved. But if you want to save vocabulary words, add a folder name in the `config.ini` file or command-line parameters.
With only `NotesFolder` set, all vocabulary will be saved in one folder. Fullscreen screenshots are saved each time OCR runs, along with a markdown file that include new vocabulary found, for later review.
With only `NotesRoot` set, vocabulary will be saved as above but inside individual folders for each session (once for each time you start `zoritori`) to make review less cumbersome.
With both `NotesFolder` and `NotesRoot` set, `NotesFolder` behavior takes precedence (everything saved in one folder).
| zoritori | /zoritori-0.0.2.post1.tar.gz/zoritori-0.0.2.post1/README.md | README.md |
Zorker - A Twitter bot to play text adventure games
===================================================
`Scott Torborg <http://www.scotttorborg.com>`_
Installation
============
Install with pip::
$ pip install zorker
Running
=======
First set up a new Twitter app, noting consumer key (API key), consumer secret (API secret), access token, and access token secret. There are `helpful instructions for this here <http://nodotcom.org/python-twitter-tutorial.html>`_.
Create a config file, something like ``zorker.ini``::
[zorker]
consumer_key = ...
consumer_secret = ...
access_token = ...
access_token_secret = ...
screen_name = zorker
game_file = zork1.z5
Run the bot::
$ zorker zorker.ini
License
=======
Zorker is licensed under an MIT license. Please see the LICENSE file for more
information.
| zorker | /zorker-0.1.tar.gz/zorker-0.1/README.rst | README.rst |
from descriptions import descriptions
from random import randint
from sys import exit
import lexicon
import guess_game
import behemoth_battle
from sneak import sneak
import aliens_tests
def dead(why):
print "MISSION FAILED\n"
print "="*72
print why
print "Try Again!"
print "Thanks for playing!"
print "\t-Luis (Developer)"
exit(0)
def win(why):
print "MISSION ACCOMPLISHED\n"
print '='*72
print why
print "Success! You escaped! Congratulations!"
print "Thanks for playing!"
print "\t-Luis (Developer)"
exit(0)
class rooms(object):
def resets(self):
reset = {
'laser': False,
'laser_grabbed': False,
'lock' : True,
'behemoth': True,
'animal': True,
'guard': True
}
return reset
def start_room(self, dec):
#Finished
if dec == 'north':
return 'guess_room'
if dec == 'west':
return 'laser_room'
if dec == 'east':
return 'clue_room'
else:
return 'start_room'
def laser_room(self, dec):
#Finished
        if dec == 'laser' and variable['laser_grabbed'] == True:
            print "You can't pick up the laser anymore!"
            return 'laser_room'
if dec == 'laser' and variable['laser'] == True:
print "You already have a laser!"
print "You can't grab another one!"
return 'laser_room'
if dec == 'laser' and variable['laser'] == False and variable['laser_grabbed'] == False:
variable['laser'] = True
variable['laser_grabbed'] = True
print "You picked up the laser!"
if dec == 'west':
return 'strip_room'
if dec == 'east':
return 'start_room'
else:
return 'laser_room'
def strip_room(self, dec):
#Finished
if dec == 'peek':
print "You peek into the tunnel."
print "You hear a terrible shriek, and you find yourself face to"
print "face with an ugly little critter."""
if variable['laser'] == False:
print "He bites you and then scampers off!"
health['player'] -= 1
print "You have %d health points remaining." % health['player']
if variable['laser'] == True:
print "He grabs your laser and scampers off!"
variable['laser'] = False
return 'strip_room'
if dec == 'east':
return 'laser_room'
else:
return 'strip_room'
def clue_room(self, dec):
#Finished - needs text
if dec == 'clue':
print "The number you are seeking is from 'A Hitchiker's Guide to the Galaxy'"
print ", the answer to the universe."
return 'clue_room'
if dec == 'east':
return 'beast_room'
if dec == 'west':
return 'start_room'
else:
return 'clue_room'
def guess_room(self, dec):
#Finished
if dec == 'cheat':
variable['lock'] = False
print "Door unlocked."
return 'guess_room'
if dec == 'button':
result = guess_game.guess()
if result == 'lose' or result == 'tie':
print "Sorry! The lock stays."
return 'guess_room'
if result == 'win':
variable['lock'] = False
print "The door is unlocked."
return 'guess_room'
if dec == 'north' and variable['lock'] == False:
return 'farm_room'
if dec == 'north' and variable['lock'] == True:
print "Sorry! That door is locked!"
return 'guess_room'
if dec == 'south':
return 'start_room'
else:
return 'guess_room'
def farm_room(self, dec):
#Finished
if dec == 'kill':
print "\nYou ought to be ashamed of yourself."
variable['animal'] = False
return 'farm_room'
if dec == 'south':
return 'guess_room'
if dec == 'west':
return 'guard_room'
else:
return 'farm_room'
def guard_room(self, dec):
#Finished
if dec == 'cheat':
variable['guard'] = False
print "Guard eliminated."
return 'guard_room'
if dec == 'talk':
if variable['animal'] == False:
print "\nGuard:\tHey! You're the one that killed my animals!"
print "Guard:\tI'll teach you a lesson you'll never forget!"
raw_input("\nHit ENTER to continue dialogue\n")
print "You:\tI'm sorry!"
raw_input("\nHit ENTER to continue dialogue\n")
print "Guard:\tSorry won't cut it!"
raw_input("\nHit ENTER to continue dialogue\n")
health['player'] -= 1
print "You have %d health points remaining." % health['player']
return 'guard_room'
if variable['animal'] == True and variable['behemoth'] == True:
print "\nGuard:\tHey. I know the boss wants you locked up."
print "Guard:\tBut if you do me a favor, I'll let you through."
print "Guard:\tI need you to kill the beast that is stored in the box."
print "Guard:\tHe's in a room south-east of here."
print "Guard:\tDo you want me to take you there?"
teleport = raw_input("> ")
print "\nGuard:\tRemember, if you don't kill him, you don't get through!"
if 'yes' in teleport:
return 'beast_room'
else:
return 'guard_room'
if variable['animal'] == True and variable['behemoth'] == False:
print "\nGuard:\tGood! You slayed the beast!"
print "Guard:\tI'll just take a lunch break now..."
variable['guard'] = False
if dec == 'kill' and variable['laser'] == True:
print "You killed the guard with your laser!"
print "\nYour laser is broken now."
variable['laser'] = False
variable['guard'] = False
return 'guard_room'
if dec == 'kill' and variable['laser'] == False:
dead("The guard murdered you.")
if dec == 'sneak':
result = sneak()
if result == 'succeed':
print "\n You successfully sneaked past the guard!"
return 'alf_room'
if result == 'fail':
dead("The guard caught you and killed you.")
if dec == 'west' and variable['guard'] == False:
return 'alf_room'
if dec == 'west' and variable['guard'] == True:
dead("The guard saw you trying to bust the door open.\nHe killed you on the spot.")
if dec == 'east':
return 'farm_room'
else:
return 'guard_room'
def alf_room(self, dec):
if dec == 'kill':
if variable['laser'] == True:
win("You killed Alf!\nYou command his goons to drop you off at your base, from his P.A. system.")
if variable['laser'] == False:
print """You move in closer to Alf, to prepare for the kill.
As you prepare to strike him, he wakes up and trips the secret alarm.
But he was too late. You managed to kill him.
As soon as you finished executing Alf, his small army of soldiers flooded room."""
dead("You were executed.")
if dec == 'talk':
print "Alf:\tAh, Cohn Jonnor. I'm surprised you've made it this far."
raw_input("\nHit ENTER to continue dialogue.\n")
if variable['laser'] == True:
print "Cohn:\tI could kill you right now."
print "Cohn:\tI'm armed."
print "Cohn:\tLet me off this ship right now."
raw_input("\nHit ENTER to continue dialogue.\n")
print "Alf:\tAllright Allright. I'll drop you off somewhere."
print "Alf:\tJust don't do anything stupid."
raw_input("\nHit ENTER to continue\n")
win("Alf dropped you off at your base!")
if variable['laser'] == False:
print "Cohn:\tLet me off this ship."
raw_input("\nHit ENTER to continue dialogue.\n")
if variable['animal'] == False:
print "Alf:\tMy pleasure."
print "*Alf opens a trap door below you."
dead("You fell to your death.")
else:
print "Alf:\tNever!"
print "*Alf attacks you!"
health['player'] -= 1
print "You have %d hit points left." % health['player']
if health['player'] <= 0:
dead("You ran out of health!")
print "Alf:\tI'll let you off if you can answer my riddle."
print "Alf:\tWhat is the answer to the universe?"
guess = raw_input("> ")
answer = '42'
if guess == answer:
win("You answered Alf's riddle correctly, and he let you free!")
if guess != answer:
dead("You answered Alf's riddle incorrectly, and he had you beheaded!")
if dec == 'east':
return 'guard_room'
else:
return 'alf_room'
def answer_room(self, dec):
#Finished
if dec == 'read':
print "'%s'" % descriptions.answer
return 'answer_room'
if dec == 'west':
return 'beast_room'
else:
return 'answer_room'
def beast_room(self,dec):
#Finished
if dec == 'cheat' and variable['behemoth'] == True:
print "Behemoth deactivated."
variable['behemoth'] = False
return 'beast_room'
if dec == 'behemoth' and variable['behemoth'] == False:
print "You've already slayed the beast!"
print "The box is empty, you can get past it now."
return 'beast_room'
if dec == 'behemoth' and variable['behemoth'] == True:
print "A behemoth emerges from the box and attacks you!"
result = behemoth_battle.behemoth()
if result == 'alive':
print "You slayed the beast!"
variable['behemoth'] = False
return 'beast_room'
if result == 'dead':
dead("The beast slayed you!")
if result == 'escape':
print "YOU BARELY ESCAPED WITH YOUR LIFE."
health['player'] -= 1
print "You have %d health points left." % health['player']
return 'beast_room'
return 'beast_room'
if dec == 'east' and variable['behemoth'] == False:
return 'answer_room'
if dec == 'east' and variable['behemoth'] == True:
print "The box is in your way!"
return 'beast_room'
if dec == 'west':
return 'clue_room'
else:
return 'beast_room'
ROOMS = {
'start_room': [rooms().start_room, descriptions.start_room],
'laser_room': [rooms().laser_room, descriptions.laser_room],
'strip_room': [rooms().strip_room, descriptions.strip_room],
'clue_room' : [rooms().clue_room, descriptions.clue_room],
'guess_room': [rooms().guess_room, descriptions.guess_room],
'farm_room': [rooms().farm_room, descriptions.farm_room],
'guard_room': [rooms().guard_room, descriptions.guard_room],
'alf_room': [rooms().alf_room, descriptions.alf_room],
'answer_room': [rooms().answer_room, descriptions.answer_room],
'beast_room': [rooms().beast_room, descriptions.beast_room],
'resets': [rooms().resets, None]
}
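# Each ROOMS entry maps a room name to a [handler, description] pair. runner()
# below prints the description, reads the player's input, converts it to an
# action word via lexicon.scanner, and hands that action to the handler, which
# returns the name of the next room.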
def runner(map, start):
next = start
while True:
#next = room name
#room is the room's function without the '()'
#info is the room's description (str)
if health['player'] <= 0:
dead('You ran out of health')
room = map[next][0]
info = map[next][1]
descriptions().guide(next)
print info
words = raw_input("> ")
dec = lexicon.scanner(next,words)
if dec == 'wall':
print "You can't go there, there is a wall!"
runner(ROOMS,next)
next = room(dec)
print "\n____________________________\n"
variable = rooms().resets() #variable is a dictionary with all of the global variables, which is returned by the reset method of rooms.
def start():
aliens_tests.test()
print descriptions.intro
print "TIPS:"
print "\t1)You have two hitpoints. If you run out, you die."
print "\n\t2)Use compass words to navigate between rooms.\n"
runner(ROOMS,'start_room')
health = {'player': 2}
if __name__ == '__main__':
start() | zorky | /zorky-2.0.zip/zorky-2.0/aliens/aliens.py | aliens.py |
class descriptions(object):
intro = "You are Cohn Jonnor. You are the leader of the resistance. Your squad was captured by the enemy a few blocks away from your hidden base, on a rescue mission. It's only a matter of time before the clinkers discover your woman, Emeline, and what's left of the human race.\nYou seem to be in a spaceship of some sort. Your objective is to get off the ship, so you can alert your buddies of the search party before they are found."
characters = {'Cohn Jonnor': 'Leader of the resistance. Protagonist. Received military training. Married to Emeline',
'Emeline': 'Daughter of the president of the United States. Beautiful. No military training. Married to Cohn Jonnor.',
'Alf' : 'The leader of the clinkers, an alien mafia. A renegade group of aliens that seeks to destroy capitalism, and replace it with an agricultural economy. They are excellent farmers.'}
rooms_list = ['start_room', 'laser_room','strip_room','clue_room','guess_room','farm_room','guard_room','alf_room','answer_room','beast_room']
start_room = "You are in an empty room, with four sides."
laser_room = "There is a laser gun in here."
strip_room = "There are strange markings on the walls. There are fresh animal droppings on the floor, as well as a small tunnel\n"
clue_room = "There is an empty satchel on the ground of this room, but it feels heavy."
guess_room = "There is a strange device mounted on the northern wall. It has a big red button on it."
farm_room = "There are chickens and cows in here."
guard_room = "As you walk in you spot a guard. You barrel roll into a haystack before he sees you. He seems friendly enough."
alf_room = "Alf, the alien farmer mafia leader, is sound asleep in his bed."
answer_room = "There is an engraved text on the wall."
beast_room = "There is a giant box in front of the eastern exit."
answer = "The answer is 42."
def guide(self,pos):
sym = {
'start_room': " ",
'laser_room': " ",
'strip_room': " ",
'clue_room' : " ",
'guess_room': " ",
'farm_room' : " ",
'guard_room': " ",
'alf_room' : " ",
'answer_room': " ",
'beast_room': " "
}
sym[pos] = "*"
print "------------|"
print " %s | %s | %s |" % (sym['alf_room'],sym['guard_room'],sym['farm_room'])
print "------------|"
print " | %s |" % sym['guess_room']
print "-------------------------"
print "| %s | %s | %s | %s | %s | %s |" % (sym['strip_room'],sym['laser_room'],sym['start_room'],sym['clue_room'],sym['beast_room'],sym['answer_room'])
print "-------------------------"
sym[pos] = " " | zorky | /zorky-2.0.zip/zorky-2.0/aliens/descriptions.py | descriptions.py |
p1 = 'Cohn Jonnor'
p2 = 'Behemoth'
health = {p1: 100, p2: 100}
actions = ['attack','heal','nuke']
from random import randint
from sys import exit
def attack(p1,p2):
damage = randint(20,40)
health[p2] -= damage
print "\n%s does %d damage to %s!\n" % (p1,damage,p2)
def heal(p1,p2):
heal = randint(1,20)
if health[p1] + heal < 100:
health[p1] += heal
print "\n%s healed by %d points!\n" % (p1,heal)
else: print "\nYou cannot heal this much!\n"
def menu(p1,p2):
print "\n%s' turn!" % p1
print "1: Attack\n2: Heal"
print "Health: %d" % health[p1]
dec = raw_input("> ")
if dec == 'end' or dec =='exit':
print "Goodbye!"
exit(0)
if dec == 'cheat':
health[p2] = -1
if dec == 'attack' or dec == '1':
attack(p1,p2)
if dec == 'heal' or dec == '2':
heal(p1,p2)
def menu_ai(p1,p2):
print "Behemoth\nHealth: %s\n" % health[p1]
h = health[p1]
h2 = health[p2]
dec = 1
if h < 55 and h2 < 50:
dec = 2
if h < 25:
dec = 2
if h > 50 or h2 < 50:
dec = 1
if h > 55:
dec = 1
if dec == 'attack' or dec == 1:
print "Behemoth uses attack!"
attack(p1,p2)
if dec == 'heal' or dec == 2:
print "Behemoth uses heal!"
heal(p1,p2)
def behemoth():
    # re-entry guards: the fight may already be over
    if health[p2] < 0:
        print "You slayed the beast!"
        return 'alive'
    if health[p1] < 0:
        return 'dead'
while health[p1] > 0 and health[p2] > 0:
if health[p2] < 0 or health[p1] < 0:
break
if health[p1] < 15:
break
print "---------------------------------------- "
menu(p1,p2)
if health[p2] < 0 or health[p1] < 0:
break
print "---------------------------------------- "
raw_input("Hit ENTER for Behemoth's turn!")
print "---------------------------------------- "
menu_ai(p2,p1)
if health[p2] < 0 or health[p1] < 0:
break
if health[p2] < 0:
#print "You slayed the beast!"
return 'alive'
if health[p1] < 0:
return 'dead'
if health[p1] < 15:
return 'escape'
#----------------------------------------------------------------------------------------------------------------------------------------------------------------
def pick_lock():
print "In order to break the lock, you have to guess the correct number!"
print "I'm thinking of a number between 1 and 10.\nWhat is it?"
num = randint(1,10)
decision = raw_input("> ")
try:
if int(decision) == num:
return 'correct'
if int(decision) != num:
return 'incorrect'
except ValueError:
print "You need to input a number between 1,10!"
return 'incorrect' | zorky | /zorky-2.0.zip/zorky-2.0/aliens/behemoth_battle.py | behemoth_battle.py |
from sys import exit
walls = {
    'start_room': ('south',),
'laser_room': ('north','south'),
'strip_room': ('north','south','west'),
'clue_room': ('north','south'),
'guess_room': ('west','east'),
'farm_room': ('north','east'),
'guard_room': ('north','south'),
'alf_room': ('north','west','south'),
'answer_room': ('north','south','east'),
'beast_room': ('north','south')
}
alt_compass = 'left down up right'.split()
compass = ['north','south','east','west']
convert = {
'up': 'north',
'down': 'south',
'left': 'west',
'right': 'east'
}
def scanner(level,words):
if 'exit' in words or 'end' in words:
print "\nThanks for playing!"
print "\t-Luis (Developer)"
exit(0)
if words == 'cheat' or words == 'unlock':
return 'cheat'
if 'tits' in words:
print "I like tits too."
words = words.split()
for word in words: #returns wall or direction
if word in compass:
if word in walls[level]:
return 'wall'
return word
if word in alt_compass:
if word in alt_walls[level]:
return 'wall'
return convert[word]
if level == 'clue_room': #returns clue
for each in 'satchel bag container purse manpurse it object thing'.split():
if each in words:
for every in ['pick','grab','lift','get']:
if every in words:
return 'clue'
if level == 'answer_room': #returns read
if 'read' in words:
for each in 'it text engraving stuff wall'.split():
if each in words:
return 'read'
if level == 'laser_room': #returns laser
for each in ['pick','grab','lift','get']:
if each in words:
for every in ['it','laser','up']:
if every in words:
return 'laser'
if level == 'strip_room': #returns peek
for each in 'peek look check crawl duck'.split():
if each in words:
for every in 'there it tunnel tunel tunell in'.split():
if every in words:
return 'peek'
if level == 'guess_room': #returns button
for each in ['push','press','hit']:
if each in words:
if 'button' in words or 'it' in words:
return 'button'
if level == 'farm_room': #returns kill
for each in 'kill slay eat assassinate exterminate execute chop food annhilate'.split():
if each in words:
for every in 'them it all chick chickens cows animals animal cow beast thing flesh meat'.split():
if every in words:
return 'kill'
if level == 'guard_room': #returns talk, kill, or sneak
for each in 'talk spring approach get exit jump leave'.split(): #returns talk
if each in words:
for every in 'guard him it out haystack hiding spot'.split():
if every in words:
return 'talk'
for each in 'kill assassinate butcher execute murder jump attack shoot'.split(): #returns kill
if each in words:
for every in 'him guard it fucker obstacle her'.split():
if every in words:
return 'kill'
for each in 'sneak slip creep run'.split(): #returns sneak
if each in words:
for every in 'him guard past'.split():
if every in words:
return 'sneak'
if level == 'alf_room':
for each in 'kill assassinate execute snuff strangle murder end'.split(): # returns kill
if each in words:
for every in 'him alf leader alien freak shit fuck'.split():
if every in words:
return 'kill'
for each in 'talk wake converse alert'.split():
if each in words:
for every in 'him alf leader alien fool gangster farmer it'.split():
if every in words:
return 'talk'
if level == 'beast_room': #returns behemoth
for each in 'open check pry see break move crack peek kill'.split():
if each in words:
if 'box' in words or 'it' in words or 'container' in words:
return 'behemoth'
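
# Usage sketch, based on the word lists above:
# scanner('start_room', 'go north') -> 'north' (open direction)
# scanner('start_room', 'go south') -> 'wall' (blocked in walls['start_room'])
# scanner('laser_room', 'grab the laser') -> 'laser'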
alt_walls = {
    'start_room': ('down',),
'laser_room': ('up','down'),
'strip_room': ('up','down','left'),
'clue_room': ('up','down'),
'guess_room': ('left','right'),
'farm_room': ('up','right'),
'guard_room': ('up','down'),
'alf_room': ('up','left','down'),
'answer_room': ('up','down','right'),
    'beast_room': ('up','down')
} | zorky | /zorky-2.0.zip/zorky-2.0/aliens/lexicon.py | lexicon.py |
from typing import Any, Dict, List
def safe_nested_get(d: Dict, keys: List, default=None) -> Any:
"""Get a nested key's value from a dictionary.
If the key doesn't exist, return `default` instead of raising a KeyError or TypeError.
Args:
d: A dictionary to search for 'keys'.
keys: A list representing a nested dictionary key structure.
E.g. safe_nested_get(d, keys=["a", "b", "c"] is a safe version of d["a"]["b"]["c"].
default: The value to return if the nested `keys` structure doesn't exist in `d`.
:Author: Zax Rosenberg
"""
for key in keys:
try:
d = d[key]
except (KeyError, TypeError):
# KeyError: '<key>'
# TypeError: '<type>' object is not subscriptable
return default
return d
def safe_nested_set(d: Dict, keys: List, value: Any) -> None:
"""Set a dictionary's `value` for a set of nested `keys`, inplace.
If intermediate keys don't exist, they'll be created.
In cases where `value` is a dictionary, and `keys` already exists,
the previous value will be overwritten.
To merge values instead, use `safe_nested_update`.
Args:
d: A dictionary to search for 'keys'.
keys: A list representing a nested dictionary key structure.
E.g. safe_nested_set(d, keys=["a", "b", "c"], "foo") is a safe version of d["a"]["b"]["c"] = "foo".
value: The value to set.
:Author: Zax Rosenberg
"""
for key in keys[:-1]:
d = d.setdefault(key, {})
d[keys[-1]] = value
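
# Usage sketch (illustrative values only):
# >>> d = {}
# >>> safe_nested_set(d, ["a", "b"], 1)
# >>> d
# {'a': {'b': 1}}
# >>> safe_nested_get(d, ["a", "b"])
# 1
# >>> safe_nested_get(d, ["a", "x"], default=0)
# 0
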
def safe_nested_update(d: Dict, updates: Dict) -> None:
"""Update a dictionary's contents with values from another nested dictionary, inplace.
This method avoids overwriting the lowest level key when there are collisions, instead merging them.
To overwrite on collisions, use `safe_nested_set`.
Args:
d: A dictionary to update with `updates`.
updates: A dictionary from which to take values to add to `d`.
:Author: Zax Rosenberg
"""
for k, v in updates.items():
if isinstance(v, dict):
safe_nested_update(d.setdefault(k, {}), v)
else:
d[k] = v | zoro-ds-utils | /zoro_ds_utils-0.0.1.tar.gz/zoro_ds_utils-0.0.1/zoro_ds_utils/general.py | general.py |
# Google play python API [](https://travis-ci.org/NoMore201/googleplay-api)
This project contains an unofficial API for google play interactions. The code mainly comes from
[GooglePlayAPI project](https://github.com/egirault/googleplay-api/) which is not
maintained anymore. The code was updated with some important changes:
* ac2dm authentication with checkin and device info upload
* updated search and download calls
* select the device you want to fake from a list of pre-defined values (check `device.properties`)
(defaults to a OnePlus One)
# Build
This is the recommended way to build the package, since setuptools will take care of
generating the `googleplay_pb2.py` file needed by the library (check the `setup.py`)
```
$ python setup.py build
```
# Usage
Check scripts in `test` directory for more examples on how to use this API.
```
from gpapi.googleplay import GooglePlayAPI
mail = "[email protected]"
passwd = "mypasswd"
api = GooglePlayAPI(locale="en_US", timezone="UTC", device_codename="hero2lte")
api.login(email=mail, password=passwd)
result = api.search("firefox")
for doc in result:
if 'docid' in doc:
print("doc: {}".format(doc["docid"]))
for cluster in doc["child"]:
print("\tcluster: {}".format(cluster["docid"]))
for app in cluster["child"]:
print("\t\tapp: {}".format(app["docid"]))
```
For first time logins, you should only provide email and password.
The module will take care of initializing the API, uploading device information
to the Google account you supplied, and retrieving
a Google Service Framework ID (which, from now on, will be the Android ID of your fake device).
For the next logins you **should** save the gsfId and the authSubToken, and provide them as parameters
to the login function. If you log in again with email and password, this is the equivalent of
re-initializing your Android device with a Google account, invalidating the previous gsfId and authSubToken.
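
For example (a minimal sketch; persisting the two values is up to you, here they are simply kept in memory):

```
from gpapi.googleplay import GooglePlayAPI

api = GooglePlayAPI(locale="en_US", timezone="UTC", device_codename="hero2lte")
api.login(email=mail, password=passwd)

# save these two values for later sessions
gsfId = api.gsfId
authSubToken = api.authSubToken

# next time, skip the email/password flow entirely
api = GooglePlayAPI(locale="en_US", timezone="UTC", device_codename="hero2lte")
api.login(gsfId=gsfId, authSubToken=authSubToken)
```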
| zoro-gpapi | /zoro-gpapi-0.4.5.tar.gz/zoro-gpapi-0.4.5/README.md | README.md |
from . import googleplay_pb2
from time import time
from os import path
from sys import version_info
from re import match
VERSION = version_info[0]
if VERSION == 2:
import ConfigParser
else:
import configparser
DFE_TARGETS = "CAEScFfqlIEG6gUYogFWrAISK1WDAg+hAZoCDgIU1gYEOIACFkLMAeQBnASLATlASUuyAyqCAjY5igOMBQzfA/IClwFbApUC4ANbtgKVAS7OAX8YswHFBhgDwAOPAmGEBt4OfKkB5weSB5AFASkiN68akgMaxAMSAQEBA9kBO7UBFE1KVwIDBGs3go6BBgEBAgMECQgJAQIEAQMEAQMBBQEBBAUEFQYCBgUEAwMBDwIBAgOrARwBEwMEAg0mrwESfTEcAQEKG4EBMxghChMBDwYGASI3hAEODEwXCVh/EREZA4sBYwEdFAgIIwkQcGQRDzQ2fTC2AjfVAQIBAYoBGRg2FhYFBwEqNzACJShzFFblAo0CFxpFNBzaAd0DHjIRI4sBJZcBPdwBCQGhAUd2A7kBLBVPngEECHl0UEUMtQETigHMAgUFCc0BBUUlTywdHDgBiAJ+vgKhAU0uAcYCAWQ/5ALUAw1UwQHUBpIBCdQDhgL4AY4CBQICjARbGFBGWzA1CAEMOQH+BRAOCAZywAIDyQZ2MgM3BxsoAgUEBwcHFia3AgcGTBwHBYwBAlcBggFxSGgIrAEEBw4QEqUCASsWadsHCgUCBQMD7QICA3tXCUw7ugJZAwGyAUwpIwM5AwkDBQMJA5sBCw8BNxBVVBwVKhebARkBAwsQEAgEAhESAgQJEBCZATMdzgEBBwG8AQQYKSMUkAEDAwY/CTs4/wEaAUt1AwEDAQUBAgIEAwYEDx1dB2wGeBFgTQ"
GOOGLE_PUBKEY = "AAAAgMom/1a/v0lblO2Ubrt60J2gcuXSljGFQXgcyZWveWLEwo6prwgi3iJIZdodyhKZQrNWp5nKJ3srRXcUW+F1BD3baEVGcmEgqaLZUNBjm057pKRI16kB0YppeGx5qIQ5QjKzsR8ETQbKLNWgRY0QRNVz34kMJR3P/LgHax/6rmf5AAAAAwEAAQ=="
ACCOUNT = "HOSTED_OR_GOOGLE"
# parse phone config from the file 'device.properties'.
# if you want to add another phone, just create another section in
# the file. Some configurations for common phones can be found here:
# https://github.com/yeriomin/play-store-api/tree/master/src/main/resources
filepath = path.join(path.dirname(path.realpath(__file__)),
'device.properties')
if VERSION == 2:
config = ConfigParser.ConfigParser()
else:
config = configparser.ConfigParser()
config.read(filepath)
class InvalidLocaleError(Exception):
pass
class InvalidTimezoneError(Exception):
pass
def getDevicesCodenames():
"""Returns a list containing devices codenames"""
return config.sections()
def getDevicesReadableNames():
"""Returns codename and readable name for each device"""
return [{'codename': s,
             'readableName': config.get(s, 'userreadablename')}
for s in getDevicesCodenames()]
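
# e.g. getDevicesReadableNames() might return entries like
# [{'codename': 'bacon', 'readableName': 'OnePlus One'}, ...],
# depending on the sections defined in device.properties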
class DeviceBuilder(object):
def __init__(self, device):
self.device = {}
for (key, value) in config.items(device):
self.device[key] = value
def setLocale(self, locale):
# test if provided locale is valid
if locale is None or type(locale) is not str:
raise InvalidLocaleError()
# check if locale matches the structure of a common
# value like "en_US"
if match(r'[a-z]{2}\_[A-Z]{2}', locale) is None:
raise InvalidLocaleError()
self.locale = locale
def setTimezone(self, timezone):
if timezone is None or type(timezone) is not str:
timezone = self.device.get('timezone')
if timezone is None:
raise InvalidTimezoneError()
self.timezone = timezone
def getBaseHeaders(self):
return {"Accept-Language": self.locale.replace('_', '-'),
"X-DFE-Encoded-Targets": DFE_TARGETS,
"User-Agent": self.getUserAgent(),
"X-DFE-Client-Id": "am-android-google",
"X-DFE-MCCMNC": self.device.get('celloperator'),
"X-DFE-Network-Type": "4",
"X-DFE-Content-Filters": "",
"X-DFE-Request-Params": "timeoutMs=4000"}
def getDeviceUploadHeaders(self):
headers = self.getBaseHeaders()
headers["X-DFE-Enabled-Experiments"] = "cl:billing.select_add_instrument_by_default"
headers["X-DFE-Unsupported-Experiments"] = ("nocache:billing.use_charging_poller,"
"market_emails,buyer_currency,prod_baseline,checkin.set_asset_paid_app_field,"
"shekel_test,content_ratings,buyer_currency_in_app,nocache:encrypted_apk,recent_changes")
headers["X-DFE-SmallestScreenWidthDp"] = "320"
headers["X-DFE-Filter-Level"] = "3"
return headers
def getUserAgent(self):
version_string = self.device.get('vending.versionstring')
if version_string is None:
version_string = '8.4.19.V-all [0] [FP] 175058788'
return ("Android-Finsky/{versionString} ("
"api=3"
",versionCode={versionCode}"
",sdk={sdk}"
",device={device}"
",hardware={hardware}"
",product={product}"
",platformVersionRelease={platform_v}"
",model={model}"
",buildId={build_id}"
",isWideScreen=0"
",supportedAbis={supported_abis}"
")").format(versionString=version_string,
versionCode=self.device.get('vending.version'),
sdk=self.device.get('build.version.sdk_int'),
device=self.device.get('build.device'),
hardware=self.device.get('build.hardware'),
product=self.device.get('build.product'),
platform_v=self.device.get('build.version.release'),
model=self.device.get('build.model'),
build_id=self.device.get('build.id'),
supported_abis=self.device.get('platforms').replace(',', ';'))
def getAuthHeaders(self, gsfid):
headers = {"User-Agent": ("GoogleAuth/1.4 ("
"{device} {id}"
")").format(device=self.device.get('build.device'),
id=self.device.get('build.id'))}
if gsfid is not None:
headers['device'] = "{0:x}".format(gsfid)
return headers
def getLoginParams(self, email, encrypted_passwd):
return {"Email": email,
"EncryptedPasswd": encrypted_passwd,
"add_account": "1",
"accountType": ACCOUNT,
"google_play_services_version": self.device.get('gsf.version'),
"has_permission": "1",
"source": "android",
"device_country": self.locale[0:2],
"lang": self.locale,
"client_sig": "38918a453d07199354f8b19af05ec6562ced5788",
"callerSig": "38918a453d07199354f8b19af05ec6562ced5788"}
def getAndroidCheckinRequest(self):
request = googleplay_pb2.AndroidCheckinRequest()
request.id = 0
request.checkin.CopyFrom(self.getAndroidCheckin())
request.locale = self.locale
request.timeZone = self.timezone
request.version = 3
request.deviceConfiguration.CopyFrom(self.getDeviceConfig())
request.fragment = 0
return request
def getDeviceConfig(self):
libList = self.device['sharedlibraries'].split(",")
featureList = self.device['features'].split(",")
localeList = self.device['locales'].split(",")
glList = self.device['gl.extensions'].split(",")
platforms = self.device['platforms'].split(",")
hasFiveWayNavigation = (self.device['hasfivewaynavigation'] == 'true')
hasHardKeyboard = (self.device['hashardkeyboard'] == 'true')
deviceConfig = googleplay_pb2.DeviceConfigurationProto()
deviceConfig.touchScreen = int(self.device['touchscreen'])
deviceConfig.keyboard = int(self.device['keyboard'])
deviceConfig.navigation = int(self.device['navigation'])
deviceConfig.screenLayout = int(self.device['screenlayout'])
deviceConfig.hasHardKeyboard = hasHardKeyboard
deviceConfig.hasFiveWayNavigation = hasFiveWayNavigation
deviceConfig.screenDensity = int(self.device['screen.density'])
deviceConfig.screenWidth = int(self.device['screen.width'])
deviceConfig.screenHeight = int(self.device['screen.height'])
deviceConfig.glEsVersion = int(self.device['gl.version'])
for x in platforms:
deviceConfig.nativePlatform.append(x)
for x in libList:
deviceConfig.systemSharedLibrary.append(x)
for x in featureList:
deviceConfig.systemAvailableFeature.append(x)
for x in localeList:
deviceConfig.systemSupportedLocale.append(x)
for x in glList:
deviceConfig.glExtension.append(x)
return deviceConfig
def getAndroidBuild(self):
androidBuild = googleplay_pb2.AndroidBuildProto()
androidBuild.id = self.device['build.fingerprint']
androidBuild.product = self.device['build.hardware']
androidBuild.carrier = self.device['build.brand']
androidBuild.radio = self.device['build.radio']
androidBuild.bootloader = self.device['build.bootloader']
androidBuild.device = self.device['build.device']
androidBuild.sdkVersion = int(self.device['build.version.sdk_int'])
androidBuild.model = self.device['build.model']
androidBuild.manufacturer = self.device['build.manufacturer']
androidBuild.buildProduct = self.device['build.product']
androidBuild.client = self.device['client']
androidBuild.otaInstalled = False
androidBuild.timestamp = int(time()/1000)
androidBuild.googleServices = int(self.device['gsf.version'])
return androidBuild
def getAndroidCheckin(self):
androidCheckin = googleplay_pb2.AndroidCheckinProto()
androidCheckin.build.CopyFrom(self.getAndroidBuild())
androidCheckin.lastCheckinMsec = 0
androidCheckin.cellOperator = self.device['celloperator']
androidCheckin.simOperator = self.device['simoperator']
androidCheckin.roaming = self.device['roaming']
androidCheckin.userNumber = 0
return androidCheckin | zoro-gpapi | /zoro-gpapi-0.4.5.tar.gz/zoro-gpapi-0.4.5/gpapi/config.py | config.py |
from base64 import b64decode, urlsafe_b64encode
from datetime import datetime
from cryptography.hazmat.primitives.asymmetric.utils import encode_dss_signature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.serialization import load_der_public_key
from cryptography.hazmat.primitives.asymmetric import padding
import requests
from . import googleplay_pb2, config, utils
ssl_verify = True
BASE = "https://android.clients.google.com/"
FDFE = BASE + "fdfe/"
CHECKIN_URL = BASE + "checkin"
AUTH_URL = BASE + "auth"
UPLOAD_URL = FDFE + "uploadDeviceConfig"
SEARCH_URL = FDFE + "search"
DETAILS_URL = FDFE + "details"
HOME_URL = FDFE + "homeV2"
BROWSE_URL = FDFE + "browse"
DELIVERY_URL = FDFE + "delivery"
PURCHASE_URL = FDFE + "purchase"
SEARCH_SUGGEST_URL = FDFE + "searchSuggest"
BULK_URL = FDFE + "bulkDetails"
LOG_URL = FDFE + "log"
TOC_URL = FDFE + "toc"
ACCEPT_TOS_URL = FDFE + "acceptTos"
LIST_URL = FDFE + "list"
REVIEWS_URL = FDFE + "rev"
OAUTH_SERVICE = "oauth2:https://www.googleapis.com/auth/googleplay"
CONTENT_TYPE_URLENC = "application/x-www-form-urlencoded; charset=UTF-8"
CONTENT_TYPE_PROTO = "application/x-protobuf"
class LoginError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class RequestError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class SecurityCheckError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class GooglePlayAPI(object):
"""Google Play Unofficial API Class
Usual APIs methods are login(), search(), details(), bulkDetails(),
download(), browse(), reviews() and list()."""
def __init__(self, locale="en_US", timezone="UTC", device_codename="bacon",
proxies_config=None):
self.authSubToken = None
self.gsfId = None
self.device_config_token = None
self.deviceCheckinConsistencyToken = None
self.dfeCookie = None
self.proxies_config = proxies_config
self.deviceBuilder = config.DeviceBuilder(device_codename)
self.setLocale(locale)
self.setTimezone(timezone)
def setLocale(self, locale):
self.deviceBuilder.setLocale(locale)
def setTimezone(self, timezone):
self.deviceBuilder.setTimezone(timezone)
def encryptPassword(self, login, passwd):
"""Encrypt credentials using the google publickey, with the
RSA algorithm"""
# structure of the binary key:
#
# *-------------------------------------------------------*
# | modulus_length | modulus | exponent_length | exponent |
# *-------------------------------------------------------*
#
# modulus_length and exponent_length are uint32
binaryKey = b64decode(config.GOOGLE_PUBKEY)
# modulus
i = utils.readInt(binaryKey, 0)
modulus = utils.toBigInt(binaryKey[4:][0:i])
# exponent
j = utils.readInt(binaryKey, i + 4)
exponent = utils.toBigInt(binaryKey[i + 8:][0:j])
# calculate SHA1 of the pub key
digest = hashes.Hash(hashes.SHA1(), backend=default_backend())
digest.update(binaryKey)
h = b'\x00' + digest.finalize()[0:4]
        # generate a public key: a DER-encoded DSS signature is a SEQUENCE of
        # two INTEGERs, the same ASN.1 shape as a PKCS#1 RSAPublicKey, so
        # encode_dss_signature is reused here to build the DER blob
der_data = encode_dss_signature(modulus, exponent)
publicKey = load_der_public_key(der_data, backend=default_backend())
# encrypt email and password using pubkey
to_be_encrypted = login.encode() + b'\x00' + passwd.encode()
ciphertext = publicKey.encrypt(
to_be_encrypted,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None
)
)
return urlsafe_b64encode(h + ciphertext)
def setAuthSubToken(self, authSubToken):
self.authSubToken = authSubToken
def getHeaders(self, upload_fields=False):
"""Return the default set of request headers, which
can later be expanded, based on the request type"""
if upload_fields:
headers = self.deviceBuilder.getDeviceUploadHeaders()
else:
headers = self.deviceBuilder.getBaseHeaders()
if self.gsfId is not None:
headers["X-DFE-Device-Id"] = "{0:x}".format(self.gsfId)
if self.authSubToken is not None:
headers["Authorization"] = "Bearer %s" % self.authSubToken
if self.device_config_token is not None:
headers["X-DFE-Device-Config-Token"] = self.device_config_token
if self.deviceCheckinConsistencyToken is not None:
headers["X-DFE-Device-Checkin-Consistency-Token"] = self.deviceCheckinConsistencyToken
if self.dfeCookie is not None:
headers["X-DFE-Cookie"] = self.dfeCookie
return headers
def checkin(self, email, ac2dmToken):
headers = self.getHeaders()
headers["Content-Type"] = CONTENT_TYPE_PROTO
request = self.deviceBuilder.getAndroidCheckinRequest()
stringRequest = request.SerializeToString()
res = requests.post(CHECKIN_URL, data=stringRequest,
headers=headers, verify=ssl_verify,
proxies=self.proxies_config)
response = googleplay_pb2.AndroidCheckinResponse()
response.ParseFromString(res.content)
self.deviceCheckinConsistencyToken = response.deviceCheckinConsistencyToken
# checkin again to upload gfsid
request.id = response.androidId
request.securityToken = response.securityToken
request.accountCookie.append("[" + email + "]")
request.accountCookie.append(ac2dmToken)
stringRequest = request.SerializeToString()
requests.post(CHECKIN_URL,
data=stringRequest,
headers=headers,
verify=ssl_verify,
proxies=self.proxies_config)
return response.androidId
def uploadDeviceConfig(self):
"""Upload the device configuration of the fake device
        selected in the __init__ method to the google account."""
upload = googleplay_pb2.UploadDeviceConfigRequest()
upload.deviceConfiguration.CopyFrom(self.deviceBuilder.getDeviceConfig())
headers = self.getHeaders(upload_fields=True)
stringRequest = upload.SerializeToString()
response = requests.post(UPLOAD_URL, data=stringRequest,
headers=headers,
verify=ssl_verify,
timeout=60,
proxies=self.proxies_config)
response = googleplay_pb2.ResponseWrapper.FromString(response.content)
try:
if response.payload.HasField('uploadDeviceConfigResponse'):
self.device_config_token = response.payload.uploadDeviceConfigResponse
self.device_config_token = self.device_config_token.uploadDeviceConfigToken
except ValueError:
pass
def login(self, email=None, password=None, gsfId=None, authSubToken=None):
"""Login to your Google Account.
For first time login you should provide:
* email
* password
For the following logins you need to provide:
* gsfId
* authSubToken"""
if email is not None and password is not None:
# First time setup, where we obtain an ac2dm token and
# upload device information
encryptedPass = self.encryptPassword(email, password).decode('utf-8')
# AC2DM token
params = self.deviceBuilder.getLoginParams(email, encryptedPass)
params['service'] = 'ac2dm'
params['add_account'] = '1'
params['callerPkg'] = 'com.google.android.gms'
headers = self.deviceBuilder.getAuthHeaders(self.gsfId)
headers['app'] = 'com.google.android.gsm'
response = requests.post(AUTH_URL, data=params, verify=ssl_verify,
proxies=self.proxies_config)
data = response.text.split()
params = {}
for d in data:
if "=" not in d:
continue
k, v = d.split("=", 1)
params[k.strip().lower()] = v.strip()
if "auth" in params:
ac2dmToken = params["auth"]
elif "error" in params:
if "NeedsBrowser" in params["error"]:
raise SecurityCheckError("Security check is needed, try to visit "
"https://accounts.google.com/b/0/DisplayUnlockCaptcha "
"to unlock, or setup an app-specific password")
raise LoginError("server says: " + params["error"])
else:
raise LoginError("Auth token not found.")
self.gsfId = self.checkin(email, ac2dmToken)
self.getAuthSubToken(email, encryptedPass)
self.uploadDeviceConfig()
elif gsfId is not None and authSubToken is not None:
# no need to initialize API
self.gsfId = gsfId
self.setAuthSubToken(authSubToken)
# check if token is valid with a simple search
self.search('drv')
else:
raise LoginError('Either (email,pass) or (gsfId, authSubToken) is needed')
def getAuthSubToken(self, email, passwd):
requestParams = self.deviceBuilder.getLoginParams(email, passwd)
requestParams['service'] = 'androidmarket'
requestParams['app'] = 'com.android.vending'
headers = self.deviceBuilder.getAuthHeaders(self.gsfId)
headers['app'] = 'com.android.vending'
response = requests.post(AUTH_URL,
data=requestParams,
verify=ssl_verify,
headers=headers,
proxies=self.proxies_config)
data = response.text.split()
params = {}
for d in data:
if "=" not in d:
continue
k, v = d.split("=", 1)
params[k.strip().lower()] = v.strip()
if "token" in params:
master_token = params["token"]
second_round_token = self.getSecondRoundToken(master_token, requestParams)
self.setAuthSubToken(second_round_token)
elif "error" in params:
raise LoginError("server says: " + params["error"])
else:
raise LoginError("auth token not found.")
def getSecondRoundToken(self, first_token, params):
if self.gsfId is not None:
params['androidId'] = "{0:x}".format(self.gsfId)
params['Token'] = first_token
params['check_email'] = '1'
params['token_request_options'] = 'CAA4AQ=='
params['system_partition'] = '1'
params['_opt_is_called_from_account_manager'] = '1'
params['service'] = OAUTH_SERVICE
params.pop('Email')
params.pop('EncryptedPasswd')
headers = self.deviceBuilder.getAuthHeaders(self.gsfId)
headers['app'] = 'com.android.vending'
response = requests.post(AUTH_URL,
data=params,
headers=headers,
verify=ssl_verify,
proxies=self.proxies_config)
data = response.text.split()
params = {}
for d in data:
if "=" not in d:
continue
k, v = d.split("=", 1)
params[k.strip().lower()] = v.strip()
if "auth" in params:
return params["auth"]
elif "error" in params:
raise LoginError("server says: " + params["error"])
else:
raise LoginError("Auth token not found.")
def executeRequestApi2(self, path, post_data=None, content_type=CONTENT_TYPE_URLENC, params=None):
if self.authSubToken is None:
raise LoginError("You need to login before executing any request")
headers = self.getHeaders()
headers["Content-Type"] = content_type
if post_data is not None:
response = requests.post(path,
data=str(post_data),
headers=headers,
params=params,
verify=ssl_verify,
timeout=60,
proxies=self.proxies_config)
else:
response = requests.get(path,
headers=headers,
params=params,
verify=ssl_verify,
timeout=60,
proxies=self.proxies_config)
message = googleplay_pb2.ResponseWrapper.FromString(response.content)
if message.commands.displayErrorMessage != "":
raise RequestError(message.commands.displayErrorMessage)
return message
def searchSuggest(self, query):
params = {"c": "3",
"q": requests.utils.quote(query),
"ssis": "120",
"sst": "2"}
data = self.executeRequestApi2(SEARCH_SUGGEST_URL, params=params)
entryIterator = data.payload.searchSuggestResponse.entry
return list(map(utils.parseProtobufObj, entryIterator))
    def search(self, query):
        """Search the Play Store for apps matching the given query string."""
if self.authSubToken is None:
raise LoginError("You need to login before executing any request")
path = SEARCH_URL + "?c=3&q={}".format(requests.utils.quote(query))
# FIXME: not sure if this toc call should be here
self.toc()
data = self.executeRequestApi2(path)
if utils.hasPrefetch(data):
response = data.preFetch[0].response
else:
response = data
resIterator = response.payload.listResponse.doc
return list(map(utils.parseProtobufObj, resIterator))
def details(self, packageName):
"""Get app details from a package name.
packageName is the app unique ID (usually starting with 'com.')."""
path = DETAILS_URL + "?doc={}".format(requests.utils.quote(packageName))
data = self.executeRequestApi2(path)
return utils.parseProtobufObj(data.payload.detailsResponse.docV2)
def bulkDetails(self, packageNames):
"""Get several apps details from a list of package names.
This is much more efficient than calling N times details() since it
requires only one request. If an item is not found it returns an empty object
instead of throwing a RequestError('Item not found') like the details() function
Args:
packageNames (list): a list of app IDs (usually starting with 'com.').
Returns:
a list of dictionaries containing docv2 data, or None
if the app doesn't exist"""
params = {'au': '1'}
req = googleplay_pb2.BulkDetailsRequest()
req.docid.extend(packageNames)
data = req.SerializeToString()
message = self.executeRequestApi2(BULK_URL,
post_data=data.decode("utf-8"),
content_type=CONTENT_TYPE_PROTO,
params=params)
response = message.payload.bulkDetailsResponse
return [None if not utils.hasDoc(entry) else
utils.parseProtobufObj(entry.doc)
for entry in response.entry]
def home(self, cat=None):
path = HOME_URL + "?c=3&nocache_isui=true"
if cat is not None:
path += "&cat={}".format(cat)
data = self.executeRequestApi2(path)
if utils.hasPrefetch(data):
response = data.preFetch[0].response
else:
response = data
resIterator = response.payload.listResponse.doc
return list(map(utils.parseProtobufObj, resIterator))
def browse(self, cat=None, subCat=None):
"""Browse categories. If neither cat nor subcat are specified,
return a list of categories, otherwise it return a list of apps
using cat (category ID) and subCat (subcategory ID) as filters."""
path = BROWSE_URL + "?c=3"
if cat is not None:
path += "&cat={}".format(requests.utils.quote(cat))
if subCat is not None:
path += "&ctr={}".format(requests.utils.quote(subCat))
data = self.executeRequestApi2(path)
return utils.parseProtobufObj(data.payload.browseResponse)
def list(self, cat, ctr=None, nb_results=None, offset=None):
"""List all possible subcategories for a specific category. If
also a subcategory is provided, list apps from this category.
Args:
cat (str): category id
ctr (str): subcategory id
nb_results (int): if a subcategory is specified, limit number
of results to this number
offset (int): if a subcategory is specified, start counting from this
result
Returns:
A list of categories. If subcategory is specified, a list of apps in this
category.
"""
path = LIST_URL + "?c=3&cat={}".format(requests.utils.quote(cat))
if ctr is not None:
path += "&ctr={}".format(requests.utils.quote(ctr))
if nb_results is not None:
path += "&n={}".format(requests.utils.quote(str(nb_results)))
if offset is not None:
path += "&o={}".format(requests.utils.quote(str(offset)))
data = self.executeRequestApi2(path)
clusters = []
if ctr is None:
# list subcategories
for pf in data.preFetch:
for cluster in pf.response.payload.listResponse.doc:
clusters.extend(cluster.child)
return [c.docid for c in clusters]
else:
apps = []
for d in data.payload.listResponse.doc: # categories
for c in d.child: # sub-category
for a in c.child: # app
apps.append(utils.parseProtobufObj(a))
return apps
def reviews(self, packageName, filterByDevice=False, sort=2,
nb_results=None, offset=None):
"""Browse reviews for an application
Args:
packageName (str): app unique ID.
filterByDevice (bool): filter results for current device
sort (int): sorting criteria (values are unknown)
nb_results (int): max number of reviews to return
offset (int): return reviews starting from an offset value
Returns:
            a list of dict objects, one per review, containing the protobuf
            data returned from the api
"""
# TODO: select the number of reviews to return
path = REVIEWS_URL + "?doc={}&sort={}".format(requests.utils.quote(packageName), sort)
if nb_results is not None:
path += "&n={}".format(nb_results)
if offset is not None:
path += "&o={}".format(offset)
if filterByDevice:
path += "&dfil=1"
data = self.executeRequestApi2(path)
output = []
for review in data.payload.reviewResponse.getResponse.review:
output.append(utils.parseProtobufObj(review))
return output
def _deliver_data(self, url, cookies):
headers = self.getHeaders()
response = requests.get(url, headers=headers,
cookies=cookies, verify=ssl_verify,
stream=True, timeout=60,
proxies=self.proxies_config)
total_size = response.headers.get('content-length')
chunk_size = 32 * (1 << 10)
return {'data': response.iter_content(chunk_size=chunk_size),
'total_size': total_size,
'chunk_size': chunk_size}
def delivery(self, packageName, versionCode=None, offerType=1,
downloadToken=None, expansion_files=False):
"""Download an already purchased app.
Args:
packageName (str): app unique ID (usually starting with 'com.')
versionCode (int): version to download
offerType (int): different type of downloads (mostly unused for apks)
downloadToken (str): download token returned by 'purchase' API
            expansion_files (bool): whether to also fetch expansion (OBB) files
Returns:
Dictionary containing apk data and a list of expansion files. As stated
in android documentation, there can be at most 2 expansion files, one with
main content, and one for patching the main content. Their names should
follow this format:
[main|patch].<expansion-version>.<package-name>.obb
Data to build this name string is provided in the dict object. For more
info check https://developer.android.com/google/play/expansion-files.html
"""
if versionCode is None:
# pick up latest version
appDetails = self.details(packageName).get('details').get('appDetails')
versionCode = appDetails.get('versionCode')
params = {'ot': str(offerType),
'doc': packageName,
'vc': str(versionCode)}
headers = self.getHeaders()
if downloadToken is not None:
params['dtok'] = downloadToken
response = requests.get(DELIVERY_URL, headers=headers,
params=params, verify=ssl_verify,
timeout=60,
proxies=self.proxies_config)
response = googleplay_pb2.ResponseWrapper.FromString(response.content)
if response.commands.displayErrorMessage != "":
raise RequestError(response.commands.displayErrorMessage)
elif response.payload.deliveryResponse.appDeliveryData.downloadUrl == "":
raise RequestError('App not purchased')
else:
result = {}
result['docId'] = packageName
result['additionalData'] = []
result['splits'] = []
downloadUrl = response.payload.deliveryResponse.appDeliveryData.downloadUrl
cookie = response.payload.deliveryResponse.appDeliveryData.downloadAuthCookie[0]
cookies = {
str(cookie.name): str(cookie.value)
}
result['file'] = self._deliver_data(downloadUrl, cookies)
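            # Split APKs (for apps distributed as bundles) are delivered
            # alongside the base APK.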
for split in response.payload.deliveryResponse.appDeliveryData.split:
a = {}
a['name'] = split.name
a['file'] = self._deliver_data(split.downloadUrl, None)
result['splits'].append(a)
if not expansion_files:
return result
for obb in response.payload.deliveryResponse.appDeliveryData.additionalFile:
a = {}
# fileType == 0 -> main
# fileType == 1 -> patch
if obb.fileType == 0:
obbType = 'main'
else:
obbType = 'patch'
a['type'] = obbType
a['versionCode'] = obb.versionCode
a['file'] = self._deliver_data(obb.downloadUrl, None)
result['additionalData'].append(a)
return result
def download(self, packageName, versionCode=None, offerType=1, expansion_files=False):
"""Download an app and return its raw data (APK file). Free apps need
to be "purchased" first, in order to retrieve the download cookie.
        If you want to download an already purchased app, use the *delivery*
        method instead.
Args:
packageName (str): app unique ID (usually starting with 'com.')
versionCode (int): version to download
offerType (int): different type of downloads (mostly unused for apks)
            expansion_files (bool): whether to also fetch expansion (OBB) files
        Returns:
Dictionary containing apk data and optional expansion files
(see *delivery*)
"""
if self.authSubToken is None:
raise LoginError("You need to login before executing any request")
if versionCode is None:
# pick up latest version
appDetails = self.details(packageName).get('details').get('appDetails')
versionCode = appDetails.get('versionCode')
headers = self.getHeaders()
params = {'ot': str(offerType),
'doc': packageName,
'vc': str(versionCode)}
#self.log(packageName)
response = requests.post(PURCHASE_URL, headers=headers,
params=params, verify=ssl_verify,
timeout=60,
proxies=self.proxies_config)
response = googleplay_pb2.ResponseWrapper.FromString(response.content)
if response.commands.displayErrorMessage != "":
raise RequestError(response.commands.displayErrorMessage)
else:
dlToken = response.payload.buyResponse.downloadToken
return self.delivery(packageName, versionCode, offerType, dlToken,
expansion_files=expansion_files)
def log(self, docid):
log_request = googleplay_pb2.LogRequest()
log_request.downloadConfirmationQuery = "confirmFreeDownload?doc=" + docid
timestamp = int(datetime.now().timestamp())
log_request.timestamp = timestamp
string_request = log_request.SerializeToString()
response = requests.post(LOG_URL,
data=string_request,
headers=self.getHeaders(),
verify=ssl_verify,
timeout=60,
proxies=self.proxies_config)
response = googleplay_pb2.ResponseWrapper.FromString(response.content)
if response.commands.displayErrorMessage != "":
raise RequestError(response.commands.displayErrorMessage)
def toc(self):
response = requests.get(TOC_URL,
headers=self.getHeaders(),
verify=ssl_verify,
timeout=60,
proxies=self.proxies_config)
data = googleplay_pb2.ResponseWrapper.FromString(response.content)
tocResponse = data.payload.tocResponse
if utils.hasTosContent(tocResponse) and utils.hasTosToken(tocResponse):
self.acceptTos(tocResponse.tosToken)
if utils.hasCookie(tocResponse):
self.dfeCookie = tocResponse.cookie
return utils.parseProtobufObj(tocResponse)
def acceptTos(self, tosToken):
params = {
"tost": tosToken,
"toscme": "false"
}
response = requests.get(ACCEPT_TOS_URL,
headers=self.getHeaders(),
params=params,
verify=ssl_verify,
timeout=60,
proxies=self.proxies_config)
data = googleplay_pb2.ResponseWrapper.FromString(response.content)
return utils.parseProtobufObj(data.payload.acceptTosResponse)
@staticmethod
def getDevicesCodenames():
return config.getDevicesCodenames()
@staticmethod
def getDevicesReadableNames():
return config.getDevicesReadableNames() | zoro-gpapi | /zoro-gpapi-0.4.5.tar.gz/zoro-gpapi-0.4.5/gpapi/googleplay.py | googleplay.py |
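
# Example usage sketch (assumes `api` is a logged-in instance of the client
# class defined above; method names match those implemented here, and
# 'com.example.app' is a placeholder package name):
#
#   api.details('com.example.app')
#   api.bulkDetails(['com.example.app', 'com.example.other'])
#   apk = api.download('com.example.app')
#   with open('app.apk', 'wb') as f:
#       for chunk in apk['file']['data']:
#           f.write(chunk)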
====
zoro
====
Before we introduce you to zoro, please do not confuse it with Zorro_ which is
a networking library that has nothing to do with this one.
The name 'zoro' comes from a Japanese onomatopoeic phrase 'zoro zoro' which
signifies a sound a horde makes as it goes somewhere.
Having to deal with a build process that involves both backend and frontend
code, transpiled languages like CoffeeScript, LiveScript, LESS or Compass,
exotic configuration files or settings that need to be swapped before going
into production... The horde of tasks that need to be completed before a
project can be served to the masses can be hard to deal with without software
to help us. On the other hand, learning a complex build system may seem like a
chore.
Zoro tries to fill the gap between writing small (shell) scripts, and mastering
a full build system like Buildout. I chose Python for this task, not only
because I develop Python software, but also because of its vast standard
library that can simplify many tasks without pulling in dozens of third-party
libraries. Zoro is also a simple and pure Python module, so you do not need
anything other than the Python interpreter installed on your system. This makes
it not only easy to install, but also portable across platforms.
In fact, the ``zoro`` module itself does not hide any of the modules and
functions it imports from the standard library, so you can ``from zoro import
*`` to access them without having to add many lines of imports. Furthermore,
through its API, zoro tries to stay as close to bare Python as possible. After
all, why invent a new language if there is already a good one (or ten).
.. contents::
License
=======
Zoro is released under MIT license. See the source code for copyright and
license.
Installation
============
You can install zoro from PyPI as usual::
easy_install zoro
or::
pip install zoro
Basic concept
=============
Somewhat similar to GNU Make, zoro allows you to easily define build targets,
and run various commands within them. This is achieved through the use of
``zoro.Make`` class. Let's take a look at a real-life example of such a class
and discuss its usage. ::
#!/usr/bin/env python
from zoro import *
class Targets(Make):
"""
Build my project.
"""
def __init__(self):
super(Targets, self).__init__()
            self._parser.add_option('-k', '--key', help='API key',
dest='api_key')
def dev(self):
""" start test runners, servers and compilers """
wait_for_interrupt(
run(node_local('lsc -cwbo static/js src/ls')),
run('compass watch -c tools/compass.rb'),
run(python('app/main'), env={'APPENV': 'development'}),
)
    def test(self):
""" run unit tests """
wait_for_interrupt(
watch(python('tools/run_tests'), '.', '*.py'),
)
def build(self):
""" prepares the project for deployment """
self.clean()
copytree('app', 'build/app')
copytree('static', 'build/static')
patch('build/app/conf.py', lambda s: self._patch_key(s))
cleanup('build', '*.pyc')
run(python('tools/cachebust build/app/templates'), wait=True)
def _patch_key(self, s):
        key = self._options.api_key
if key is None:
err('No API key specified', 1)
return re.sub(r"API_KEY='[^']+'", "API_KEY='%s'" % key, s)
if __name__ == '__main__':
Targets()()
This file is usually saved as 'zorofile' in the project's root directory. The
shebang line at the top of the file allows us to run this file without
explicitly naming the interpreter (on Linux and UNIX systems at least). On
Windows we also include a 'zorofile.cmd' file to go with it. The contents of
the file may look like this::
@echo off
python zorofile %*
Now we can start calling the zorofile directly.
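For example, assuming the targets defined above, a typical session might look
like this::

    ./zorofile -l            # list available targets
    ./zorofile dev           # start watchers and the development server
    ./zorofile build -k KEY  # build for deployment with a production API key

(Here ``KEY`` is a placeholder for your actual API key.)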
Importing zoro functions
~~~~~~~~~~~~~~~~~~~~~~~~
Normally, when using zoro, we import everything from ``zoro`` module with::
from zoro import *
This pulls in not only the functions and classes defined by zoro itself, but
also anything and everything zoro itself imports. This includes (among other
things, the ``os`` module, ``sys``, ``time``, ``platform``, ``shlex``,
``datetime``, etc). For a full listing of what's imported, you should look at
the source code.
Targets class
~~~~~~~~~~~~~
The next thing we notice is the ``Targets`` class. It's a subclass of the
``zoro.Make`` class, and we use it to house all our build targets, as well as
any utility methods we might need.
The constructor
~~~~~~~~~~~~~~~
The constructor of the ``zoro.Make`` class builds a parser object (created by
``optparse`` module). The parser is used to define and parse command line
arguments passed to our zorofile. In our subclass, we augment the default
parser with a new option '-k', which we will use to pass a production API key
during the build process.
Parsed positional arguments and options are stored as ``_args`` and
``_options`` instance attributes respectively and can be accessed by all instance
methods.
Targets and utilities
~~~~~~~~~~~~~~~~~~~~~
Let's cover the utility methods first. In our example, we have one utility
method which replaces the API key in our configuration module. The reason we
made it an instance method instead of a function defined outside the class is
that this way we have access to all properties on the class, including the
``_options`` attribute mentioned in the previous section.
The reason utility methods are prefixed with an underscore is that methods
without a leading underscore will be treated as build targets.
You will also note that we are using the ``re`` module without explicitly
importing it. We can do that because it is already imported in the ``zoro``
module.
Apart from the constructor and the utility method, there are also three build
targets: 'dev', 'test', and 'build'. All three targets are normal Python
methods. They have docstrings, the first lines of which are used in the help
message when the zorofile is run with the '-l' switch.
The 'dev' target is used when we want to develop the application. It
facilitates live compilation of LiveScript_ and Compass_ code and runs our
application's built-in development server. This is achieved by using the
``zoro.run()`` function.
The ``zoro.run()`` function executes commands asynchronously by default. This
means that the function itself returns before the command exits. This is
convenient because the commands in the 'dev' target will run indefinitely until
they receive a keyboard interrupt.
The first command is passed to ``zoro.node_local()`` function. This function
constructs the correct path for the locally installed NodeJS_ dependencies. The
actual command to run is dependent on the platform we are on, and this function
also takes care of ironing out the differences.
The third command is a python script, so we are passing it to ``zoro.python()``
function, which prepends 'python' and appends the '.py' extension. You will
also notice that the third command uses an ``env`` keyword argument to the
``zoro.run()`` function. This allows us to override or add environment
variables specifically for that command.
All three commands in the 'dev' target are wrapped in
``zoro.wait_for_interrupt()`` call. This function takes child process objects
or watchdog_ observers as positional arguments, and terminates them all when
the zorofile receives a keyboard interrupt. Because ``zoro.run()`` returns a
child process object for the command it executes, we can pass its return value
directly to ``zoro.wait_for_interrupt()``.
The second target, 'test', looks very similar to the 'dev' target, but it runs
its command using ``zoro.watch()`` instead of ``zoro.run()``. The
``zoro.watch()`` function takes three arguments. The first one is the same as
``zoro.run()``. The second argument is a path that should be monitored for
changes and the last argument is a glob pattern to use as a filter. Whenever a
file or directory under the monitored path, matching the specified glob
pattern, is modified, the command is executed. This allows us to rerun our
tests whenever we modify a Python module.
Finally, the 'build' target creates a 'build' directory and prepares the code
for deployment. It uses the ``shutil.copytree()`` function to copy the
directories into the target directory, calls ``zoro.patch()`` to patch the
configuration file with the help from the utility method, and uses
``zoro.cleanup()`` to remove unneeded files.
Running the targets
~~~~~~~~~~~~~~~~~~~
To run the targets, we need to call the instance of our ``Targets`` class. This
is done in an ``if`` block so that it is only run when the zorofile is called
directly.
API documentation
=================
There is no separate API documentation, but you will find the source code to be
well-documented. The code is less than 700 lines *with* inline documentation,
so you should just dig in. You will find examples for each function in the
docstrings.
Reporting bugs
==============
Please report any bugs to the project's `BitBucket bugtracker`_.
.. _Zorro: https://pypi.python.org/pypi/Zorro
.. _LiveScript: http://livescript.net/
.. _Compass: http://compass-style.org/
.. _watchdog: http://pythonhosted.org//watchdog/
.. _BitBucket bugtracker: https://bitbucket.org/brankovukelic/zoro/issues
| zoro | /zoro-1.14.tar.gz/zoro-1.14/README.rst | README.rst |
from __future__ import unicode_literals, print_function
import re
import shlex
import subprocess
import platform
import os
import time
import sys
import shutil
import fnmatch
import stat
import optparse
import datetime
from shutil import (copy, copy2, copytree, rmtree, move)
from os import (remove, removedirs, rename, renames, rmdir)
try:
from watchdog.events import PatternMatchingEventHandler
nowatch = False
from watchdog.observers import Observer
from watchdog.observers.read_directory_changes import WindowsApiObserver
except ValueError:
# This only happens on Windows
pass
except ImportError:
# watchdog is not installed
nowatch = True
__author__ = 'Branko Vukelic'
__version__ = '1.14'
ZORO_CONF = {
'debug': False
}
class NoTargetError(Exception):
pass
if nowatch is False:
class PatternWatcher(PatternMatchingEventHandler):
""" Event handler used for monitoring file changes with watchdog
This is a subclass of ``watchdog.events.PatternMatchingEventHandler``,
so it is suitable for watching glob patterns within a subtree of a
project. It is not meant to be used directly, but through a wrapper
function ``watch()``.
Please do not rely on this class' API since it's considered an internal
implementation detail and not offered as an API.
"""
def __init__(self, fn, *args, **kwargs):
self.fn = fn
super(PatternWatcher, self).__init__(*args, **kwargs)
def run(self, reason='some unknown fs event'):
print('Restarting at %s because of %s' % (datetime.datetime.now(),
reason))
self.fn()
def on_modified(self, event):
self.run('file/directory modification')
# You can use this to print separators
separator = '\n\n-------------------------------------------------------\n\n'
def environ(name):
""" Read environment variable
This is a shortcut for ``os.environ.get()`` call.
"""
return os.environ.get(name)
def setenv(name, value):
""" Sets the environment variable """
os.environ[name] = value
def venv():
""" Gets the name of the virtualenv we are in
This function will return the value of the ``VIRTUAL_ENV`` environment
variable, which should correspond to the name of the active virtualenv (if
any).
"""
return environ('VIRTUAL_ENV')
def get_filename(path):
""" Returns filename without extension
This function extracts the filename without extension from a full path.
"""
return os.path.splitext(os.path.basename(path))[0]
def where(cmd):
""" Looks up the PATH and finds executables returning None if not found
This is a Python version of commands like ``where`` and ``which``. It will
look for executables on the system ``PATH`` and return the full path for
the command. On platforms like Windows, you do not need to specify any of
the common executable extensions since the function will try '.exe',
'.cmd', and '.bat' automatically.
The intention is to make this function completely platform independent so
this function should be used in preference over native shell commands.
This function is a modified version of a `StackOverflow answer`_, which
takes into account the common aliases for commands on Windows OS.
.. _StackOverflow answer: http://stackoverflow.com/a/377028/234932
"""
def is_exe(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
filename = os.path.basename(cmd)
filedir = os.path.dirname(cmd)
if filedir:
if is_exe(cmd):
return cmd
else:
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
executable = os.path.join(path, cmd)
if is_exe(executable):
return executable
if platform.system() != 'Windows':
return None
if cmd.endswith('.exe'):
return where('%s.cmd' % get_filename(cmd))
if cmd.endswith('.cmd'):
return where('%s.bat' % get_filename(cmd))
if cmd.endswith('.bat'):
return None
return where('%s.exe' % get_filename(cmd))
def yesno(prompt, default='y'):
""" Asks a yes/no question and return the answer as boolean
    Prompts the user for a choice between Yes and No ('y' and 'n'). The return
    value is a boolean value that tells us whether the user answered 'y'.
    Choices are restricted to 'y' and 'n', and the function will not allow any
    other values.
    The ``default`` argument can be used to specify a letter that should be the
    default (default is 'y'). Depending on this argument the prompt will be
suffixed with either '[Y/n]' or '[y/N]' where the upper-case letter is
supposed to represent the default.
Example::
>>> yesno('Can you see this?', default='n')
Can you see this? [y/N]: _
"""
if default == 'y':
choices = '[Y/n]'
else:
choices = '[y/N]'
def get_choice():
s = raw_input('%s %s: ' % (prompt, choices))
if s:
return s[0].lower()
return default
choice = get_choice()
while choice and choice not in ['y', 'n']:
print("Please type 'y' or 'n'")
choice = get_choice()
choice = choice or default
return choice == 'y'
def ask(prompt, default=None):
""" Obtains user input with optional default if there is no input
Prompts the user for input and returns the entered value. If the default
value is supplied (and is not None), the prompt will be suffixed with
'[DEFAULT]' where 'DEFAULT' is the value of the ``default`` argument.
Example::
>>> ask('What is your name?', default='Anonymous')
What is your name? [Anonymous]: _
"""
if default is not None:
prompt += ' [%s]' % default
return raw_input('%s: ' % prompt) or default
def err(msg, exit=False):
""" Writes message to ``stderr`` and exits if exit is not False
    The main purpose of this function is to write to ``stderr``.
    The second argument represents the status code that should be returned on
    exit. When this is set to ``False`` the function will not cause the script
    to terminate (which is the default), but only write to stderr.
Example::
>>> err('Oops!', 1)
"""
sys.stderr.write(msg)
if exit is not False:
sys.exit(exit)
def write(path, content):
""" Writes content to a file at path
This function is a user-friendly way to write to files. It will prompt the
user before writing to existing files, and it will do basic housekeeping by
closing the file after writing.
Its writes are not encoded, so you should make sure the content is properly
encoded before writing. You should also note that it will write using the
``w`` flag, so any contents in existing files will be overwritten.
>>> write('example.txt', 'This is just an example')
>>> write('existing.txt', 'I killed your filez, muwahaha!')
existing.txt exists. Overwrite? [y/N]: _
"""
if os.path.exists(path) and not yesno('%s exists. Overwrite?' % path, 'n'):
return
f = open(path, 'w')
f.write(content)
f.close()
def read(path):
""" Reads a file
Reads from a file and returns its unprocessed contents. If the file does
not exist, ``None`` is returned instead.
>>> read('example.txt')
'This is just an example'
"""
if not os.path.exists(path):
return
f = open(path, 'r')
content = f.read()
f.close()
return content
def install_requirements(path, options='', wait=True):
""" Install from requirements file using pip """
return run('pip install %s -r %s' % (options, path), wait=wait)
def pip_install(package_name, options='', wait=True):
""" Install a package using pip """
return run('pip install %s %s' % (options, package_name), wait=wait)
def easy_install(package_name, options='', wait=True):
""" Install a package using easy_install """
return run('easy_install %s %s' % (options, package_name), wait=wait)
def install_node_requirements(options='', wait=True):
""" Install requirements from package.json using NPM """
return run('npm install %s' % options, wait=wait)
def npm_install(package_name, options='', wait=True):
""" Install a package using NPM """
return run('npm install %s %s' % (options, package_name), wait=wait)
def patch(path, patch_fn):
""" Patches a file using the patch_fn
    This function will open the specified path, read its contents, pass it to
``patch_fn``, and write back the function's return value to the same file.
Example::
>>> patch('example.txt', lambda s: s.upper())
>>> read('example.txt')
'THIS IS JUST AN EXAMPLE'
"""
if not os.path.exists(path):
return
f = open(path, 'r')
content = f.read()
f.close()
f = open(path, 'w')
f.write(patch_fn(content))
f.close()
def cleanup(path, pattern):
""" Remove all files and directories under ``path`` that match ``pattern``
This function takes a root path, and a glob pattern, and removes any files
or directories that match the pattern recursively.
Example::
>>> cleanup('src', '*.pyc')
"""
for root, dirs, files in os.walk(path):
for filename in fnmatch.filter(files, pattern):
remove(os.path.join(root, filename))
for dirname in fnmatch.filter(dirs, pattern):
rmtree(os.path.join(root, dirname))
def cmdargs(cmd):
""" Splits the command into list using shlex, and leaves lists intact
This function is a wrapper around ``shlex.split()`` which splits strings
and leaves iterables alone.
It is used internally throughout the ``zoro`` module to allow specifying of
commands as both strings, and lists and tuples.
Example::
>>> cmdargs('foo bar baz')
['foo', 'bar', 'baz']
>>> cmdargs(['bar', 'foo'])
['bar', 'foo']
"""
if not hasattr(cmd, '__iter__'):
return shlex.split(cmd)
return cmd
def node_local(cmd):
""" Returns command for locally instaled NodeJS command-line scripts
Builds a local ``node_modules/.bin`` version of the command. This is done
to ease calling NodeJS scripts that are installed inside the project
directory as dependencies.
This function only returns the command as a list. It doesn't check if the
command actually exists, nor does it run it. Use it with ``run()`` or
``watch()``.
Note that any arguments that you supply are preserved.
Example::
>>> node_local('r.js')
['sh', 'node_modules/.bin/r.js']
>>> node_local('r.js -o build.js')
['sh', 'node_modules/.bin/r.js', '-o', 'build.js']
On Windows::
>>> node_local('r.js')
['node_modules/.bin/r.js.cmd']
>>> node_local('r.js -o build.js')
['node_modules/.bin/r.js.cmd', '-o', 'build.js']
"""
cmd = cmdargs(cmd)
cmd[0] = os.path.normpath('node_modules/.bin/%s' % cmd[0])
if platform.system() == 'Windows':
cmd[0] += '.cmd'
else:
cmd.insert(0, 'sh')
return cmd
def python(cmd):
""" Returns command for python scripts
This is a simple utility function that adds a ``.py`` extension to
specified command and prefixes it with ``python``.
The function only returns the command as a list. It doesn't check whether
the script exists or whether it needs a ``.py`` extension, nor does it run
the command. Use it with ``run()`` and ``watch()``.
Example::
>>> python('toutf sometext.txt')
['python', 'toutf.py', 'sometext.txt']
"""
cmd = cmdargs(cmd)
cmd[0] = '%s.py' % os.path.normpath(cmd[0])
cmd.insert(0, 'python')
return cmd
def run(cmd, wait=False, env={}):
""" Runs a command
    Runs a command either asynchronously (default) or synchronously with
optional environment specific to the subprocess.
The ``cmd`` argument can either be a string or a list.
Note that the default behavior is non-blocking (the function will not block
and wait for the command to exit). If you want the function to block until
the command is finished, pass ``True`` as ``wait`` argument.
If you want the command to run with environment variables that you do not
want to set globally, run it with ``env`` argument. The argument should be
a dict of variable-value pairs.
Return value of the function is a handle for the child process. This is a
return value of ``subprocess.Popen()``, so look at the Python documentation
for more details on how to use this value.
Any commands you run will share the STDOUT and STDIN with the parent
process. Because of this, running multiple commands in parallel
(asynchronously, without the ``wait`` flag) will cause their output to be
mixed in the terminal in which the calling script was run. This is
intentional.
Example::
>>> run('compass watch') # run will not wait for watch to end
>>> run('compass compile', wait=True) # run waits for compile to end
>>> run(python('myapp'), env={'APP_ENV': 'DEVELOPMENT'})
"""
cmd = cmdargs(cmd)
    # dict.update() returns None, so copy the environment first and then
    # update the copy in place
    full_env = os.environ.copy()
    full_env.update(env)
shell = False
if sys.platform == 'win32':
# On Widows we want to use shell mode, but not on Linux
shell = True
if ZORO_CONF['debug']:
print('Running command: %s' % cmd)
    child = subprocess.Popen(cmd, shell=shell, env=full_env)
if wait:
child.wait()
return child
def watch(cmd, watch_path, watch_patterns):
""" Watches a path and runs the command whenever files/directories change
    Note that this feature is highly experimental and is known to fail from
time to time.
The ``cmd`` argument can be either an iterable or a string.
This function will set up watchdog_ to monitor a path specified using the
``watch_path`` argument with ``watch_pattern`` glob pattern filter, and run
the command each time there is a change. The commands are run in blocking
mode using ``run()`` with ``wait`` flag, but the ``watch()`` function
itself is non-blocking.
The command is run at least once before path monitoring starts.
Note that the function will raise a ``RuntimeError`` exception if watchdog
is not installed or cannot be imported.
The function's return value is a watchdog observer.
.. _watchdog: https://pypi.python.org/pypi/watchdog
"""
if nowatch:
raise RuntimeError('Watchdog is not installed.')
run(cmd, True)
watch_path = os.path.abspath(os.path.normpath(watch_path))
print('Watching %s' % watch_path)
if platform.system() == 'Windows':
observer = WindowsApiObserver()
else:
observer = Observer()
handler = PatternWatcher(
fn=lambda: run(cmd, True),
patterns=watch_patterns
)
observer.schedule(handler, os.path.normpath(watch_path), recursive=True)
observer.start()
return observer
def wait_for_interrupt(*children):
""" Waits for keyboard interrupt and terminates child processes
This function takes any number of child processes or watchdog observers as
positional arguments and waits for keyboard interrupt, terminating the
processes and stopping observers.
A prompt 'Press Ctrl-C to terminate' will be shown when it's called.
    This function is blocking. It will not return at any time. It will cause
    the parent script to exit with 0 status on interrupt.
Note that this function does not check if arguments are child processes or
observers. It will simply terminate/stop anything it can and exit.
It can be used (and it is meant to be used) with functions like ``run()``
and ``watch()``.
Example::
>>> wait_for_interrupt(run('compass watch'))
Press Ctrl-C to terminate
"""
print('Press Ctrl-C to terminate')
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
for c in children:
if hasattr(c, 'terminate'):
c.terminate()
if hasattr(c, 'stop'):
c.stop()
sys.exit(0)
class Make(object):
""" Build management class
This class is used to organize your build files called zorofiles. The
instances are callable, and the build file is executed by calling them.
This class is meant to be subclassed and not used directly. When
subclassing ``Make``, be sure to add a docstring to your subclass since it
is used as a help message when running the build script without any options
or with the -h switch.
To define build targets, simply add methods to your subclass. Each method
may have a docstring which is used to display help message when listing
targets using the -l switch.
A typical example of a build script would look like this (triple-quotes are
replaced with double quotes since the example appears in docstrings)::
class MyBuild(Make):
"" Build my project ""
def target(self):
"" Do something here ""
pass
if __name__ == '__main__':
MyBuild()()
The class is called twice, once to create an instance, and once to call the
instance.
"""
_args = None
_options = None
def __init__(self):
""" Create the parser object
The sole purpose of the constructor is to build the parser object. If
you wish to customize the process, you should overload the constructor
and build your own parser. The parser object is expected to implement
the ``optparse`` API and be added to the instance as ``_parser``
attribute.
An idiomatic way to customize the parser would be to call the
superclass' constructor and then manipulate the parser object. For
instance::
class MyBuild(Make):
"" Build my project ""
def __init__(self):
super(MyBuild, self).__init__()
self._parser.add_option('-q', '--quiet',
action='store_false', ...)
Since all arguments and options are passed to the build target methods,
you should add all desired options here.
"""
doc = self.__doc__ or ''
self._parser = optparse.OptionParser(
usage='%prog TARGET [TARGET...] [options]',
epilog=' '.join([l.strip() for l in doc.split('\n')])
)
self._parser.add_option('-l', '--list', action='store_true',
help='list all available targets')
def __call__(self):
""" Execute the build
Calling the instance parses all the command line options and arguments
passed to the script, and has three possible outcomes depending on
them. If no target (first positional argument) is passed, it will
        either print full usage instructions or a list of targets (if the -l
        switch is passed). If the target argument is present, the specified
        target is executed. Finally, if the target name is there, but the
        instance does not define such a target, an error is shown, and the
        script exits with status code 1.
"""
(options, args) = self._parser.parse_args()
self._options = options
self._args = args
if options.list is True:
self._parser.print_usage()
print('Available targets:\n')
print(self._list())
elif len(args):
target = args.pop(0)
if hasattr(self, target):
method = getattr(self, target)
print('Running task: %s' % method.__name__)
method()
else:
err("No target with name '%s'.\nRun this script without "
'arguments to see a list of targets.\n' % target, 1)
else:
self._parser.print_help()
def _list(self):
""" Return a formatted list of targets
This method is an internal helper method. You generally shouldn't need
to overload this method except for purely cosmetic purposes.
"""
targets = []
for name in dir(self):
if not name.startswith('_'):
target = getattr(self, name)
docs = target.__doc__ or ' undocumented'
docs = docs.split('\n')[0].strip()
targets.append('%12s: %s' % (name, docs))
return '\n'.join(targets) | zoro | /zoro-1.14.tar.gz/zoro-1.14/zoro.py | zoro.py |
# ZORP: A helpful GWAS parser

## Why?
ZORP is intended to abstract away differences in file formats, and help you work with GWAS data from many
different sources.
- Provide a single unified interface to read text, gzip, or tabixed data
- Separation of concerns between reading and parsing (with parsers that can handle the most common file formats)
- Includes helpers to auto-detect data format and filter for variants of interest
## Why not?
ZORP provides a high level abstraction. This means that it is convenient, at the expense of speed.
For GWAS files, ZORP does not sort the data for you, because doing so in python would be quite slow. You will still
need to do some basic data preparation before using.
## Installation
By default, zorp installs with as few python dependencies as practical. For more performance, and to use special
features, install the additional required dependencies as follows:
`$ pip install zorp[perf,lookups]`
The snp-to-rsid lookup requires a very large file in order to work efficiently. You can download the pre-generated file
using the `zorp-assets` command line script, as follows.
(use "--no-update" to skip warnings about already having the latest version)
```bash
$ zorp-assets download --type snp_to_rsid --tag genome_build GRCh37 --no-update
$ zorp-assets download --type snp_to_rsid --tag genome_build GRCh37
```
Or build it manually (which may require first downloading a large source file):
`$ zorp-assets build --type snp_to_rsid --tag genome_build GRCh37`
Assets will be downloaded to the least user-specific location available, which may be overridden by setting the
environment variable `ZORP_ASSETS_DIR`. Run `zorp-assets show --all` to see the currently selected asset directory.
### A note on rsID lookups
When developing on your laptop, you may not wish to download 16 GB of data per rsID lookup. A much smaller "test"
dataset is available, which contains rsID data for a handful of pre-selected genes of known biological functionality.
`$ zorp-assets download --type snp_to_rsid_test --tag genome_build GRCh37`
To use it in your python script, simply add an argument to the SnpToRsid constructor:
`rsid_finder = lookups.SnpToRsid('GRCh37', test=True)`
If you have generated your own lookup using the code in this repo (`make_rsid_lookup.py`), you may also replace
the genome build with a hardcoded path to the LMDB file of lookup data. This use case is fairly uncommon, however.
## Usage
### Python
```python
from zorp import lookups, readers, parsers
# Create a reader instance. This example specifies each option for clarity, but sniffers are provided to auto-detect
# common format options.
sample_parser = parsers.GenericGwasLineParser(marker_col=1, pvalue_col=2, is_neg_log_pvalue=True,
delimiter='\t')
reader = readers.TabixReader('input.bgz', parser=sample_parser, skip_rows=1, skip_errors=True)
# After parsing the data, values of pre-defined fields can be used to perform lookups for the value of one field
# Lookups can be reusable functions with no dependence on zorp
rsid_finder = lookups.SnpToRsid('GRCh37')
reader.add_lookup('rsid', lambda variant: rsid_finder(variant.chrom, variant.pos, variant.ref, variant.alt))
# Sometimes a more powerful syntax is needed- the ability to look up several fields at once, or clean up parsed data
# in some way unique to this dataset
reader.add_transform(lambda variant: mutate_entire_variant(variant))
# We can filter data to the variants of interest. If you use a domain specific parser, columns can be referenced by name
reader.add_filter('chrom', '19') # This row must have the specified value for the "chrom" field
reader.add_filter(lambda row: row.neg_log_pvalue > 7.301) # Provide a function that can operate on all parsed fields
reader.add_filter('neg_log_pvalue') # Exclude values with missing data for the named field
# Iteration returns containers of cleaned, parsed data (with fields accessible by name).
for row in reader:
print(row.chrom)
# Tabix files support iterating over all or part of the file
for row in reader.fetch('X', 500_000, 1_000_000):
print(row)
# Write a compressed, tabix-indexed file containing the subset of variants that match filters, choosing only specific
# columns. The data written out will be cleaned and standardized by the parser into a well-defined format.
out_fn = reader.write('outfile.txt', columns=['chrom', 'pos', 'pvalue'], make_tabix=True)
# Real data is often messy. If a line fails to parse, the problem will be recorded.
for number, message, raw_line in reader.errors:
print('Line {} failed to parse: {}'.format(number, message))
```
### Command line file conversion
The file conversion feature of zorp is also available as a command line utility. See `zorp-convert --help` for details
and the full list of supported options.
This utility is currently in beta; please inspect the results carefully.
To auto-detect columns based on a library of commonly known file formats:
`$ zorp-convert --auto infile.txt --dest outfile.txt --compress`
Or specify your data columns exactly:
`$ zorp-convert infile.txt --dest outfile.txt --index --skip-rows 1 --chrom_col 1 --pos_col 2 --ref_col 3 --alt_col 4 --pvalue_col 5 --beta_col 6 --stderr_beta_col 7 --allele_freq_col 8`
The `--index` option requires that your file be sorted first. If not, you can tabix the standard output format manually
as follows.
```
$ (head -n 1 <filename.txt> && tail -n +2 <file> | sort -k1,1 -k 2,2n) | bgzip > <filename.sorted.gz>
$ tabix <filename.sorted.gz> -p vcf
```
## Development
To install dependencies and run in development mode:
`pip install -e '.[test,perf,lookups]'`
To run unit tests, use
```bash
$ flake8 zorp
$ mypy zorp
$ pytest tests/
```
| zorp | /zorp-0.3.8.tar.gz/zorp-0.3.8/README.md | README.md |
[](https://codecov.io/gh/epw505/zorro_df)

# Zorro DF
Zorro DF is a python package for masking pandas dataframe objects in order to
anonymise data. It allows you to strip away identifiable column names and string
values, replacing them with a generic naming convention. The package is built
under the scikit-learn transformer framework and hence can be plugged into any
scikit-learn Pipeline.
The package source-code can be found at http://github.com/epw505/zorro_df
## Getting Started
### Requirements
```
pandas>=0.25.3
scikit-learn>=0.22.1
```
### Installation
Zorro DF can be installed using `pip` with the following command:
```
pip install zorro_df
```
## Examples
Once the package is installed, you can load Zorro DF into your python session
and use the Masker object to mask your data.
```
from zorro_df import mask_dataframe as mf
example_masker = mf.Masker()
example_masker.fit(data)
masked_data = example_masker.transform(data)
```
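
Because `Masker` implements the scikit-learn transformer API (`fit`/`transform`),
it can also be dropped into a scikit-learn `Pipeline`. A minimal sketch
(assuming `data` is the pandas DataFrame you want to anonymise):

```
from sklearn.pipeline import Pipeline
from zorro_df import mask_dataframe as mf

pipeline = Pipeline([("masker", mf.Masker())])
masked_data = pipeline.fit_transform(data)
```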
## Tests
The test suite for Zorro DF is built using `pytest` with the `pytest-mock`
plugin. Install both as follows.
```
pip install pytest
pip install pytest-mock
```
Once they are installed, you can run the test suite from the root directory of
Zorro Df.
```
pytest tests/
```
## Future Development
* Reverse masking to allow retrieval of original data
* Additional numerical scaling techniques | zorro-df | /zorro_df-1.1.1.tar.gz/zorro_df-1.1.1/README.md | README.md |
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from zorro_df import numerical_scalers as ns
class Masker(BaseEstimator, TransformerMixin):
"""Class to mask pandas dataframes to make data anonymous.
The class transforms column names to a generic numbered column system. It
    also converts categorical levels to a generic numbered system. The mappings
    are saved in the Masker object so they can be referenced for interpretability.
"""
def __init__(self, numerical_scaling=True, scaling_method="MinMaxScaler"):
if not isinstance(numerical_scaling, bool):
raise TypeError("numerical_scaling should be a bool")
if not isinstance(scaling_method, str):
raise TypeError("scaling_method should be a str")
if scaling_method not in ["MinMaxScaler"]:
raise ValueError("{0} is not a recognised scaling method in zorro_df".format(scaling_method))
super().__init__()
self.numerical_scaling = numerical_scaling
self.scaling_method=scaling_method
def get_column_map(self, X):
"""Construct the dictionary map for masking column names.
Parameters
----------
X : pd.DataFrame
Dataframe to take columns from.
"""
if not isinstance(X, pd.DataFrame):
raise TypeError("X should be a pd.DataFrame")
new_columns = ["column_" + str(n) for n in range(0, X.shape[1])]
self.column_map = dict(zip(X.columns, new_columns))
def get_categorical_map(self, X):
"""Construct the dictionary map for masking categorical levels.
Parameters
----------
X : pd.DataFrame
Dataframe to take categorical values from.
"""
if not isinstance(X, pd.DataFrame):
raise TypeError("X should be a pd.DataFrame")
categorical_columns = list(X.select_dtypes(include=["object", "category"]))
self.categorical_map = {}
for col in categorical_columns:
value_map = {}
unique_vals = X[col].unique()
for i, j in enumerate(unique_vals):
value_map[j] = "level_" + str(i)
self.categorical_map[col] = value_map
def get_numerical_map(self, X):
"""Construct the dictionary of scalers fir scaling numerical features.
Parameters
----------
X : pd.DataFrame
Dataframe to take numerical values from.
"""
if not isinstance(X, pd.DataFrame):
raise TypeError("X should be a pd.DataFrame")
numerical_columns = list(X.select_dtypes(exclude=["object", "category", "datetime"]))
self.numerical_map = {}
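        # Fit one scaler per numerical column; the scaler class is chosen by
        # name from the numerical_scalers module (validated in __init__).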
for col in numerical_columns:
            self.numerical_map[col] = getattr(ns, self.scaling_method)(X[col])
def fit(self, X, y=None):
"""Fits the Masker class to the training data.
This makes a call to get_categorical_map and get_column_map for a given
dataframe.
Parameters
----------
X : pd.DataFrame
Dataframe to generate the maps from.
y : None
Not required, only there for scikit-learn functionality.
"""
if not isinstance(X, pd.DataFrame):
raise TypeError("X should be a pd.DataFrame")
self.get_categorical_map(X)
self.get_column_map(X)
self.get_numerical_map(X)
return self
def transform(self, X, y=None):
"""Masks the dataframe using the maps generated in the fit method.
This can only be called once the transformer has been fit. Also, the
dataframe you're transforming should match the data that the
transformer was fit on.
Parameters
----------
X : pd.DataFrame
Dataframe to mask.
y : None
Not required, only there for scikit-learn functionality.
"""
if not isinstance(X, pd.DataFrame):
raise TypeError("X should be a pd.DataFrame")
X = X.copy()
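        # Replace each categorical level with its anonymised 'level_<n>' label.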
for col, col_map in self.categorical_map.items():
X[col] = X[col].map(col_map)
if self.numerical_scaling:
for col, scaler in self.numerical_map.items():
X[col] = scaler.scale_array()
X.columns = self.column_map.values()
return X | zorro-df | /zorro_df-1.1.1.tar.gz/zorro_df-1.1.1/zorro_df/mask_dataframe.py | mask_dataframe.py |
import numpy as np
import pandas as pd
class Scaler(object):
"""Scaler class containing key functionality for scaling.
Specific scaling classes inherit from this parent class.
Parameters
----------
array_like : array_like
1d array of numerical values to be scaled.
Attributes
----------
array_like : array_like
"""
def __init__(self, array_like):
if np.ndim(array_like) != 1:
raise TypeError("array_like should be an array-like with np.ndim==1")
        for val in array_like:
            # Check the type first, so np.isnan() is never called on a
            # non-numeric value; accept numpy scalar types as well.
            if not isinstance(val, (int, float, np.number)):
                raise TypeError("array_like should contain only numeric values")
            if np.isnan(val):
                raise ValueError("array_like contains np.NaN value")
self.array_like = array_like
self.array_type = type(array_like)
def convert_array_type(self, array_like, new_type):
"""Converts an array to a given type.
        The function converts an array-like object to a list, a pandas Series,
        or a numpy ndarray, depending on new_type; any other type is returned
        unchanged.
Parameters
----------
array_like : array_like
1d numerical array to be converted to given type.
new_type : type
Given type to convert the data to.
Returns
-------
array_like : array_like
1d numerical array after type conversion.
"""
if new_type in [list, pd.Series]:
array_like = new_type(array_like)
elif new_type == np.ndarray:
array_like = np.array(array_like)
return array_like
def get_min_max_values(self):
"""Calculates the minimum and maximum of a numerical array-like object.
        The result is stored as a tuple attribute, with the first and second
        values representing the min and max respectively.
        Attributes
        ----------
        min_max_val : tuple
            Tuple containing min and max values for the passed array-like.
"""
min_val = min(self.array_like)
max_val = max(self.array_like)
self.min_max_val = (min_val, max_val)
class MinMaxScaler(Scaler):
"""Class to scale numerical values using the min max method.
Parameters
----------
array_like : array_like
1d array of numerical variables to be scaled.
Attributes
----------
array_like : array_like
1d array of numerical variables to be scaled.
min_max_val : tuple
Tuple of the minimum and maximum values of the array_like.
"""
def __init__(self, array_like):
super().__init__(array_like=array_like)
self.get_min_max_values()
def min_max_scaling(self, x, min, max):
"""Numerical computation for min_max scaling a value.
Parameters
----------
x : float
Value to be scaled.
min : float
Minimum value of the 1d array that x is contained in.
max : float
Maximum value of the 1d array that x is contained in.
Returns
-------
x : float
Scaled value for the given input.
"""
        # Identity comparison against np.NaN only catches the singleton, so
        # use np.isnan() instead.
        if np.isnan(x):
            raise ValueError("x value is np.NaN")
if max-min == 0:
return 0
x = (x - min) / (max - min)
return x
def scale_array(self):
"""Scale the array_like that the object was initialised with.
This method scales the array_like and saves the new array as an
attribute. It also returns this array.
Attributes
----------
scaled_array : array_like
1d array of scaled numerical values.
Returns
-------
scaled_array : array_like
1d array of scaled numerical values.
"""
        # List comprehension, using the min_max_scaling function defined above.
self.scaled_array = [
self.min_max_scaling(
x=x,
min=self.min_max_val[0],
max=self.min_max_val[1],
) for x in self.array_like
]
        self.scaled_array = self.convert_array_type(self.scaled_array, self.array_type)
return self.scaled_array | zorro-df | /zorro_df-1.1.1.tar.gz/zorro_df-1.1.1/zorro_df/numerical_scalers.py | numerical_scalers.py |
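
# Minimal usage sketch:
#   scaler = MinMaxScaler([1, 2, 3])
#   scaler.scale_array()  # -> [0.0, 0.5, 1.0]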
from enum import Enum
import functools
from functools import wraps
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat, pack, unpack
from einops.layers.torch import Rearrange
from beartype import beartype
from beartype.typing import Tuple, Optional, Union
from torchaudio.transforms import Spectrogram
# constants
class TokenTypes(Enum):
AUDIO = 0
VIDEO = 1
FUSION = 2
GLOBAL = 3
# functions
def exists(val):
return val is not None
def default(*args):
for arg in args:
if exists(arg):
return arg
return None
def round_down_nearest_multiple(n, divisor):
return n // divisor * divisor
def pair(t):
return (t, t) if not isinstance(t, tuple) else t
def cum_mul(it):
return functools.reduce(lambda x, y: x * y, it, 1)
def divisible_by(numer, denom):
return (numer % denom) == 0
# decorators
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# bias-less layernorm
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# geglu feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return F.gelu(gate) * x
def FeedForward(dim, mult = 4):
inner_dim = int(dim * mult * 2 / 3)
return nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, inner_dim * 2, bias = False),
GEGLU(),
nn.Linear(inner_dim, dim, bias = False)
)
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
inner_dim = dim_head * heads
self.norm = LayerNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(
self,
x,
context = None,
attn_mask = None
):
x = self.norm(x)
kv_x = default(context, x)
q, k, v = (self.to_q(x), *self.to_kv(kv_x).chunk(2, dim = -1))
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), (q, k, v))
q = q * self.scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
if exists(attn_mask):
sim = sim.masked_fill(~attn_mask, -torch.finfo(sim.dtype).max)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# main class
class Zorro(nn.Module):
def __init__(
self,
dim,
depth,
dim_head = 64,
heads = 8,
ff_mult = 4,
num_fusion_tokens = 16,
audio_patch_size: Union[int, Tuple[int, int]] = 16,
video_patch_size: Union[int, Tuple[int, int]] = 16,
video_temporal_patch_size = 2,
video_channels = 3,
spec_n_fft = 128,
spec_power = 2,
spec_win_length = 24,
spec_hop_length = None,
spec_pad = 0,
spec_center = True,
spec_pad_mode = 'reflect',
spec_aug_stretch_factor = 0.8,
spec_aug_freq_mask = 80,
spec_aug_time_mask = 80,
return_token_types: Tuple[TokenTypes] = (TokenTypes.AUDIO, TokenTypes.VIDEO, TokenTypes.FUSION)
):
super().__init__()
self.max_return_tokens = len(return_token_types)
self.return_token_types = return_token_types
return_token_types_tensor = torch.tensor(list(map(lambda t: t.value, return_token_types)))
self.register_buffer('return_token_types_tensor', return_token_types_tensor, persistent = False)
self.return_tokens = nn.Parameter(torch.randn(self.max_return_tokens, dim))
self.attn_pool = Attention(dim = dim, dim_head = dim_head, heads = heads)
# audio input
self.audio_patch_size = audio_patch_height, audio_patch_width = pair(audio_patch_size)
self.spec = Spectrogram(
n_fft = spec_n_fft,
power = spec_power,
win_length = spec_win_length,
hop_length = spec_hop_length,
pad = spec_pad,
center = spec_center,
pad_mode = spec_pad_mode
)
audio_input_dim = cum_mul(self.audio_patch_size)
self.audio_to_tokens = nn.Sequential(
Rearrange('b (h p1) (w p2) -> b h w (p1 p2)', p1 = audio_patch_height, p2 = audio_patch_width),
nn.LayerNorm(audio_input_dim),
nn.Linear(audio_input_dim, dim),
nn.LayerNorm(dim)
)
# video input
self.video_patch_size = (video_temporal_patch_size, *pair(video_patch_size))
video_input_dim = cum_mul(self.video_patch_size) * video_channels
video_patch_time, video_patch_height, video_patch_width = self.video_patch_size
self.video_to_tokens = nn.Sequential(
Rearrange('b c (t p1) (h p2) (w p3) -> b t h w (c p1 p2 p3)', p1 = video_patch_time, p2 = video_patch_height, p3 = video_patch_width),
nn.LayerNorm(video_input_dim),
nn.Linear(video_input_dim, dim),
nn.LayerNorm(dim)
)
# fusion tokens
self.fusion_tokens = nn.Parameter(torch.randn(num_fusion_tokens, dim))
# transformer
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim = dim, dim_head = dim_head, heads = heads),
FeedForward(dim = dim, mult = ff_mult)
]))
self.norm = LayerNorm(dim)
def forward(
self,
*,
audio,
video,
return_token_indices: Optional[Tuple[int]] = None
):
batch, device = audio.shape[0], audio.device
# validate video can be patched
assert all([divisible_by(numer, denom) for denom, numer in zip(self.video_patch_size, tuple(video.shape[-3:]))]), f'video shape {video.shape[-3:]} needs to be divisible by {self.video_patch_size}'
# automatically crop if audio does not yield a 2d spectrogram that is divisible by patch sizes
audio = self.spec(audio)
height, width = audio.shape[-2:]
patch_height, patch_width = self.audio_patch_size
rounded_height, rounded_width = map(lambda args: round_down_nearest_multiple(*args), ((height, patch_height), (width, patch_width)))
if (height, width) != (rounded_height, rounded_width): # just keep printing to be annoying until it is fixed
print_once(f'spectrogram yielded shape of {(height, width)}, but had to be cropped to {(rounded_height, rounded_width)} to be patchified for transformer')
audio = audio[..., :rounded_height, :rounded_width]
# to tokens
audio_tokens = self.audio_to_tokens(audio)
video_tokens = self.video_to_tokens(video)
fusion_tokens = repeat(self.fusion_tokens, 'n d -> b n d', b = batch)
# construct all tokens
audio_tokens, fusion_tokens, video_tokens = map(lambda t: rearrange(t, 'b ... d -> b (...) d'), (audio_tokens, fusion_tokens, video_tokens))
tokens, ps = pack((
audio_tokens,
fusion_tokens,
video_tokens
), 'b * d')
# construct mask (thus zorro)
token_types = torch.tensor(list((
*((TokenTypes.AUDIO.value,) * audio_tokens.shape[-2]),
*((TokenTypes.FUSION.value,) * fusion_tokens.shape[-2]),
*((TokenTypes.VIDEO.value,) * video_tokens.shape[-2]),
)), device = device, dtype = torch.long)
token_types_attend_from = rearrange(token_types, 'i -> i 1')
token_types_attend_to = rearrange(token_types, 'j -> 1 j')
# the logic goes
# every modality, including fusion can attend to self
zorro_mask = token_types_attend_from == token_types_attend_to
# fusion can attend to everything
        # parenthesise the comparison: `|` binds tighter than `==` in Python
        zorro_mask = zorro_mask | (token_types_attend_from == TokenTypes.FUSION.value)
# attend and feedforward
for attn, ff in self.layers:
tokens = attn(tokens, attn_mask = zorro_mask) + tokens
tokens = ff(tokens) + tokens
tokens = self.norm(tokens)
# final attention pooling - each modality pool token can only attend to its own tokens
return_tokens = self.return_tokens
return_token_types_tensor = self.return_token_types_tensor
if exists(return_token_indices):
assert len(set(return_token_indices)) == len(return_token_indices), 'all indices must be unique'
assert all([indice < self.max_return_tokens for indice in return_token_indices]), 'indices must range from 0 to max_num_return_tokens - 1'
return_token_indices = torch.tensor(return_token_indices, dtype = torch.long, device = device)
return_token_types_tensor = return_token_types_tensor[return_token_indices]
return_tokens = return_tokens[return_token_indices]
return_tokens = repeat(return_tokens, 'n d -> b n d', b = batch)
pool_mask = rearrange(return_token_types_tensor, 'i -> i 1') == token_types_attend_to
# global queries can attend to all tokens
        # parenthesise the comparison: `|` binds tighter than `==` in Python
        pool_mask = pool_mask | (rearrange(return_token_types_tensor, 'i -> i 1') == torch.ones_like(token_types_attend_to, dtype = torch.long) * TokenTypes.GLOBAL.value)
pooled_tokens = self.attn_pool(return_tokens, context = tokens, attn_mask = pool_mask) + return_tokens
return pooled_tokens | zorro-pytorch | /zorro_pytorch-0.1.0-py3-none-any.whl/zorro_pytorch/zorro_pytorch.py | zorro_pytorch.py |
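
# Minimal usage sketch (the shapes below are illustrative assumptions; the only
# hard requirements are the patch-divisibility checks inside Zorro.forward):
#
#   model = Zorro(dim = 512, depth = 6)
#   video = torch.randn(2, 3, 8, 32, 32)  # (batch, channels, time, height, width)
#   audio = torch.randn(2, 24000)         # raw waveform, turned into a spectrogram internally
#   tokens = model(audio = audio, video = video)  # (batch, num return tokens, dim)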
Zorro Image Registration Software
=================================
Author: Robert A. McLeod
Email: [email protected]
Zorro is a package designed for registration of image drift for dose fractionation in electron microscopy. It is specifically designed to suppress correlated noise.
Zorro is currently in beta. Our current priority is to provide better ease of installation, so if you have problems with installing Zorro please do not hesitate to open an issue.
For help on installation please see the wiki page: https://github.com/C-CINA/zorro/wiki
Zorro has the following dependencies:
* `numpy`
* `SciPy`
* `pyFFTW`
And the following optional dependencies (for loading HDF5 and TIFF files respectively):
* `PyTables`
* `scikit-image`
Zorro comes packaged with a modified version of the NumExpr virtual machine called `numexprz` that has support for `complex64` data types.
Zorro is MIT license.
Automator
---------
The Automator for Zorro and 2dx is a GUI interface to Zorro.
It has the following additional dependencies:
* `PySide`
Automator also comes with the Skulk Manager tool which may be used as a daemon to watch a directory for new image stacks and automatically process them.
Automator is LGPL license.
Feature List
------------
* Import: DM4, MRC, HDF5, stacked TIFF
* Apply gain reference to SerialEM 4/8-bit MRC stacks.
* Fourier cropping of super-resolution stacks.
* Can operate on Sun Grid Engine cluster.
* Flexible filters: dose filtering, low-pass filtering
* Stochastic hot pixel filter detects per-stack radiation damage to the detector
* CTF estimation with: CTFFIND4.1, GCTF
* Particle picking with: Gautomatch (alpha-status)
* Independent (separate even-odd frame) and non-independent FRC resolution estimators.
* Archiving with: 7z, pigz, lbzip2
* Output of diagnostic PNGs
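
Example
-------

Zorro jobs are driven by ``.zor`` configuration files. A minimal command-line sketch (``zorro -c`` is the invocation Automator issues internally; the ``automator`` entry point is the name used by the frozen build)::

    # Align a stack described by a Zorro configuration file
    zorro -c my_stack.zor

    # Launch the Automator GUI and its Skulk Manager directory watcher
    automator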
Citations
---------
McLeod, R.A., Kowal, J., Ringler, P., Stahlberg, H. 2017. Robust image alignment for cryogenic transmission electron microscopy. J. Struct. Biol. 197:279-293.
http://www.sciencedirect.com/science/article/pii/S1047847716302520
Zorro and Automator make use of or interface with the following 3rd party programs:
CTF estimation CTFFIND4.1:
Rohou, A., Grigorieff, N., 2015. CTFFIND4: Fast and accurate defocus estimation from electron micrographs. Journal of Structural Biology, Recent Advances in Detector Technologies and Applications for Molecular TEM 192, 216-221. doi:10.1016/j.jsb.2015.08.008
CTF estimation from GCTF:
Zhang, K., 2016. Gctf: Real-time CTF determination and correction. Journal of Structural Biology 193, 1-12. doi:10.1016/j.jsb.2015.11.003
4/8-bit MRC from SerialEM:
Mastronarde, D.N. 2005. Automated electron microscope tomography using robust prediction of specimen movements. J. Struct. Biol. 152:36-51.
Zorro's dose filter is ported from Unblur:
Grant, T., Grigorieff, N., 2015. Measuring the optimal exposure for single particle cryo-EM using a 2.6 Å reconstruction of rotavirus VP6. eLife Sciences e06980. doi:10.7554/eLife.06980
| zorroautomator | /zorroautomator-0.7.4b0.tar.gz/zorroautomator-0.7.4b0/README.rst | README.rst |
import cx_Freeze
import sys, glob, os, os.path
import PySide
#import traceback
##############################################################################################
# Basic way to build this: run it, run the build exe (./zorro) from another terminal,
# include, exclude as needed.
# ALWAYS CALL AS ./zorro in Linux because the exe is not on the system path
# ALWAYS CALL AS ./zorro in Linux because the exe is not on the system path
# ALWAYS CALL AS ./zorro in Linux because the exe is not on the system path
# ALWAYS CALL AS ./zorro in Linux because the exe is not on the system path
#############################################################################################
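# Typical cx_Freeze invocation (standard setup.py usage; the build path below is
# illustrative and depends on platform and Python version):
#   python cxfreeze_setup.py build
#   cd build/exe.linux-x86_64-2.7 && ./zorro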
#############################################################################################
# MUST MODIFY hooks.py IN cx_Freeze AS FOLLOWS:
#def load_scipy(finder, module):
# """the scipy module loads items within itself in a way that causes
# problems without the entire package and a number of other subpackages
# being present."""
# # finder.IncludePackage("scipy.lib")
# finder.IncludePackage("scipy._lib")
# finder.IncludePackage("scipy.misc")
# Monkey-patch cx_Freeze.hooks.load_scipy()
def load_scipy_monkeypatched(finder, module):
"""the scipy module loads items within itself in a way that causes
problems without the entire package and a number of other subpackages
being present."""
finder.IncludePackage("scipy._lib")
finder.IncludePackage("scipy.misc")
cx_Freeze.hooks.load_scipy = load_scipy_monkeypatched
#############################################################################################
# Need to exclude some dependencies that are definitely not needed.
# I would like to exclude PyQt4 but Matplotlib crashes
if sys.version_info.major == 2:
excludes = [ 'collections.abc', 'Tkinter', 'pandas', ]
else:
excludes = [ 'Tkinter', 'pandas', ]
# Need to add some things scipy has imported in a funny way.
includes = ['scipy.sparse.csgraph._validation',
'scipy.integrate.lsoda',
'scipy.integrate.vode',
'scipy.special._ufuncs_cxx',
'scipy.special._ufuncs',
'scipy.special._ellip_harm_2',
'scipy.sparse.csgraph._validation',
'atexit',
'PySide.QtGui',
'PySide.QtCore',
]
# Ok now we have trouble with the matplotlib.backends
packages = [
'matplotlib.backends.backend_qt4agg',
'PySide.QtGui',
'PySide.QtCore',
'atexit',
]
# Include graphics files. Could also add the Readme and such.
include_files = [ "automator/icons/" ]
# Include Qt libs, because by default they are not present.
# Ok, so this does not fix the Qt library mis-match problem.
"""
QtDir = os.path.dirname( PySide.__file__ )
# TODO: Windows support for dll
QtFiles = [ (os.path.join( QtDir, "shiboken.so"), "shiboken.so" ) ]
QtLibGlob = glob.glob( os.path.join( QtDir, "Qt*" ) )
for globItem in QtLibGlob:
QtFiles.append( (globItem, os.path.basename(globItem)) )
include_files += QtFiles
"""
icon = "automator/icons/CINAlogo.png"
buildOptions = dict(
packages = packages,
excludes = excludes,
includes = includes,
include_files = include_files,
icon=icon )
base = 'Console'
if sys.platform == 'win32':
gui_base = "Win32GUI"
else:
gui_base = 'Console'
executables = [
cx_Freeze.Executable( script='zorro/__main__.py',
base=base,
targetName='zorro',
copyDependentFiles=True),
cx_Freeze.Executable( script='automator/__main__.py',
base=gui_base,
targetName='automator',
copyDependentFiles=True )
]
cx_Freeze.setup(name='Zorro',
version = '0.5',
description = 'Zorro image registration software',
options = dict(build_exe = buildOptions),
executables = executables,
) | zorroautomator | /zorroautomator-0.7.4b0.tar.gz/zorroautomator-0.7.4b0/cxfreeze_setup.py | cxfreeze_setup.py |
import numpy as np
import zorro
import subprocess as sp
try:
import Queue as queue
except:
import queue
import copy
import os, os.path, glob, time, psutil, sys
import collections
from itertools import count
try:
from PySide import QtCore
except:
print( "ZorroSkulkManager failed to load PySide; no communication with Automator possible" )
#################### zorroState CLASS ###############################
# zorroSTATE state enumerators
# Nice thing about enumerators is Spyder catches syntax errors, whereas string comparisons aren't.
NEW = 0
CHANGING = 1
STABLE = 2
SYNCING = 3
READY = 4
PROCESSING = 5
FINISHED = 6
ARCHIVING = 7
COMPLETE = 8
STALE = 9
HOST_BUSY = 10
HOST_FREE = 11
HOST_ERROR = 12 # An error occured with the skulkHost
RENAME = 13
ERROR = 14 # An error occured in Zorro
ZOMBIE = 15
STATE_NAMES = { NEW:u'new', CHANGING:u'changing', STABLE:u'stable', SYNCING:u'sync', READY:u'ready',
PROCESSING:u'proc', FINISHED:u'fini', ARCHIVING:u'archiving', COMPLETE:u'complete',
STALE:u'stale', HOST_BUSY:u'busy', HOST_FREE:u'free', HOST_ERROR:u'host_error',
RENAME:u'rename', ERROR:u'error', ZOMBIE:u'zombie' }
# These are the default colors. They may be over-ridden if the zorroState is actively being processed.
STATE_COLORS = { NEW:u'darkorange', CHANGING:u'darkorange', STABLE:u'goldenrod', SYNCING:u'goldenrod', READY:u'forestgreen',
PROCESSING:u'forestgreen', FINISHED:u'indigo', ARCHIVING:u'saddlebrown', COMPLETE:u'dimgrey',
STALE:u'firebrick', HOST_BUSY:u'host_busy', HOST_FREE:u'host_free', HOST_ERROR:u'firebrick',
RENAME:u'rename', ERROR:u'firebrick', ZOMBIE:u'firebrick' }
REVERSE_STATE_NAMES = dict(zip(STATE_NAMES.values(), STATE_NAMES.keys()))
# Temporary
SORT_BY_ADD_ORDER = True
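# Typical lifecycle of a zorroState (transitions implemented in zorroState.update()
# and driven by skulkHost.poll()):
#   NEW -> CHANGING -> STABLE -> SYNCING -> READY -> PROCESSING -> FINISHED
#       -> ARCHIVING -> COMPLETE
# SYNCING is skipped for local inputs (the stack is simply renamed), ARCHIVING is
# skipped when compression is disabled, and ERROR states decay to ZOMBIE.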
class zorroState(object):
"""
zorroState contains a number of metadata attributes about a zorro job. Principally it controls the state
flow control logic, as to what the next step in the processing pipeline should be.
zorroState sorts based on priority.
state is an enum as above in the globals variables.
KEEP priority in 1st position, key in last, anything else can move about.
Some properties are retrieved automatically, such as mtime and size
It's expected that paths are normalized by skulkManager
"""
__MIN_FILE_SIZE = 1048576
# Global ID assignment for zorroState objects
__idCounter = count(0)
# zorroState( globbedFile, self.zorroDefault, self.paths, self.messageQueue )
def __init__(self, name, zorroDefault, paths, messageQueue, notify=True ):
# Hrm, really I should not give this guy the manager, but rather the messageQueue and the paths object
self.paths = paths
self.messageQueue = messageQueue
self.__host = None # Should be None unless a host is actively processing this zorroObj
# Make a unique ID for every zorroState object
# It's a string so we don't confuse it with the position in the skulkHeap
if sys.version_info >= (3,0):
self.id = str( self.__idCounter.__next__() )
else:
self.id = str( self.__idCounter.next() )
self.__priority = -1.0
self.__state = NEW
self.__name = name
self.__wait = -1.0 # SerialEM is quite slow to write files so we need a more conservative wait time
self.waitTime = 5.0
self.zorroObj = copy.deepcopy( zorroDefault )
self.__prior_mtime = None
self.__prior_size = None
# Check if extension is .cfg or .zor, otherwise assume it's data
name_ext = os.path.splitext( name )[1]
if name_ext == ".cfg" or name_ext == ".zor":
self.loadFromLog( self.__name, notify=notify )
# We assume all the files are set appropriately.
else:
# Populate names based on paths
self.setDefaultPaths()
pass
def loadFromLog( self, logName, notify=True ):
# DEBUG: I don't think this is working properly when restarting
# previously generated logs.
self.zorroObj.loadConfig( logName )
self.__state = REVERSE_STATE_NAMES[ self.zorroObj.METAstatus ]
self.decrementState()
self.__name = self.zorroObj.files[u'config']
self.stackPriority()
# print( "DEBUG: found state %d in file from %s" %( self.__state, self.zorroObj.METAstatus ))
if bool(notify):
self.messageQueue.put( [self.__state, self] )
def decrementState(self):
# Decrement the state if it was in a process that is interrupted
# TODO: should the state machine be an iterator? Is that feasible?
if self.__state == PROCESSING:
self.__state = READY
self.messageQueue.put( [self.__state, self] )
elif self.__state == SYNCING:
self.__state = STABLE
self.messageQueue.put( [self.__state, self] )
elif self.__state == ARCHIVING:
self.__state = FINISHED
self.messageQueue.put( [self.__state, self] )
def incrementState(self):
# Increment the state if it was in a process that successfully finished
if self.__state == PROCESSING:
self.__state = FINISHED
self.messageQueue.put( [self.__state, self] )
elif self.__state == SYNCING:
self.__state = READY
# We have to both rename ourselves and ask to be put into the procHeap here
self.renameToZor()
self.messageQueue.put( [self.__state, self] )
elif self.__state == ARCHIVING:
self.__state = COMPLETE
self.messageQueue.put( [self.__state, self] )
def updateConfig( self, zorroMerge ):
"""
Merge the existing self.zorroObj with the new zorroObj. Generally we
keep the old files dict but replace everything else.
"""
oldFiles = self.zorroObj.files.copy()
self.zorroObj = copy.deepcopy( zorroMerge )
self.zorroObj.files = oldFiles
pass
def setDefaultPaths(self):
"""
Sets expected filenames for config, raw, sum, align, fig
"""
self.zorroObj.files[u'original'] = self.__name
baseName = os.path.basename( self.zorroObj.files[u'original'] )
baseFront, baseExt = os.path.splitext( baseName )
self.zorroObj.files[u'config'] = os.path.join( self.paths[u'output_dir'], baseName + u".zor" )
#print( "DEFAULT PATHS: " + str(self.paths) )
#print( "OUTPUT PATH: " + self.paths['output_dir' ] )
#print( "CONFIG FILE: " + self.zorroObj.files['config'] )
if not bool(self.zorroObj.files['compressor']):
mrcExt = ".mrc"
mrcsExt = ".mrcs"
else:
mrcExt = ".mrcz"
mrcsExt = ".mrcsz"
self.zorroObj.files[u'stack'] = os.path.join( self.paths[u'raw_subdir'], baseName )
self.zorroObj.files[u'sum'] = os.path.join( self.paths[u'sum_subdir'], u"%s_zorro%s" % (baseFront, mrcExt) )
self.zorroObj.files[u'filt'] = os.path.join( self.paths[u'sum_subdir'], u"%s_zorro_filt%s"% (baseFront, mrcExt) )
self.zorroObj.files[u'align'] = os.path.join( self.paths[u'align_subdir'], u"%s_zorro_movie%s"% (baseFront, mrcsExt) )
self.zorroObj.files[u'figurePath'] = self.paths[u'fig_subdir']
self.zorroObj.files[u'gainRef'] = self.paths[u'gainRef']
# if bool( self.paths[u'gainRef'] ):
# # Gainref functionality is not required, so it may be none
# self.zorroObj.files[u'gainRef'] = os.path.join( self.paths[u'output_dir'], self.paths[u'gainRef'] )
def renameToZor(self):
"""
Rename the state from a raw .mrc/.dm4 to a config .zor, after a SYNCING operation
"""
newName = self.zorroObj.files[u'config']
self.__name = newName
self.__state = READY
self.messageQueue.put( [RENAME, self] )
def peek(self):
# Take a peek at the log on the disk for the STATE, used for loading existing logs from disk.
# TODO: zorroState.peek()
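        # The second line of a .zor log is expected to look like (value illustrative):
        #   METAstatus = proc
        # so split() gives ['METAstatus', '=', 'proc'] and index 2 is the state name.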
logName = self.zorroObj.files[u'config']
if os.path.isfile( logName ):
            with open( logName, 'r' ) as rh:
                rh.readline() # skip the first line
                METAstatus = rh.readline().split()
if METAstatus[0] == u'METAstatus':
# Reverse STATE_NAMES
return REVERSE_STATE_NAMES[METAstatus[2]]
else:
# We could alternatively just load the config here brute force
raise IOError( "METAstatus is not second line in Zorro log: %s" % logName )
pass
def stackPriority(self, multiplier = 1.0 ):
if self.__name.endswith( "zor" ):
# TODO: if people delete the stack from disk this pops an error
self.__priority = multiplier * os.path.getmtime( self.zorroObj.files['stack'] )
else:
self.__priority = multiplier * os.path.getmtime( self.__name )
def topPriority(self):
self.stackPriority( multiplier = 1E6 )
def update(self):
        # Check the state and whether it needs to be changed. I could make these functions but practically if I
        # just lay out the code nicely it's the same thing.
### NEW ###
if self.__state == COMPLETE or self.__state == SYNCING or self.__state == PROCESSING or self.__state == ARCHIVING:
# Do nothing, controlled by skulkHost
pass
elif self.__state == NEW:
# Init and goto CHANGING
self.__prior_mtime = os.path.getmtime( self.__name )
self.__prior_size = os.path.getsize( self.__name )
self.__state = CHANGING
self.messageQueue.put( [self.__state, self] )
### CHANGING ###
elif self.__state == CHANGING:
            if self.__name.endswith( "zor" ): # It's a log file, so elevate the state immediately
self.__state = READY
self.messageQueue.put( [self.__state, self] )
return
# TODO: serialEM is really slow at writing files, so this needs to be
# even more conservative.
# Else: raw file
# Check if file changed since last cycle
newSize = os.path.getsize( self.__name )
newMTime = os.path.getmtime( self.__name )
# Compare and against MINSIZE
if (newMTime == self.__prior_mtime and
newSize == self.__prior_size and
newSize >= self.__MIN_FILE_SIZE ):
# Additional conservative wait for SerialEM writing
if self.__wait < 0.0:
self.__wait = time.time()
elif self.__wait + self.waitTime < time.time():
self.__state = STABLE
self.messageQueue.put( [self.__state, self] )
# self.update() # goto next state immediately
else: # Else Still changing
self.__wait = -1.0
pass
self.__prior_mtime = os.path.getmtime( self.__name )
self.__prior_size = os.path.getsize( self.__name )
### STABLE ###
elif self.__state == STABLE:
            # Give a priority based on mtime; newer files are processed first so we always get a
            # recent file when working with Automator live
self.stackPriority()
# Check if we need to copy or if we can just rename the file
# This only works on Linux, on Windows we would have to check the drive letter
if self.paths.isLocalInput():
# Rename raw
oldName = self.__name
newName = self.zorroObj.files[u'config']
try:
# print( "DEBUG: try to rename %s to %s" % (oldName, self.zorroObj.files[u'stack']) )
os.rename( oldName, self.zorroObj.files[u'stack'] )
self.renameToZor()
except:
raise
else: # Submit self to the copy heap
self.__state = SYNCING
self.messageQueue.put( [SYNCING, self] )
# TODO: manager needs to handle renaming in this case.
### READY ###
elif self.__state == READY:
            # Generate a log file; from here on we have to track the zorroObj persistently on disk
# setDefaultFilenames was called previously, if we want to call it again in case the user changed
# something we need to do some more checking to see if name is a log or a stack.
self.zorroObj.METAstatus = STATE_NAMES[READY]
self.zorroObj.saveConfig()
# Ask to put self into procHeap
self.__state = PROCESSING
self.messageQueue.put( [PROCESSING, self] )
### FINISHED (ZORRO) ###
elif self.__state == FINISHED:
if self.zorroObj.doCompression:
self.__state = ARCHIVING
self.messageQueue.put( [ARCHIVING, self] )
else:
self.messageQueue.put( [COMPLETE, self] )
self.__state = COMPLETE
self.__priority = -1.0
pass
elif self.__state == ERROR:
self.messageQueue.put( [ERROR, self] )
self.__state = ZOMBIE
self.__priority = -1E6
elif self.__state == ZOMBIE:
# Do nothing.
pass
else:
raise ValueError( "Error, zorroState: unknown/unhandled state enumerator: %d" % self.__state )
    def __cmp__(self, other):
        # Python 2 fallback only; sort() prefers the rich comparison __lt__ below
        if other > self.priority:
            return -1
        elif other < self.priority:
            return 1
        else:
            return 0
    def __lt__(self, other):
        # Called by sort() in both Python 2 and 3, since rich comparisons take precedence over __cmp__
        if other is None:
            return False
if isinstance( other, float ):
if other > self.__priority:
return True
else:
return False
elif isinstance( other, zorroState ):
if other.priority > self.__priority:
return True
else:
return False
raise ValueError( "zorroState.__lt__: Could not compare %s to self" % other )
# We don't want to have a method for __repr__ as this is used for finding keys in priorityList and such
# but over-riding __str__ is ok.
def __str__(self):
try:
return "zorroState(%s): %s, state: %s, priority: %f" % (self.id, self.name, STATE_NAMES[self.state], self.priority)
except KeyError:
return "zorroState(%s): %s, state: %s, priority: %f" % (self.id, self.name, str(self.state), self.priority)
@property
def host(self):
raise SyntaxError( "zorroState: no accessing host, set to None if its expired." )
@host.setter
def host(self,host):
if host != None:
self.__host = host
self.update()
# Do I actually need these properties? Are they doing anything?
@property
def priority(self):
return self.__priority
@priority.setter
def priority(self,x):
self.__priority = x
@property
def state(self):
return self.__state
# User should not be able to set state, or flow control will be a huge mess
#@state.setter
#def state(self,x):
# self.__state = x
@property
def statename(self):
return STATE_NAMES[self.__state]
@property
def name(self):
return self.__name
@name.setter
def name(self,x):
self.__name = x
#################### SKULKHEAP CLASS ###############################
class skulkHeap(collections.MutableMapping):
"""
This class manages all the files the watcher in skulkManager finds, and prioritizes them as desired
by the user. It is otherwise actually a stack, not a queue, as the latest files are processed first.
items are stored as zorroStates. Keys can be either integers (reflecting priority positions) or keys
(which are filenames, generally either raw stacks or log files)
"""
def __init__(self):
collections.MutableMapping.__init__(self)
self.__priorityList = list() # format is item=[zorroState.priority, zorroState] for easy sorting
self.__store = dict() # store is a dict with the filenames as keys
self.__mutex = QtCore.QMutex()
def __getitem__(self, key):
self.__mutex.lock()
# __keytransform__ handles the int versus str keys
if type(key) == int:
# It's expensive but we have no real option but to re-sort the list every time in case
# the priorities have changed.
self.__priorityList.sort(reverse=True)
try:
return_val = self.__priorityList[key]
except:
self.__mutex.unlock()
raise KeyError( "skulkHeap cannot resolve key %s of type %s" %(key, type(key) ) )
elif type(key) == str or ( sys.version_info.major == 2 and type(key) == unicode ):
try:
return_val = self.__store[key]
except:
self.__mutex.unlock()
raise KeyError( "skulkHeap cannot resolve key %s of type %s" %(key, type(key) ) )
else:
self.__mutex.unlock()
raise KeyError( "skulkHeap cannot resolve key %s of type %s" %(key, type(key) ) )
self.__mutex.unlock()
return return_val
def __len__(self): # Just need to lock things temporarily
self.__mutex.lock()
self.__mutex.unlock()
return len( self.__store )
def __setitem__(self, key, value):
# value is a list [priority,zorroObj,status,]
self.__mutex.lock()
try:
self.__store[key] = value
# Make a priority list
self.__priorityList.append( value )
self.__priorityList.sort(reverse=True)
except:
self.__mutex.unlock()
raise
self.__mutex.unlock()
def __delitem__( self, key ):
# pop has the mutex
self.pop(key)
def __keytransform__(self, key):
# No mutex protection for empty function
return key
def __iter__(self):
# If you want to iterate over keys, use 'for key in skulkHeap.keys()'
return iter(self.__priorityList)
def next( self, STATUS ):
"""
Find the highest priority object with the given STATUS
"""
self.__mutex.lock()
for stateItem in self.__priorityList:
if stateItem.state == STATUS:
self.__mutex.unlock()
return stateItem
elif stateItem < 0.0: # COMPLETE jobs have negative priority
self.__mutex.unlock()
return None
self.__mutex.unlock()
return None
def getByName( self, searchName ):
self.__mutex.lock()
for stateItem in self.__priorityList:
if stateItem.name == searchName:
self.__mutex.unlock()
return stateItem
self.__mutex.unlock()
return None
def items(self):
self.__mutex.lock()
self.__mutex.unlock()
return self.__store.items()
def keys(self):
self.__mutex.lock()
self.__mutex.unlock()
return self.__store.keys()
def popNext( self, STATUS ):
"""
Find the highest priority object with the given STATUS and remove it
"""
self.__mutex.lock()
for J, stateItem in enumerate(self.__priorityList) :
if stateItem.state == STATUS:
self.__priorityList.pop( J )
# self.__priorityList.remove( stateItem )
# print( "DEBUG: trying to pop key: " + str( stateItem.id ) +" from dict " + str(self.__store.keys()) )
try: self.__store.pop( stateItem.id )
except: print( "%s not popped from skulkHeap" % stateItem.id )
self.__mutex.unlock()
return stateItem
elif stateItem < 0.0: # COMPLETE jobs have negative priority
self.__mutex.unlock()
return None
self.__mutex.unlock()
return None
def isLocked( self ):
state = self.__mutex.tryLock()
if state:
self.__mutex.unlock()
return not state
def pop( self, key ):
"""
Key can be an integer, a string. Returns the zorroState only.
"""
self.__mutex.lock()
# If we get an integer, get an index
if type(key) == int:
try:
returnState = self.__priorityList.pop(key)
# print( "DEBUG: trying to pop key: " + str( returnState.id ) +" from dict " + str(self.__store.keys()) )
del self.__store[ returnState.id ]
except:
self.__mutex.unlock()
raise
elif type(key) == str or ( sys.version_info.major == 2 and type(key) == unicode ):
try:
returnState = self.__store.pop(key)
# This is a bit tricky, can Python find the right zorroState?
self.__priorityList.remove( returnState )
except:
self.__mutex.unlock()
raise
else:
self.__mutex.unlock()
raise KeyError( "skulkHeapQueue cannot resolve key %s of type %s" %(key, type(key) ) )
self.__mutex.unlock()
return returnState
# def popById( self, idNumber ):
# """
# Try to find a zorroState by its unique ID, and remove and return it.
# """
# print( "locking popById" )
# self.__mutex.lock()
# for J, stateItem in enumerate(self.__priorityList):
# if stateItem.id == idNumber:
#
# # This is within a try isn't it.
# try:
# print( "Trying to remove id %d at position J" % (idNumber,J) )
# self.__priorityList.remove( J )
# except: print( "Failed to remove from priorityList" )
# try:
# print( "Trying to pop %s from store" % stateItem.id )
# self.__store.pop( stateItem.id )
# except: print( "Failed to remove from store" )
# print( "unlocking popById, found %d" % idNumber )
# self.__mutex.unlock()
# return stateItem
# pass
# print( "unlocking popById, found None" )
# self.__mutex.unlock()
# return None
def __str__( self ):
str_repr = ""
for state_id, zorroState in self.items():
str_repr += "(%s) %s | state: %s | priority: %.3f\n" % (state_id, zorroState.name, STATE_NAMES[zorroState.state], zorroState.priority)
return str_repr
#################### SKULKPATHS CLASS ###############################
class skulkPaths(collections.MutableMapping):
"""
This class is used for managing paths, which can get quite complicated if they are over networks. The
assumption is that relative paths are passed to Zorro, or saved in config files, but the user sees
the real paths (as it's less confusing). So basically it accepts user paths as inputs, which are
converted internally into a real path and a normed path. Normed paths are used by Zorro for writing
files, as it preserves the ability of the workspace to be copied, whereas the user sees the full
real path.
For future use we plan to incorporate the ability to send files via SSH/rsync anyway, so we need a
class to handle this complexity. Should also be used for handling on-the-fly compression in the future.
Possibly we could also pre-copy files to speed things up, and in general I want less file handling
inside Zorro.
Uses os.path.normpath() to collapse extraneous dots and double-dots
This class is thread-safe and can be references simultaneously from skulkManager and Automator.
TODO: some exceptions may leave the mutex locked. Use try statements to unlock the
mutex and then re-raise the exception.
"""
def __init__(self, cwd=None):
collections.MutableMapping.__init__(self)
self.__mutex = QtCore.QMutex()
# The maximum number of files to copy at once with subprocess workers
# With rsync there's some advantage to
self.maxCopyStreams = 4
# Check if we can move files from one path to another without copying accross devices
# os.stat( path1 ).st_dev == os.stat( path2 ).st_dev
# Works on Linux only, not on Windows, so need an os check as well.
# Of course parallel FTP is even faster, but IT don't like unencrypted communication
self.__real = { 'input_dir':None, 'output_dir':None, 'cwd': None,
'raw_subdir': None, 'sum_subdir':None,
'align_subdir':None, 'fig_subdir':None, 'cache_dir': None,
'qsubHeader':None, 'gainRef':None }
self.__norm = { 'input_dir':None, 'output_dir':None, 'cwd': '.',
'raw_subdir': None, 'sum_subdir':None,
'align_subdir':None, 'fig_subdir':None, 'cache_dir': None,
'qsubHeader':None, 'gainRef':None }
# Normalize paths relative to output_dir os.path.normpath(join( self.__NormPaths['output_dir'], path))
# Default cache directories
if os.name == 'nt':
self.__real['cwd'] = os.path.realpath( '.' )
self.__real['cache_dir'] = "C:\\Temp\\"
            # ERROR: this breaks if we can't make a relative path, i.e. we're on different Windows drive letters
try:
self.__norm['cache_dir'] = os.path.normpath( os.path.relpath( "C:\\Temp\\", self.__real['cwd'] ) )
except:
self.__norm['cache_dir'] = self.__real['cache_dir']
else:
self.__real['cwd'] = os.environ['PWD']
self.__real['cache_dir'] = "/scratch/"
self.__norm['cache_dir'] = os.path.normpath( os.path.relpath( "/scratch/", self.__real['cwd'] ) )
pass
# TODO: write and read self from a ConfigParser
def __setitem__( self, key, value ):
"""
Accepts either a real or a relative path and saves normed and real versions of the path
"""
self.__mutex.lock()
if bool( value ):
# if value == 'cwd': # Special
# # DEBUG: DO WE WANT TO CHANGE THE PROGRAM DIRECTORY? OR JUST REDO ALL THE PATHS?
# self.__real['cwd'] = os.path.realpath( os.path.join( os.environ['PWD'], value ) )
# self.__norm['cwd'] = os.path.relpath( os.path.join( os.environ['PWD'], value ) )
#
# # Apply cwd as a mask to every path
# self.__mutex.unlock()
# for key in self.__norm:
# if bool(self.__norm[key] ):
# self.__real[key] = os.path.realpath( os.path.join( self.__real['cwd'], self.__norm[key]) )
#
#print( "DEBUG: self.__real['cwd']: %s" % self.__real['cwd'] )
# Apply to norm path
# Generate a real path based on the current working directory.
if os.path.isabs( value ):
self.__real[key] = os.path.normpath( value )
try:
self.__norm[key] = os.path.normpath( os.path.relpath( value, start=self.__real['cwd']) )
except ValueError:
# On Windows we can get an error, due to drive letters not allowing relative paths.
self.__norm[key] = self.__real[key]
else:
self.__norm[key] = os.path.normpath( value )
self.__real[key] = os.path.normpath( os.path.join( self.__real['cwd'], value ) )
#print( "self.__norm[ %s ]: %s" % (key ,self.__norm[key]) )
#print( "self.__real[ %s ]: %s" % (key, self.__real[key]) )
self.__mutex.unlock()
def to_json( self ):
return self.__norm
def __getitem__( self, key ):
# Return normed path.
# self.__mutex.lock()
# self.__mutex.unlock()
return self.__norm[key]
def __iter__(self):
return iter(self.__norm)
def __keytransform__(self, key):
return key
def __len__(self):
return len(self.__norm)
# I would prefer to use this like a dict but it's messy without subclassing dict again
def get_real( self, key ):
"""
Return the real path, relative to root. Useful for display to the user.
"""
self.__mutex.lock()
self.__mutex.unlock()
return self.__real[key]
def keys(self):
return self.__norm.keys()
def __delitem__( self, key ):
self.__mutex.lock()
try:
self.__norm.pop(key)
self.__real.pop(key)
except:
self.__mutex.unlock()
print( "Error: no key %s in skulkPaths" % key )
self.__mutex.unlock()
def __str__(self):
self.__mutex.lock()
retstr = "%10s # %30s # %30s\n" % ( "key", "norm", "real" )
for key in self.__norm.keys():
if bool( self.__norm[key] ) and bool( self.__real[key] ):
retstr += "%10s # %30s # %30s\n" %( key, self.__norm[key], self.__real[key] )
self.__mutex.unlock()
return retstr
def __contains__( self, key ):
self.__mutex.lock()
        if key in self.__norm:
self.__mutex.unlock()
return True
self.__mutex.unlock()
return False
def isLocalInput(self):
# TODO: test if checking the drive letter is sufficient on Windows systems.
if os.name == 'nt':
if os.path.splitdrive( self.__real['input_dir'] )[0] == os.path.splitdrive( self.__real['output_dir'] )[0]:
return True
else:
return False
if os.stat( self.__real['input_dir'] ).st_dev == os.stat( self.__real['output_dir'] ).st_dev:
return True
return False
def validate( self ):
        # See if the directories exist. If not, try to make them. Zorro does this as well but it's
        # better to have user feedback at the start.
        # Cycle the mutex lock, so we block until any in-flight path update has finished
self.__mutex.lock()
self.__mutex.unlock()
errorText = ""
errorState = False
# Debug: show the paths matrix
# print( str(self) )
try:
if not bool(self.__real['input_dir']):
errorState = True; errorText += "Error: Input directory field is empty.\n"
raise ValueError
if not os.path.isdir( self.__real['input_dir'] ):
os.mkdir( self.__real['input_dir'] )
if not os.access( self.__real['input_dir'], os.R_OK ):
errorState = True; errorText += "Error: Input directory has no read permissions.\n"
if not os.access( self.__real['input_dir'], os.W_OK ):
errorText += "Warning: Input directory has no write permissions, cannot delete stacks from SSD.\n"
except OSError:
errorState = True; errorText += "Error: Input directory does not exist and could not be made.\n"
except ValueError:
pass
try:
if not bool(self.__real['output_dir']):
errorState = True; errorText += "Error: Output directory field is empty.\n"
raise ValueError
if not os.path.isdir( self.__real['output_dir'] ):
os.mkdir( self.__real['output_dir'] )
if not os.access( self.__real['output_dir'], os.R_OK ):
errorState = True; errorText += "Error: Output directory has no read permissions.\n"
if not os.access( self.__real['output_dir'], os.W_OK ):
errorState = True; errorText += "Warning: Output directory has no write permissions.\n"
except OSError:
errorState = True; errorText += "Error: Output directory does not exist and could not be made.\n"
except ValueError:
pass
if errorState:
return errorState, errorText
# Continue with subdirectories
try:
if not bool(self.__real['raw_subdir']):
self.__real['raw_subdir'] = os.path.join( self.__real['output_dir'], '../raw' )
                if hasattr( self, 'ui_FileLocDialog' ): # GUI hook, only present when driven from Automator
                    self.ui_FileLocDialog.leRawPath.setText( self.__real['raw_subdir'] ) # Doesn't fire event
                errorText += "Warning: Raw directory set to default <out>/../raw.\n"
if not os.path.isdir( self.__real['raw_subdir'] ):
os.mkdir( self.__real['raw_subdir'] )
if not os.access( self.__real['raw_subdir'], os.R_OK ):
errorState = True; errorText += "Error: Raw directory has no read permissions.\n"
if not os.access( self.__real['raw_subdir'], os.W_OK ):
errorState = True; errorText += "Warning: Raw directory has no write permissions.\n"
except OSError:
errorState = True; errorText += "Error: Raw directory does not exist and could not be made.\n"
try:
if not bool(self.__real['sum_subdir']):
self.__real['sum_subdir'] = os.path.join( self.__real['output_dir'], '../sum' )
                if hasattr( self, 'ui_FileLocDialog' ): # GUI hook, only present when driven from Automator
                    self.ui_FileLocDialog.leSumPath.setText( self.__real['sum_subdir'] ) # Doesn't fire event
                errorText += "Warning: sum directory set to default <out>/../sum.\n"
if not os.path.isdir( self.__real['sum_subdir'] ):
os.mkdir( self.__real['sum_subdir'] )
if not os.access( self.__real['sum_subdir'], os.R_OK ):
errorState = True; errorText += "Error: sum directory has no read permissions.\n"
if not os.access( self.__real['sum_subdir'], os.W_OK ):
errorState = True; errorText += "Warning: sum directory has no write permissions.\n"
except OSError:
errorState = True; errorText += "Error: sum directory does not exist and could not be made.\n"
try:
if not bool(self.__real['align_subdir']):
self.__real['align_subdir'] = os.path.join( self.__real['output_dir'], '../align' )
                if hasattr( self, 'ui_FileLocDialog' ): # GUI hook, only present when driven from Automator
                    self.ui_FileLocDialog.leAlignPath.setText( self.__real['align_subdir'] ) # Doesn't fire event
                errorText += "Warning: align directory set to default <out>/../align.\n"
if not os.path.isdir( self.__real['align_subdir'] ):
os.mkdir( self.__real['align_subdir'] )
if not os.access( self.__real['align_subdir'], os.R_OK ):
errorState = True; errorText += "Error: align directory has no read permissions.\n"
if not os.access( self.__real['align_subdir'], os.W_OK ):
errorState = True; errorText += "Warning: align directory has no write permissions.\n"
except OSError:
errorState = True; errorText += "Error: align directory does not exist and could not be made.\n"
try:
if not bool(self.__real['fig_subdir']):
self.__real['fig_subdir'] = os.path.join( self.__real['output_dir'], '../figure' )
                if hasattr( self, 'ui_FileLocDialog' ): # GUI hook, only present when driven from Automator
                    self.ui_FileLocDialog.leFiguresPath.setText( self.__real['fig_subdir'] ) # Doesn't fire event
                errorText += "Warning: figure directory set to default <out>/../figure.\n"
if not os.path.isdir( self.__real['fig_subdir'] ):
os.mkdir( self.__real['fig_subdir'] )
if not os.access( self.__real['fig_subdir'], os.R_OK ):
errorState = True; errorText += "Error: figure directory has no read permissions.\n"
if not os.access( self.__real['fig_subdir'], os.W_OK ):
errorState = True; errorText += "Warning: figure directory has no write permissions.\n"
except OSError:
errorState = True; errorText += "Error: figure directory does not exist and could not be made.\n"
# Check for path uniqueness
if self.__real['input_dir'] == self.__real['output_dir']:
errorState = True; errorText += "Error: Input and output directory may not be the same.\n"
if self.__real['input_dir'] == self.__real['raw_subdir']:
errorState = True; errorText += "Error: Input and raw directory may not be the same.\n"
if self.__real['input_dir'] == self.__real['sum_subdir']:
errorState = True; errorText += "Error: Input and sum directory may not be the same.\n"
if self.__real['input_dir'] == self.__real['align_subdir']:
errorState = True; errorText += "Error: Input and align directory may not be the same.\n"
return errorState, errorText
#################### SKULKHOST CLASS ###############################
#class skulkHost(QtCore.QThread):
class skulkHost(object):
"""
A skulkHost manages the individual Zorro jobs dispatched by Automator. On a local machine, one skulkHost
is created for each process specified to use.
On a cluster a skulkHost is one job, so there can be more hosts than available nodes if desired.
"""
def __init__(self, hostname, workerFunction, messageQueue,
n_threads = None, cachepath = None, qsubHeader=None ):
"""
def __init__(self, hostname, workerFunction, messageQueue,
n_threads = None, cachepath = None, qsubHeader=None )
hostName is any string that ID's the host uniquely
workerFunction is one of ['local','qsub', 'rsync', 'archive'], reflecting what the host should do.
messageQueue is the message queue from the skulkManager
n_threads is the number of p-threads to use for the job.
qsubHeader is a text file that contains everything but the qsub line for a .bash script.
"""
# In case I need to re-factor to have a seperate thread for each host
#QtCore.QThread.__init__(self)
# self.sleepTime = 1.0
self.hostName = hostname
self.messageQueue = messageQueue
self.zorroState = None
self.submitName = None
# CANNOT get an instantiated object here so we cannot pass in bound function handles directly, so
# we have to use strings instead.
if workerFunction == 'local':
self.workerFunction = self.submitLocalJob
elif workerFunction == 'dummy':
self.workerFunction = self.submitDummyJob
elif workerFunction == 'qsub':
self.workerFunction = self.submitQsubJob
elif workerFunction == 'rsync':
self.workerFunction = self.submitRsyncJob
elif workerFunction == 'archive':
self.workerFunction = self.submitArchiveJob
else:
self.workerFunction = workerFunction
self.subprocess = None
self.n_threads = n_threads
self.cachePath = cachepath
self.qsubHeaderFile = qsubHeader
def __clean__(self):
self.subprocess = None
self.zorroState = None
try: os.remove( self.submitName )
except: pass
def kill(self):
try:
if bool( self.subprocess ):
# Kill again with psutil
try:
print( "Trying to kill subprocess pid %d" % self.subprocess.pid )
os_process = psutil.Process( self.subprocess.pid )
for child_process in os_process.children(recursive=True):
child_process.kill()
os_process.kill()
except Exception as e:
print( "skulkHost.kill() psutil varient received exception: " + str(e) )
# Well this sort of works, now we have defunct zombie processes but they
# stop running. Force garbage collection with del
# subprocess.kill() doesn't always work, so use psutil to do it
self.subprocess.communicate() # Get any remaining output
self.subprocess.kill()
del self.subprocess
except Exception as e:
print( "skulkHost.kill raised exception: " + str(e) )
if self.workerFunction == self.submitQsubJob:
print( "Warning: User must call qdel on the submitted job. We need to id the job number during submission" )
# Open the host for new jobs
self.__clean__()
def poll(self):
# Would it be better to have some sort of event driven implementation?
if self.subprocess == None and self.zorroState != None:
# BUG: sometimes this skips...
            # Can we assume it finished? What if it's an error? Can we peek at the log?
priorState = self.zorroState.state
try:
diskState= self.zorroState.peek()
# Is this stable over all states?
if priorState == SYNCING:
self.messageQueue.put( [RENAME, self.zorroState] )
self.messageQueue.put( [diskState, self.zorroState] )
except:
print( "DEBUG: skulkHost.poll couldn't peak at log %s" % self.zorroState.zorroObj.files['config'] )
self.__clean__()
# Remove the submission script if present
return HOST_FREE
elif self.zorroState == None:
return HOST_FREE
# Else: we have a subprocess and a zorroState
status = self.subprocess.poll()
if status == None:
return HOST_BUSY
elif status == 0:
# Send a message that we finished a process and that the state should be incremented.
self.zorroState.incrementState()
self.messageQueue.put( [self.zorroState.state, self.zorroState] )
#if self.zorroState.state == PROCESSING:
# self.messageQueue.put( [FINISHED, self.zorroState] )
#elif self.zorroState.state == SYNCING:
# self.messageQueue.put( [RENAME, self.zorroState] )
#elif self.zorroState.state == ARCHIVING:
# self.messageQueue.put( [COMPLETE, self.zorroState] )
#else:
# if self.zorroState.state in STATE_NAMES:
# raise ValueError( "skulkHost: unknown job type: " + str(STATE_NAMES[self.zorroState.state]) )
# else:
# raise ValueError( "skulkHost: unknown job type: " + str(self.zorroState.state) )
# Release the subprocess and state object
self.__clean__()
return HOST_FREE
else: # Error state, kill it with fire
self.kill()
self.messageQueue.put( [HOST_ERROR, self.zorroState] )
return HOST_FREE
def submitRsyncJob( self, stateObj ):
# zorroObj.files['raw'] contains the target location
        # TODO: add compression as an option to rsync; it generally slows the transfer down unless you
        # have a bunch running simultaneously
        self.zorroState = stateObj
        verbose_flag = "-v" # '-v' is verbose; compression would be '-z'
        remove_flag = "--remove-source-files"
        source = stateObj.name # or stateObj.zorroObj.files['original']
        target = stateObj.zorroObj.files['stack']
        rsync_exec = "rsync " + verbose_flag + " " + remove_flag + " " + source + " " + target
print( "RSYNC command: " + rsync_exec )
self.subprocess = sp.Popen( rsync_exec, shell=True )
self.messageQueue.put( [HOST_BUSY, self.zorroState] )
def submitArchiveJob( self, zorroState ):
print( "TODO: move files and compress them using an external utility, or blosc with zlib?" )
def submitLocalJob( self, stateObj ):
# Local job, no queue
# Check/copy that stackName exists in the right place
self.zorroState = stateObj
self.zorroState.zorroObj.n_threads = self.n_threads
if self.cachePath != None: # Force use of host's cachePath if it's present...
self.zorroState.zorroObj.cachePath = self.cachePath
self.zorroState.zorroObj.files['stdout'] = os.path.splitext( self.zorroState.zorroObj.files['config'] )[0] + ".zout"
self.zorroState.zorroObj.METAstatus = STATE_NAMES[PROCESSING]
# Execute zorro
self.zorroState.zorroObj.saveConfig()
# We could just call 'zorro' script but this is perhaps safer.
# zorro_script = os.path.join( os.path.split( zorro.__file__ )[0], "__main__.py" )
#commandStr = "python " + zorro_script + " -c " + self.zorroState.zorroObj.files['config']
commandStr = "zorro -c " + self.zorroState.zorroObj.files['config'] + " >> " + self.zorroState.zorroObj.files['stdout'] + " 2>&1"
print( "Subprocess local exec: " + commandStr )
# Seems that shell=True is more or less required.
self.subprocess = sp.Popen( commandStr, shell=True )
self.messageQueue.put( [HOST_BUSY, self.zorroState] )
def submitQsubJob( self, stateObj ):
# Local job, no queue
# Check/copy that stackName exists in the right place
self.zorroState = stateObj
# Load header string
with open( self.qsubHeaderFile, 'r' ) as hf:
submitHeader = hf.read()
self.zorroState.zorroObj.n_threads = self.n_threads
if self.cachePath != None: # Force use of host's cachePath if it's present...
self.zorroState.zorroObj.cachePath = self.cachePath
self.zorroState.zorroObj.METAstatus = STATE_NAMES[PROCESSING]
# Force to use 'Agg' with qsub, as often we are using Qt4Agg in Automator
self.zorroState.zorroObj.plotDict['backend'] = 'Agg'
# Setup the qsub .bash script
# zorro_script = os.path.join( os.path.dirname( zorro.__file__ ), "__main__.py" )
# Hrm, ok, so the cfgFront is relative to where?
cfgFront = os.path.splitext(self.zorroState.zorroObj.files['config'] )[0]
cfgBase = os.path.splitext( os.path.basename(self.zorroState.zorroObj.files['config'] ) )[0]
self.submitName = 'z' + cfgBase + ".sh"
# Plotting is crashing on the cluster.
# submitCommand = "python %s -c %s" % (zorro_script, self.zorroState.zorroObj.files['config'] )
submitCommand = "zorro -c %s" % (self.zorroState.zorroObj.files['config'] )
# Actually, it's easier if I just do all the subsitutions myself, rather than farting around with
# different operating systems and environment variables
submitHeader = submitHeader.replace( "$JOB_NAME", self.submitName )
self.zorroState.zorroObj.files['stdout'] = cfgFront + ".zout"
submitHeader = submitHeader.replace( "$OUT_NAME", self.zorroState.zorroObj.files['stdout'] )
# Use the same output and error names, otherwise often we don't see the error.
submitHeader = submitHeader.replace( "$ERR_NAME", self.zorroState.zorroObj.files['stdout'] )
submitHeader = submitHeader.replace( "$N_THREADS", str(self.n_threads) )
# Save configuration file for the load
self.zorroState.zorroObj.saveConfig()
# Write the bash script to file
with open( self.submitName, 'w' ) as sub:
sub.write( submitHeader )
sub.write( submitCommand + "\n" )
print( "Submitting job to grid engine : %s"%self.submitName )
# We use -sync y(es) here to have a blocking process, so that we can check the status of the job.
commandStr = "qsub -sync y " + self.submitName
# self.subprocess = sp.Popen( commandStr, shell=True, env=OSEnviron )
self.subprocess = sp.Popen( commandStr, shell=True, stdout=sp.PIPE )
# Can we get the job ID from the "Your job 16680987 ("z2016-08-04_09_04_07.dm4.sh") has been submitted " line?
# time.sleep( 1.0 )
# out, err = self.subprocess.communicate()
# print( "TODO: parse output, error string: %s, %s" %(out,err) )
self.messageQueue.put( [HOST_BUSY, self.zorroState] )
# Submission script is cleaned by the successful return of the job only. The alternative approach
# would be to submit a subprocess bash script that waits 60 s and then deletes it?
def submitDummyJob( self, zorroState ):
self.zorroState = zorroState
# Cheap job for testing purposes
time.sleep( 5 )
pass
def submitJob_viaSSH( self, zorroState ):
        # Remote job, no queue
print( "Submission of jobs via SSH is not implemented yet." )
pass
    def qsubJob_viaSSH( self, zorroState ):
        # Remote queue, submitted via SSH; needs some sort of distributed file system? Or can we use
        # rsync (if this is to be used as a daemon in this case)?
print( "Submission of jobs via SSH is not implemented yet." )
pass
pass
#################### SKULKMANAGER CLASS ###############################
class skulkManager(QtCore.QThread):
# Signals must be statically declared?
try:
automatorSignal = QtCore.Signal( str, str, str )
except:
automatorSignal = None
def __init__(self, inputQueue=None ):
QtCore.QThread.__init__(self)
self.verbose = 3
self.DEBUG = False
# Processible file extensions
self.globPattern = ['*.dm4', '*.dm3', '*.mrc', '*.mrcz', '*.tif', '*.tiff',
'*.mrcs', '*.mrcsz', '*.hdf5', '*.h5', '*.bz2', '*.gz', '*.7z' ]
# Default object for copying the parameters (also accessed by Automator)
self.zorroDefault = zorro.ImageRegistrator()
# Mutex-protected queues of the image stacks to be worked on
self.__globalHeap = skulkHeap()
self.__newHeap = skulkHeap()
self.__syncHeap = skulkHeap()
self.__procHeap = skulkHeap()
self.__archiveHeap = skulkHeap()
self.completedCount = 0
# Messages from the hosts to the skulkManager (i.e. I've finished, or, I've fallen and I can't get up again)
# This is unidirectional from the hosts to the skulk manager
self.messageQueue = queue.Queue(maxsize=0)
# Servers that will accept jobs, is a dict of skulkHosts
self.procHosts = {}
self.syncHosts = {}
self.archiveHosts = {}
# Path manager, for keeping real and normed paths manageable.
self.paths = skulkPaths()
# TODO: Do we want Paramiko for running remote jobs via SSH? Or restrict ourselves to qsub locally?
# https://github.com/paramiko/paramiko
# It's not in Anaconda by default, but it's in both conda and pip
self.__STOP_POLLING = False
        # (Refers to zorroState.__MIN_FILE_SIZE: bytes, should be bigger than the Digital Micrograph
        # header size, but smaller than any conceivable image)
self.sleepTime = 1.0
pass
def __getitem__( self, key ):
return self.__globalHeap[key]
def __len__(self):
return len(self.__globalHeap)
def keys( self ):
return self.__globalHeap.keys()
def setDEBUG( self, DEBUG_STATE ):
self.DEBUG = bool( DEBUG_STATE )
def run(self):
self.completedCount = 0
loopCounter = 0
while not self.__STOP_POLLING:
t0 = time.time()
# Check queues if they have returned a result
while self.messageQueue.qsize() > 0:
# We have message(s)
self.processNextMessage()
# DEBUG OUTPUT ALL ITEMS IN GLOBAL HEAP
# print( "GLOBAL HEAP: \n" + str(self.__globalHeap) )
### Poll the skulkHosts to see if they are free
statusMessage = "%s "%loopCounter
freeProcs = []
statusMessage += " | PROC "
for J, hostKey in enumerate( self.procHosts ):
pollValue = self.procHosts[hostKey].poll()
if pollValue == HOST_FREE:
freeProcs.append( self.procHosts[hostKey] )
elif pollValue == HOST_BUSY:
pass
                elif pollValue == HOST_ERROR: # pollValue is an int enum, never a string
# TODO: do we want an actual exception?
print( "ERROR in subprocess on host %s: %s (lookup code)" % (hostKey, pollValue ) )
# Free the host
self.procHosts[hostKey].kill()
if pollValue in STATE_NAMES:
statusMessage += ": %s "% STATE_NAMES[pollValue]
else:
statusMessage += ": ===%s=== "% pollValue
freeSyncs = []
statusMessage += " | SYNC "
for J, hostKey in enumerate( self.syncHosts ):
pollValue = self.syncHosts[hostKey].poll()
if pollValue == HOST_FREE:
freeSyncs.append( self.syncHosts[hostKey] )
elif pollValue == HOST_BUSY:
pass
                elif pollValue == HOST_ERROR: # pollValue is an int enum, never a string
# TODO: do we want an actual exception?
print( "ERROR in sync on host %s: error %s" % (hostKey, pollValue ) )
# Free the host
self.syncHosts[hostKey].kill()
if pollValue in STATE_NAMES:
statusMessage += ": %s "% STATE_NAMES[pollValue]
else:
statusMessage += ": ===%s=== "% pollValue
# I could put in a mod command, base 10.0?
if np.mod( loopCounter, 10 ) == 0:
print( str(statusMessage) )
while len( freeProcs ) > 0:
# print( "In freeProcs loop %d hosts, %d zorros" % (len(freeProcs), len(self.__procHeap) ) )
nextJob = self.__procHeap.popNext( PROCESSING )
if not bool(nextJob): # == None
break
# Re-write the configuration using the latest
nextJob.updateConfig( self.zorroDefault )
# Submit a job, the workerFunction or the zorroState should change the state to PROCESSING
freeProcs.pop().workerFunction( nextJob )
# Update priority
nextJob.priority *= 0.1
# print( "TODO: priority handling" )
while len( freeSyncs ) > 0:
# print( "In freeSyncs loop (TODO: archiving): %d hosts, %d zorros" % (len(freeSyncs), len(self.__syncHeap) ) )
nextSync = self.__syncHeap.popNext( SYNCING ) # Could be SYNCING or ARCHIVING? Think about it
if not bool(nextSync):
break
# TODO: pick worker function based on the STATUS?
freeSyncs.pop().workerFunction( nextSync )
# Have all the zorroState objects check their state
# This could become slow if we have thousands of objects loaded. Perhaps, since it's ordered,
# we should only check so many? Or check specifically the special queues?
# Probably we should pass in freeHosts?
for key, zorroState in self.__globalHeap.items():
zorroState.update()
# Now check the input directory for new files to add to the globalHeap
self.watch()
# Sleep the polling function
loopCounter += self.sleepTime
t1 = time.time()
if self.verbose >= 4:
print( "Poll time (s): %.6f" %(t1-t0) )
self.sleep( self.sleepTime )
# End skulkManager.run()
def processNextMessage( self ):
messageList = self.messageQueue.get()
message, zorroState = messageList
if self.DEBUG:
try:
print( "DEBUG skulkManager.run(): Received message: " + STATE_NAMES[message] + " for " + str(zorroState) )
except KeyError:
print( "DEBUG skulkManager.run(): Received message: " + message + " for " + str(zorroState) )
# zorroState.update MESSAGES
messageColor = None
if message == NEW or message == CHANGING:
# Just update the status color in Automator
pass
elif message == RENAME:
# The assumption here is that the name has changed, but the ID has
# not changed.
# and add the new one from the globalHeap
# Notify Automator of the new name
self.automatorUpdate( zorroState.id, zorroState.name, 'rename' )
if self.DEBUG:
print( "DEBUG: Renamed %s" % zorroState.name )
# Remove the file from the sync heap if necessary
try:
self.__syncHeap.pop( zorroState.id )
except: pass
# Update the pointer in the global heap.
self.__globalHeap[zorroState.id] = zorroState
message = READY # WARNING: not always the case!
elif message == READY:
# We can take it out of the new file heap at this stage.
try: self.__newHeap.pop( zorroState.id )
except: pass
return
elif message == STABLE:
return
elif message == PROCESSING: # zorroState wants to be in procHeap
if self.DEBUG:
print( "Adding (%s) %s to processing Heap" % (zorroState.id, zorroState.name) )
self.__procHeap[zorroState.id] = zorroState
elif message == SYNCING:
self.__syncHeap[zorroState.id] = zorroState
elif message == ARCHIVING:
self.__archiveHeap[zorroState.id] = zorroState
# skulkHost MESSAGES
elif message == HOST_BUSY:
# Zorro is processing this stack, green status, just update the status color in Automator
# TODO: different colors for different states?
if zorroState.state == SYNCING:
messageColor = 'deeppink'
elif zorroState.state == PROCESSING:
messageColor = 'steelblue'
elif zorroState.state == ARCHIVING:
messageColor = 'saddlebrown'
pass
elif message == FINISHED:
try:
self.__procHeap.pop( zorroState.id )
print( "Popped finished job (%s) %s from procHeap" % (zorroState.id, zorroState.name) )
except:
# It's more or less normal for it to not be there after it finishes, we should be
# more concerned if it's present still.
#print( "WARNING skulkManager.run() missing zorroState on procHeap: (%s) %s" %( zorroState.id, zorroState.name ) )
#print( "procHeap: " + str(self.__procHeap) )
pass
# Not necessary to increment zorroState as it should be 'fini' from the log file
# zorroState.incrementState()
# Load from disk
zorroState.loadFromLog( zorroState.zorroObj.files['config'], notify=False )
elif message == COMPLETE:
self.completedCount += 1
try:
self.__archiveHeap.pop( zorroState.id )
except:
# We aren't really using archiving anymore, the blosc compression is so much faster
pass
elif message == HOST_ERROR or message == ERROR:
# Remove from all the heaps except the global heap.
try:
self.__clean__( zorroState.id )
except: pass
return
else:
print( "skulkManager::mainPollingLoop: Unknown message : " + STATE_NAMES[message] )
pass
# Send updates to the Automator GUI
if messageColor == None:
messageColor = STATE_COLORS[message]
self.automatorUpdate( zorroState.id, zorroState.name, messageColor )
def watch(self):
"""
Poll the input directory for new stack files, and generate zorroStates for them if new image stacks
appear
"""
newGlobList = []
for globPat in self.globPattern:
# I wonder if normed or real paths are better here.
newGlobList += glob.glob( os.path.join( self.paths.get_real('input_dir'), globPat ) )
# And for every existing key name, check to see if we have a record of it
for globbedFile in newGlobList:
# Ouch here we have a problem with the switch to id/name
if "CountRef" in globbedFile:
# Maybe this could be a statusbar thing instead?
# print( "Skipping SerialEM reference image %s" % globbedFile )
continue
if "SuperRef" in globbedFile:
continue
if not self.__newHeap.getByName( globbedFile ):
# Add it to the __globalHeap, __syncHeap should take care of itself.
newState = zorroState( globbedFile, self.zorroDefault, self.paths, self.messageQueue )
# print( "Adding new file %s with id %s" % (globbedFile, newState.id) )
self.__newHeap[newState.id] = newState
self.__globalHeap[newState.id] = newState
# end skulkManager.watch()
def initHosts( self, cluster_type='local', n_processes=1, n_threads=16, n_syncs=4, qsubHeader=None ):
"""
Starts all the hosts, for a given cluster_type.
For local the n_processes is the number of process.
For qsub the n_processes is the maximum number of simultaneous jobs in given 'qconf -sq $QUEUENAME'
"""
self.inspectLogDir()
self.__STOP_POLLING = False
del self.procHosts # Hard reset, user/Automator should call kill first to avoid dangling processes.
self.procHosts = {}
del self.syncHosts
self.syncHosts = {}
del self.archiveHosts
self.archiveHosts = {}
print( "Starting %d processing hosts" % n_processes )
self.procHosts = {}
for J in np.arange( n_processes ):
self.procHosts['host%d'%J] = skulkHost( 'host%d'%J, cluster_type, self.messageQueue,
n_threads=n_threads, qsubHeader=qsubHeader )
print( "Starting %d syncing hosts" % n_syncs )
self.syncHosts = {}
for J in np.arange( n_syncs ):
self.syncHosts['sync%d'%J] = skulkHost( 'sync%d'%J, 'rsync', self.messageQueue )
        pass # end initHosts()
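    # Typical startup sequence (illustrative; Automator performs the equivalent):
    #   skulk = skulkManager()
    #   skulk.paths['input_dir'] = '/mnt/K2/offload'
    #   skulk.paths['output_dir'] = './zorro_output'
    #   skulk.initHosts( cluster_type='local', n_processes=2, n_threads=16 )
    #   skulk.start()   # QThread entry point; enters the run() polling loop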
def automatorUpdate( self, state_id, name, statusColor ):
"""
Send a signal to the parent Automator, if it exists
"""
if self.automatorSignal != None:
self.automatorSignal.emit( state_id, name, statusColor )
def __clean__( self, state_id ):
"""
"""
if self.DEBUG:
print( "Clean: " + str(state_id) )
try:
deleteZorro = self.__globalHeap.pop( state_id )
if deleteZorro != None:
try:
self.__procHeap.pop( state_id )
except: pass
try:
self.__syncHeap.pop( state_id )
except: pass
try:
self.__archiveHeap.pop( state_id )
except: pass
try:
self.__newHeap.pop( state_id )
except: pass
# print( "zorroSkulkManager: removed from Heap (%s): %s" % (state_id, deleteZorro.name) )
except Exception as e:
print( "Error skulkManager.remove: " + str(e) )
raise
# Check running jobs for the key and kill them if necessary
for host in list(self.procHosts.values()) + list(self.syncHosts.values()) + list(self.archiveHosts.values()):
if host.zorroState != None and host.zorroState.id == state_id:
host.kill()
if deleteZorro.state >= FINISHED:
self.completedCount -= 1
return deleteZorro
def remove( self, state_id ):
"""
WARNING: this function deletes all files associated with a key, not just the log. It cannot be undone.
"""
deleteZorro = self.__clean__( state_id )
# At this point we can remove all the associated files
        if deleteZorro.zorroObj != None and ( \
                deleteZorro.name.endswith( '.zor' ) \
                or 'config' in deleteZorro.zorroObj.files ):
try:
deleteZorro.zorroObj.loadConfig()
print( "Loaded: " + deleteZorro.zorroObj.files['config'] )
# Pop-shared files
if 'gainRef' in deleteZorro.zorroObj.files:
                    deleteZorro.zorroObj.files.pop( 'gainRef' )
# DELETE ALL FILES
for filename in deleteZorro.zorroObj.files.values():
try: os.remove( filename )
except: pass
except Exception as e: # Failed to delete
print( "remove failed: " + str(e) )
pass
else: # We can't find a log with a list of files so we can only remove the key
try: os.remove( deleteZorro.name )
except: pass
# politely recommend garbage collection
self.automatorUpdate( deleteZorro.id, deleteZorro.name, 'delete' )
del deleteZorro
def reprocess( self, state_id ):
"""
Reprocesses a selected file with current conditions, and also gives the file extra-high priority so
it goes to top of the stack.
"""
# Try and pop the process so we can re-process it
deleteZorro = self.__clean__( state_id )
self.automatorUpdate( deleteZorro.id, deleteZorro.name, 'delete' )
        # Because we update the configuration on promotion from READY to PROCESSING we do not need to delete the existing file
# os.remove( deleteZorro.zorroObj.files['config'] )
# Make a new one
# Add it to the __globalHeap, __syncHeap should take care of itself.
rawStackName = deleteZorro.zorroObj.files['stack']
newState = zorroState( rawStackName, self.zorroDefault, self.paths, self.messageQueue )
# Give high priority
newState.topPriority()
        # Decrement the state in case we were mid-operation
newState.decrementState()
newState.name = newState.zorroObj.files['config']
# Add to globalHeap
self.__globalHeap[newState.id] = newState
        # Add to procHeap immediately
# Or does it do this itself?
# self.__procHeap[newState.id] = newState
# Tell Automator we have a 'new' stack
# self.automatorUpdate( newState.id, newState.name, STATE_COLORS[newState.state] )
def kill(self):
self.__STOP_POLLING = True
for hostObj in self.procHosts.values():
hostObj.kill()
for hostObj in self.syncHosts.values():
hostObj.kill()
for hostObj in self.archiveHosts.values():
hostObj.kill()
# Go through the skulkQueue and change any 'proc' statuses to 'stable'
if not self.__globalHeap.isLocked():
# Forcibly unlock the mutex for the global heap
self.__globalHeap._skulkHeap__mutex.unlock()
# Get rid of the secondary heaps
del self.__archiveHeap
del self.__syncHeap
del self.__procHeap
self.__archiveHeap = skulkHeap()
self.__syncHeap = skulkHeap()
self.__procHeap = skulkHeap()
# Reset any heap-states
for name, stateObj in self.__globalHeap.items():
stateObj.decrementState()
def inspectLogDir(self):
"""
Check the output directory for logs. In general this is only called when a start is issued.
"""
if self.paths['output_dir'] == None:
return
logList = glob.glob( os.path.join( self.paths['output_dir'], "*.zor" ) )
if len( logList ) == 0:
return
logList = np.sort(logList)
        # Prioritize by lexicographic sort
for J, log in enumerate(logList):
# This is no longer a problem as Zorro logs are now .zor format
#if log.endswith( "ctffind3.log" ): # Avoid trying CTF logs if they happen to be in the same place.
# continue
# make sure it's not already in the globalHeap, since we do it once at launch
stateObj = self.__globalHeap.getByName( log )
if not bool(stateObj):
newState = zorroState( log, self.zorroDefault, self.paths, self.messageQueue, notify=False )
# These will be very low priority compared to mtime priorities.
newState.priority = J + 1E9
self.__globalHeap[ newState.id ] = newState
self.automatorUpdate( newState.id, newState.name, STATE_COLORS[newState.state] )
else:
self.automatorUpdate( stateObj.id, stateObj.name, STATE_COLORS[stateObj.state] )
if __name__ == "__main__":
    pass
from __future__ import division, print_function, absolute_import, unicode_literals
import os, os.path, sys, glob
try:
    from PySide import QtGui, QtCore
except ImportError:
    # Importing PyQt4 as a backup could work, but I suspect it still causes license issues
    raise ImportError( "Automator.py: PySide not found, I am going to crash now. Bye." )
import zorro
from . import zorroSkulkManager
# from copy import deepcopy
from . import Ui_Automator
from . import Ui_DialogFileLoc
from . import Ui_DialogOrientGainRef
# import time
import functools
try:
import Queue as queue
except:
import queue
import json
try:
import ConfigParser as configparser
except:
import configparser
import numpy as np
#progname = os.path.basename(sys.argv[0])
#progversion = u"0.7.1b0"
from .__version__ import __version__
#STATE_COLORS = { NEW:u'darkorange', CHANGING:u'darkorange', STABLE:u'goldenrod', SYNCING:u'goldenrod', READY:u'forestgreen',
# PROCESSING:u'forestgreen', FINISHED:u'indigo', ARCHIVING:u'saddlebrown', COMPLETE:u'dimgrey',
# STALE:u'firebrick', HOST_BUSY:u'host_busy', HOST_FREE:u'host_free', HOST_ERROR:u'firebrick',
# RENAME:u'rename' }
TOOLTIP_STATUS = { u'darkorange':u'New', u'goldenrod':u'Stable', u'indigo':u'Finished',
u'saddlebrown':u'Archiving', u'forestgreen':u'Ready', u'steelblue':'Processing',
u'dimgrey':u'Complete', u'firebrick':u'Error', u'black':u'Unknown',
u'deeppink':u'Syncing', u'':u'Unknown', u'rename':u'Renaming' }
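# Lookup sketch: a status colour maps straight to hover text, e.g.
# TOOLTIP_STATUS[u'forestgreen'] == u'Ready'; the empty string falls back to u'Unknown'.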
#class Automator(Ui_Automator_ui, QtGui.QApplication):
class Automator(Ui_Automator.Ui_Automator_ui, QtGui.QApplication):
def __init__(self, testing=False ):
# Hack to get icons to show up...
# Better approach would be to give each one the real path.
# origdir = os.path.realpath('.')
# os.chdir( os.path.dirname(os.path.realpath(__file__)) )
QtGui.QApplication.__init__(self, sys.argv)
#self.app = QtGui.QApplication(sys.argv)
self.MainWindow = QtGui.QMainWindow()
self.ImageDialog = QtGui.QDialog()
self.FileLocDialog = QtGui.QDialog()
self.OrienGainRefDialog = QtGui.QDialog()
self.setupUi(self.MainWindow)
self.ui_FileLocDialog = Ui_DialogFileLoc.Ui_DialogFileLocations()
self.ui_FileLocDialog.setupUi( self.FileLocDialog )
self.ui_OrienGainRefDialog = Ui_DialogOrientGainRef.Ui_DialogOrientGainRef()
self.ui_OrienGainRefDialog.setupUi( self.OrienGainRefDialog )
# Force absolute paths to icons
self.joinIconPaths()
# Zorro objects and the default one built from the GUI
self.zorroDefault = zorro.ImageRegistrator()
        self.skulkMessenger = queue.Queue()
        self.skulk = zorroSkulkManager.skulkManager( self.skulkMessenger )
# Connect skulk.automatorSignal to myself
self.skulk.automatorSignal.connect( self.updateFromSkulk )
self.skulkThread = None
self.cfgfilename = ""
self.initClusterConfig()
# I should populate these with the default values from QtDesigner
self.cfgCommon = {}
self.cfgGauto = {}
self.cfgGplot = {}
self.stateIds = {} # Link unique Id to name
self.reverseIds = {} # Opposite to above
self.statusbar.showMessage( "Welcome to Automator for Zorro and 2dx, version " + __version__ )
# Apply all the connections from the QT gui objects to their associated functions in this class, using functools as desired
# Menu items
self.actionQuit.triggered.connect( self.quitApp )
self.MainWindow.closeEvent = self.quitApp
self.actionLoad_config.triggered.connect( functools.partial(self.loadConfig, None) )
self.actionSave_config.triggered.connect( functools.partial(self.saveConfig, None) )
self.actionSet_paths.triggered.connect( self.FileLocDialog.show )
self.actionCitations.triggered.connect( self.showCitationsDialog )
self.actionIMS_shortcuts.triggered.connect( self.showImsHelpDialog )
self.actionOrient_Gain_Reference.triggered.connect( self.OrienGainRefDialog.show )
self.actionGroupPopout = QtGui.QActionGroup(self)
self.actionGroupPopout.addAction( self.actionPrefer2dx_viewer )
self.actionGroupPopout.addAction( self.actionPreferIms )
self.actionGroupPopout.addAction( self.actionPreferMplCanvas )
self.actionGroupPopout.triggered.connect( self.preferPopout )
# Enable sorting for the file list
self.listFiles.setSortingEnabled( True )
#==============================================================================
# CAREFUL WITH FUNCTOOLS, IF OBJECTS ARE CREATED AND DESTROYED THEY AREN'T
# UPDATED WITH THE PARTIAL CONTEXT
#==============================================================================
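        # Sketch of the pitfall above (hypothetical, not executed here): functools.partial
        # binds object references at connect time, e.g.
        #   cb = functools.partial( self.updateDict, u"cfgGauto", u'boxsize', self.leGautoBoxsize.text )
        # so if self.leGautoBoxsize were destroyed and recreated, cb would still call
        # the old widget's .text method.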
# Paths
self.ui_FileLocDialog.tbOpenInputPath.clicked.connect( functools.partial(
self.openPathDialog, u'input_dir', True ) )
self.ui_FileLocDialog.tbOpenOutputPath.clicked.connect( functools.partial(
self.openPathDialog, u'output_dir', True ) )
self.ui_FileLocDialog.tbOpenRawPath.clicked.connect( functools.partial(
self.openPathDialog, u'raw_subdir', True ) )
self.ui_FileLocDialog.tbOpenSumPath.clicked.connect( functools.partial(
self.openPathDialog, u'sum_subdir', True ) )
self.ui_FileLocDialog.tbOpenAlignPath.clicked.connect( functools.partial(
self.openPathDialog, u'align_subdir', True ) )
self.ui_FileLocDialog.tbOpenFiguresPath.clicked.connect( functools.partial(
self.openPathDialog, u'fig_subdir', True ) )
self.ui_FileLocDialog.tbOpenGainRefPath.clicked.connect( functools.partial(
self.openFileDialog, u'gainRef', True ) )
self.ui_FileLocDialog.leInputPath.editingFinished.connect( functools.partial(
self.updateDict, u"skulk.paths", u'input_dir', self.ui_FileLocDialog.leInputPath.text ) )
self.ui_FileLocDialog.leOutputPath.editingFinished.connect( functools.partial(
self.updateDict, u"skulk.paths", u'output_dir', self.ui_FileLocDialog.leOutputPath.text ) )
self.ui_FileLocDialog.leRawPath.editingFinished.connect( functools.partial(
self.updateDict, u"skulk.paths", u'raw_subdir', self.ui_FileLocDialog.leRawPath.text ) )
self.ui_FileLocDialog.leSumPath.editingFinished.connect( functools.partial(
self.updateDict, u"skulk.paths", u'sum_subdir', self.ui_FileLocDialog.leSumPath.text ) )
self.ui_FileLocDialog.leAlignPath.editingFinished.connect( functools.partial(
self.updateDict, u"skulk.paths", u'align_subdir', self.ui_FileLocDialog.leAlignPath.text ) )
self.ui_FileLocDialog.leFiguresPath.editingFinished.connect( functools.partial(
self.updateDict, u"skulk.paths", u'fig_subdir', self.ui_FileLocDialog.leFiguresPath.text ) )
self.ui_FileLocDialog.leGainRefPath.editingFinished.connect( functools.partial(
self.updateDict, u"skulk.paths", u'gainRef', self.ui_FileLocDialog.leGainRefPath.text ) )
# Gainref has to be provided to the zorroDefault object later.
# File output and compression
self.ui_FileLocDialog.comboCompressor.setCurrentIndex(0) # Default to None
self.ui_FileLocDialog.comboCompressor.currentIndexChanged.connect( functools.partial(
self.updateZorroDefault, u'files.compressor', self.ui_FileLocDialog.comboCompressor.currentText ) )
self.ui_FileLocDialog.cbGainRot.stateChanged.connect( functools.partial(
self.updateZorroDefault, u'gainInfo.Diagonal', self.ui_FileLocDialog.cbGainRot.isChecked ) )
self.ui_FileLocDialog.cbGainHorzFlip.stateChanged.connect( functools.partial(
self.updateZorroDefault, u'gainInfo.Horizontal', self.ui_FileLocDialog.cbGainHorzFlip.isChecked ) )
self.ui_FileLocDialog.cbGainVertFlip.stateChanged.connect( functools.partial(
self.updateZorroDefault, u'gainInfo.Vertical', self.ui_FileLocDialog.cbGainVertFlip.isChecked ) )
self.ui_OrienGainRefDialog.tbOrientGain_GainRef.clicked.connect( functools.partial(
self.openFileDialog, u'OrientGain_GainRef', True ) )
self.ui_OrienGainRefDialog.tbOrientGain_TargetStack.clicked.connect( functools.partial(
self.openFileDialog, u'OrientGain_TargetStack', True ) )
# self.ui_FileLocDialog.comboOutputFormat.currentIndexChanged.connect( functools.partial(
# self.updateZorroDefault, u'files.ext', self.ui_FileLocDialog.comboOutputFormat.currentText ) )
self.ui_FileLocDialog.sbCLevel.valueChanged.connect( functools.partial(
self.updateZorroDefault, u'files.cLevel', self.ui_FileLocDialog.sbCLevel.value ) )
self.ui_OrienGainRefDialog.pbRun.pressed.connect( self.run_OrienGainRef )
# Cache and Qsub paths
self.tbOpenCachePath.clicked.connect( functools.partial(
self.openPathDialog, u'cachePath', True) )
self.tbOpenQsubHeader.clicked.connect( functools.partial(
self.openFileDialog, u'qsubHeader', True) )
# Common configuration
# self.comboRegistrator.currentIndexChanged.connect( functools.partial(
# self.updateDict, u"cfgCommon", "registrator", self.comboRegistrator.currentText ) )
self.comboTriMode.currentIndexChanged.connect( functools.partial(
self.updateZorroDefault, u"triMode", self.comboTriMode.currentText ) )
self.sbPeaksigThres.valueChanged.connect( functools.partial(
self.updateZorroDefault, u"peaksigThres", self.sbPeaksigThres.value ) )
# TODO: have start/end frame in particle extraction
# self.sbStartFrame.valueChanged.connect( functools.partial(
# self.updateZorroDefault, "startFrame", self.sbStartFrame.value ) )
# self.sbEndFrame.valueChanged.connect( functools.partial(
# self.updateZorroDefault, "endFrame", self.sbEndFrame.value ) )
self.sbDiagWidth.valueChanged.connect( functools.partial(
self.updateZorroDefault, u"diagWidth", self.sbDiagWidth.value ) )
self.sbAutomax.valueChanged.connect( functools.partial(
self.updateZorroDefault, u"autoMax", self.sbAutomax.value ) )
self.cbSuppressOrigin.stateChanged.connect( functools.partial(
self.updateZorroDefault, u"suppressOrigin", self.cbSuppressOrigin.isChecked ) )
self.cbSavePNG.stateChanged.connect( functools.partial(
self.updateZorroDefault, u"savePNG", self.cbSavePNG.isChecked ) )
self.comboFilterMode.currentIndexChanged.connect( functools.partial(
self.updateZorroDefault, u"filterMode", self.comboFilterMode.currentText ) )
self.cbSaveMovie.stateChanged.connect( functools.partial(
self.updateZorroDefault, u"saveMovie", self.cbSaveMovie.isChecked ) )
self.comboAlignProgram.currentIndexChanged.connect( functools.partial(
self.updateZorroDefault, u"xcorrMode", self.comboAlignProgram.currentText ) )
self.comboCtfProgram.currentIndexChanged.connect( functools.partial(
self.updateZorroDefault, u"CTFProgram", self.comboCtfProgram.currentText ) )
# DEBUG
self.cbDebuggingOutput.stateChanged.connect( functools.partial(
self.updateDict, u"cfgCommon", u"DEBUG", self.cbDebuggingOutput.isChecked ) )
# Advanced configuration
self.sbShapePadX.valueChanged.connect( functools.partial(
self.updateZorroDefault, u"shapePadded", (self.sbShapePadY.value,self.sbShapePadX.value) ) )
self.sbShapePadY.valueChanged.connect( functools.partial(
self.updateZorroDefault, u"shapePadded", (self.sbShapePadY.value,self.sbShapePadX.value) ) )
self.sbFouCropX.valueChanged.connect( functools.partial(
self.updateZorroDefault, u"fouCrop", (self.sbFouCropY.value,self.sbFouCropX.value) ) )
self.sbFouCropY.valueChanged.connect( functools.partial(
self.updateZorroDefault, u"fouCrop", (self.sbFouCropY.value,self.sbFouCropX.value) ) )
self.cbDoBinning.stateChanged.connect( functools.partial(
self.binningControl, u"enable", self.cbDoBinning.isChecked ) )
self.sbBinCropX.valueChanged.connect( functools.partial(
self.binningControl, u"shapeBin", (self.sbBinCropY.value,self.sbBinCropX.value) ) )
self.sbBinCropY.valueChanged.connect( functools.partial(
self.binningControl, u"shapeBin", (self.sbBinCropY.value,self.sbBinCropX.value) ) )
self.sbPixelsize.valueChanged.connect( functools.partial(
self.updateZorroDefault, u"pixelsize", self.sbPixelsize.value ) )
self.sbVoltage.valueChanged.connect( functools.partial(
self.updateZorroDefault, u"voltage", self.sbVoltage.value ) )
self.sbC3.valueChanged.connect( functools.partial(
self.updateZorroDefault, u"C3", self.sbC3.value ) )
self.sbGain.valueChanged.connect( functools.partial(
self.updateZorroDefault, u"gain", self.sbGain.value ) )
self.sbMaxShift.valueChanged.connect( functools.partial(
self.updateZorroDefault, u"maxShift", self.sbMaxShift.value ) )
self.comboOriginMode.currentIndexChanged.connect( functools.partial(
self.updateZorroDefault, u"originMode", self.comboOriginMode.currentText ) )
self.cbPreshift.stateChanged.connect( functools.partial(
self.updateZorroDefault, u"preShift", self.cbPreshift.isChecked ) )
self.cbSaveC.stateChanged.connect( functools.partial(
self.updateZorroDefault, u"saveC", self.cbSaveC.isChecked ) )
self.comboBmode.currentIndexChanged.connect( functools.partial(
self.updateZorroDefault, u"Bmode", self.comboBmode.currentText ) )
self.sbBrad.valueChanged.connect( functools.partial(
self.updateZorroDefault, u"Brad", self.sbBrad.value ) )
self.comboWeightMode.currentIndexChanged.connect( functools.partial(
self.updateZorroDefault, u"weightMode", self.comboWeightMode.currentText ) )
self.comboPeakLocMethod.currentIndexChanged.connect( functools.partial(
self.updateZorroDefault, u"peakLocMode", self.comboPeakLocMethod.currentText ) )
self.sbSubpixReg.valueChanged.connect( functools.partial(
self.updateZorroDefault, u"subPixReg", self.sbSubpixReg.value ) )
self.comboShiftMethod.currentIndexChanged.connect( functools.partial(
self.updateZorroDefault, u"shiftMethod", self.comboShiftMethod.currentText ) )
# Cluster configuration
# Setup default values
self.comboClusterType.currentIndexChanged.connect( functools.partial(
self.updateDict, u"cfgCluster", u'cluster_type', self.comboClusterType.currentText ) )
self.sbNThreads.valueChanged.connect( functools.partial(
self.updateDict, u"cfgCluster", u'n_threads', self.sbNThreads.value ) )
self.sbNProcesses.valueChanged.connect( functools.partial(
self.updateDict, u"cfgCluster", u"n_processes", self.sbNProcesses.value ) )
self.sbNSyncs.valueChanged.connect( functools.partial(
self.updateDict, u"cfgCluster", u"n_syncs", self.sbNSyncs.value ) )
self.cbMultiprocessPlots.stateChanged.connect( functools.partial(
self.updateZorroDefault, u'plotDict.multiprocess', self.cbMultiprocessPlots.isChecked ) )
self.leCachePath.textEdited.connect( functools.partial(
self.updateZorroDefault, u'cachePath', self.leCachePath.text ))
self.leQsubHeaderFile.textEdited.connect( functools.partial(
self.updateDict, u"cfgCluster", u"qsubHeader", self.leQsubHeaderFile.text ) )
self.comboFFTWEffort.currentIndexChanged.connect( functools.partial(
self.updateZorroDefault, u"fftw_effort", self.comboFFTWEffort.currentText ) )
self.listFiles.itemActivated.connect( self.displaySelectedFile )
# All the Gauto's are line-edits so that we can have None or "" as the values
# Plus I don't have to worry about types.
self.leGautoBoxsize.editingFinished.connect( functools.partial(
self.updateDict, u"cfgGauto", u'boxsize', self.leGautoBoxsize.text ) )
self.leGautoDiameter.editingFinished.connect( functools.partial(
self.updateDict, u"cfgGauto", u'diameter', self.leGautoDiameter.text ) )
self.leGautoMin_Dist.editingFinished.connect( functools.partial(
self.updateDict, u"cfgGauto", u'min_dist', self.leGautoMin_Dist.text ) )
# Template
self.leGautoTemplates.editingFinished.connect( functools.partial(
self.updateDict, u"cfgGauto", u'T', self.leGautoTemplates.text ) )
self.tbGautoOpenTemplate.clicked.connect( functools.partial(
self.openFileDialog, u'gautoTemplates', True) )
# Template pixelsize?
self.leGautoAng_Step.editingFinished.connect( functools.partial(
self.updateDict, u"cfgGauto", u'ang_step', self.leGautoAng_Step.text ) )
self.leGautoSpeed.editingFinished.connect( functools.partial(
self.updateDict, u"cfgGauto", u'speed', self.leGautoSpeed.text ) )
self.leGautoCCCutoff.editingFinished.connect( functools.partial(
self.updateDict, u"cfgGauto", u'cc_cutoff', self.leGautoCCCutoff.text ) )
self.leGautoLsigma_D.editingFinished.connect( functools.partial(
self.updateDict, u"cfgGauto", u'lsigma_D', self.leGautoLsigma_D.text ) )
self.leGautoLsigma_Cutoff.editingFinished.connect( functools.partial(
self.updateDict, u"cfgGauto", u'lsigma_cutoff', self.leGautoLsigma_Cutoff.text ) )
self.leGautoLave_D.editingFinished.connect( functools.partial(
self.updateDict, u"cfgGauto", u'lave_D', self.leGautoLave_D.text ) )
self.leGautoLave_Max.editingFinished.connect( functools.partial(
self.updateDict, u"cfgGauto", u'lave_max', self.leGautoLave_Max.text ) )
self.leGautoLave_Min.editingFinished.connect( functools.partial(
self.updateDict, u"cfgGauto", u'lave_min', self.leGautoLave_Min.text ) )
self.leGautoLP.editingFinished.connect( functools.partial(
self.updateDict, u"cfgGauto", u'lp', self.leGautoLP.text ) )
self.leGautoHP.editingFinished.connect( functools.partial(
self.updateDict, u"cfgGauto", u'hp', self.leGautoHP.text ) )
self.leGautoLPPre.editingFinished.connect( functools.partial(
self.updateDict, u"cfgGauto", u'pre_lp', self.leGautoLPPre.text ) )
self.leGautoHPPre.editingFinished.connect( functools.partial(
self.updateDict, u"cfgGauto", u'pre_hp', self.leGautoHPPre.text ) )
# Flags go into cfgGPlot, simply so I can use a generator on cfgGauto and handle these manually
self.cbGautoDoprefilter.stateChanged.connect( functools.partial(
self.updateDict, u"cfgGplot", u"do_pre_filter", self.cbGautoDoprefilter.isChecked ) )
self.cbGautoPlotCCMax.stateChanged.connect( functools.partial(
self.updateDict, u"cfgGplot", u"write_ccmax_mic", self.cbGautoPlotCCMax.isChecked ) )
self.cbGautoPlotPref.stateChanged.connect( functools.partial(
self.updateDict, u"cfgGplot", u"write_pref_mic", self.cbGautoPlotPref.isChecked ) )
self.cbGautoPlotBG.stateChanged.connect( functools.partial(
self.updateDict, u"cfgGplot", u"write_bg_mic", self.cbGautoPlotBG.isChecked ) )
self.cbGautoPlotBGFree.stateChanged.connect( functools.partial(
self.updateDict, u"cfgGplot", u"write_bgfree_mic", self.cbGautoPlotBGFree.isChecked ) )
self.cbGautoPlotLsigma.stateChanged.connect( functools.partial(
self.updateDict, u"cfgGplot", u"write_lsigma_mic", self.cbGautoPlotLsigma.isChecked ) )
self.cbGautoPlotMask.stateChanged.connect( functools.partial(
self.updateDict, u"cfgGplot", u"write_mic_mask", self.cbGautoPlotMask.isChecked ) )
# Toolbar buttons
self.tbDeleteFile.clicked.connect( self.deleteSelected )
self.tbRun.clicked.connect( self.runSkulk )
self.tbKillAll.clicked.connect( self.killSkulk )
self.tbKillAll.setEnabled(False)
self.tbReprocess.clicked.connect( self.reprocessSelected )
self.tbParticlePick.clicked.connect( self.particlePick )
# Plots setup
# What I can do is make a list of viewWidgets so I can iterate through them
# This gives more flexibility for different arrangements in the future
self.viewWidgetList = []
self.viewWidgetList.append( self.viewWidget1 )
self.viewWidgetList.append( self.viewWidget2 )
self.viewWidgetList.append( self.viewWidget3 )
self.viewWidgetList.append( self.viewWidget4 )
self.viewWidgetList.append( self.viewWidget5 )
self.viewWidgetList.append( self.viewWidget6 )
for index, viewWidg in enumerate( self.viewWidgetList ):
viewWidg.viewNumber = index
viewWidg.autoParent = self
# Try to load an ini file in the startup directory if it's present.
#try:
# iniList = glob.glob( os.path.join( origdir, '*.ini' ) )
iniList = glob.glob( u"*.ini" )
# Can't open a loadConfig dialog until the app has started, so only one .ini file can be in the directory.
if len( iniList ) == 1: # One .ini file
self.loadConfig( iniList[0] )
else:
            defaultConfig = os.path.join( os.path.dirname( os.path.realpath(__file__) ), u'default.ini' )
if os.path.isfile( defaultConfig ):
print( "Loading default: %s" % defaultConfig )
self.loadConfig( defaultConfig )
# except:
# try:
# self.loadConfig( u'default.ini' )
# except:
# print( "Using default Zorro parameters" )
# Check for auxiliary programs
if not bool(testing):
self.validateAuxTools()
# Setup preferred popout function
self.preferPopout()
self.skulk.inspectLogDir() # Let's see if we can run this once...
if not bool(testing):
self.MainWindow.showMaximized()
self.exec_()
def validateAuxTools(self):
#self.comboCompressionExt.setEnabled(True)
#self.cbDoCompression.setEnabled(True)
self.pageGautoConfig.setEnabled(True)
self.tbParticlePick.setEnabled(True)
warningMessage = ""
# Check for installation of lbzip2, pigz, and 7z
#if not bool( zorro.util.which('lbzip2') ) and not bool( zorro.util.which('7z') ) and not bool( zorro.util.which('pigz') ):
# warningMessage += u"Disabling compression: None of lbzip2, pigz, or 7z found.\n"
# # TODO: limit compress_ext if only one is found?
# self.comboCompressionExt.setEnabled(False)
# self.cbDoCompression.setEnabled(False)
# Check for installation of CTFFIND/GCTF
if not bool( zorro.util.which('ctffind') ):
# Remove CTFFIND4 from options
warningMessage += u"Disabling CTFFIND4.1: not found.\n"
self.comboCtfProgram.removeItem( self.comboCtfProgram.findText( 'CTFFIND4.1') )
self.comboCtfProgram.removeItem( self.comboCtfProgram.findText( 'CTFFIND4.1, sum') )
if not bool( zorro.util.which('gctf') ):
warningMessage += u"Disabling GCTF: not found.\n"
self.comboCtfProgram.removeItem( self.comboCtfProgram.findText( 'GCTF') )
self.comboCtfProgram.removeItem( self.comboCtfProgram.findText( 'GCTF, sum') )
# Check for installation of Gautomatch
if not bool( zorro.util.which('gautomatch') ):
warningMessage += u"Disabling particle picking: Gautomatch not found.\n"
self.pageGautoConfig.setEnabled(False)
self.tbParticlePick.setEnabled(False)
if not bool( zorro.util.which('2dx_viewer') ):
warningMessage += u"2dx_viewer not found, using IMS for pop-out views.\n"
self.actionPrefer2dx_viewer.setEnabled( False )
self.actionPreferIms.setChecked(True)
if bool( warningMessage ):
warnBox = QtGui.QMessageBox()
warnBox.setText( warningMessage )
warnBox.exec_()
def initClusterConfig(self):
# Setup default dicts so we have some values.
import numexprz as nz
def countPhysicalProcessors():
            # This simply doesn't work on Windows with an Intel Core i5, for example.
cpuInfo = nz.cpu.info
physicalIDs = []
for J, cpuDict in enumerate( cpuInfo ):
if not cpuDict['physical id'] in physicalIDs:
physicalIDs.append( cpuDict['physical id'] )
return len( physicalIDs )
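            # e.g. a dual-socket machine reports 'physical id' values 0 and 1 across all
            # entries, so this returns 2 regardless of core or hyper-thread counts.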
try:
            cpuCoresPerProcessor = int( nz.cpu.info[0]['cpu cores'] )
self.cfgCluster = { u'n_threads':cpuCoresPerProcessor,
u'n_processes':countPhysicalProcessors(), u'n_syncs':2,
u'cluster_type': u'local', u'qsubHeader':u"" }
except:
print( "Failed to determine number of CPU cores, defaulting to number of virtual cores" )
# Use default values if the system can't figure things out for itself
self.cfgCluster = { u'n_threads': len(nz.cpu.info), u'n_processes':1, u'n_syncs':2,
u'cluster_type': u'local', u'qsubHeader':u"" }
self.sbNProcesses.setValue( self.cfgCluster[u'n_processes'] )
self.sbNThreads.setValue( self.cfgCluster[u'n_threads'] )
self.sbNSyncs.setValue( self.cfgCluster[u'n_syncs'] )
def joinIconPaths(self):
# Icon's aren't pathed properly if the CWD is somewhere else than the source folder, so...
self.source_dir = os.path.dirname( os.path.realpath(__file__) )
# Join all the icons and reload them
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/CINAlogo.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.MainWindow.setWindowIcon(icon)
self.label_2.setPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/CINAlogo.png")))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/folder.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tbOpenCachePath.setIcon(icon1)
self.tbOpenQsubHeader.setIcon(icon1)
self.tbGautoOpenTemplate.setIcon(icon1)
self.ui_FileLocDialog.tbOpenGainRefPath.setIcon(icon1)
self.ui_FileLocDialog.tbOpenFiguresPath.setIcon(icon1)
self.ui_FileLocDialog.tbOpenInputPath.setIcon(icon1)
self.ui_FileLocDialog.tbOpenOutputPath.setIcon(icon1)
self.ui_FileLocDialog.tbOpenRawPath.setIcon(icon1)
self.ui_FileLocDialog.tbOpenSumPath.setIcon(icon1)
self.ui_FileLocDialog.tbOpenAlignPath.setIcon(icon1)
self.ui_OrienGainRefDialog.tbOrientGain_GainRef.setIcon(icon1)
self.ui_OrienGainRefDialog.tbOrientGain_TargetStack.setIcon(icon1)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/go-next.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tbRun.setIcon(icon2)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/process-stop.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tbKillAll.setIcon(icon3)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/user-trash.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tbDeleteFile.setIcon(icon4)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/boxes.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tbParticlePick.setIcon(icon5)
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/view-refresh.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tbReprocess.setIcon(icon6)
def preferPopout( self ):
preferredText = self.actionGroupPopout.checkedAction().text()
for vw in self.viewWidgetList:
vw.popout = preferredText
pass
def runSkulk( self ):
# Clean-up the file list
        for J in np.arange( self.listFiles.count() - 1, -1, -1 ):
            self.listFiles.takeItem( J )
# Check pathing, ask user to set if any fields are missing
if not self.checkValidPaths():
return
print( "##########IN runSkulk ############" )
print( self.cfgCluster )
# Init hosts and otherwise reset the skulkManager
self.skulk.initHosts( cluster_type = self.cfgCluster[u'cluster_type'],
n_processes = self.cfgCluster[u'n_processes'],
n_threads = self.cfgCluster[u'n_threads'],
n_syncs = self.cfgCluster[u'n_syncs'],
qsubHeader = self.cfgCluster[u'qsubHeader'] )
# Make a new thread
self.skulk.start()
# Disable GUI elements related to clusters
self.tbKillAll.setEnabled(True)
self.tbRun.setEnabled(False)
self.pageClusterConfig.setEnabled(False)
self.menuAnalysis.setEnabled(False)
def killSkulk( self ):
print( "Killing all the foxes in the skulk" )
self.skulk.kill()
#if self.skulkThread != None:
# self.skulkThread.join()
#self.skulkThread.exit()
# Re-enable locked UI elements
self.tbKillAll.setEnabled(False)
self.tbRun.setEnabled(True)
self.pageClusterConfig.setEnabled(True)
self.menuAnalysis.setEnabled(True)
@QtCore.Slot()
def updateFromSkulk( self, state_id, name, command ):
"""
This is a message from the skulk manager that it's had a file change. Remember that it's typed as
the signal is Qt, so if you change it in skulkManager you need to change the static declaration.
Valid colors:
u'aliceblue', u'antiquewhite', u'aqua', u'aquamarine', u'azure', u'beige', u'bisque', u'black',
u'blanchedalmond', u'blue', u'blueviolet', u'brown', u'burlywood', u'cadetblue', u'chartreuse',
u'chocolate', u'coral', u'cornflowerblue', u'cornsilk', u'crimson', u'cyan', u'darkblue', u'darkcyan',
u'darkgoldenrod', u'darkgray', u'darkgreen', u'darkgrey', u'darkkhaki', u'darkmagenta',
u'darkolivegreen', u'darkorange', u'darkorchid', u'darkred', u'darksalmon', u'darkseagreen',
u'darkslateblue', u'darkslategray', u'darkslategrey', u'darkturquoise', u'darkviolet', u'deeppink',
u'deepskyblue', u'dimgray', u'dimgrey', u'dodgerblue', u'firebrick', u'floralwhite', u'forestgreen',
u'fuchsia', u'gainsboro', u'ghostwhite', u'gold', u'goldenrod', u'gray', u'green', u'greenyellow',
u'grey', u'honeydew', u'hotpink', u'indianred', u'indigo', u'ivory', u'khaki', u'lavender',
u'lavenderblush', u'lawngreen', u'lemonchiffon', u'lightblue', u'lightcoral', u'lightcyan',
u'lightgoldenrodyellow', u'lightgray', u'lightgreen', u'lightgrey', u'lightpink', u'lightsalmon',
u'lightseagreen', u'lightskyblue', u'lightslategray', u'lightslategrey', u'lightsteelblue',
u'lightyellow', u'lime', u'limegreen', u'linen', u'magenta', u'maroon', u'mediumaquamarine',
u'mediumblue', u'mediumorchid', u'mediumpurple', u'mediumseagreen', u'mediumslateblue',
u'mediumspringgreen', u'mediumturquoise', u'mediumvioletred', u'midnightblue', u'mintcream',
u'mistyrose', u'moccasin', u'navajowhite', u'navy', u'oldlace', u'olive', u'olivedrab', u'orange',
u'orangered', u'orchid', u'palegoldenrod', u'palegreen', u'paleturquoise', u'palevioletred',
u'papayawhip', u'peachpuff', u'peru', u'pink', u'plum', u'powderblue', u'purple', u'red', u'rosybrown',
u'royalblue', u'saddlebrown', u'salmon', u'sandybrown', u'seagreen', u'seashell', u'sienna',
u'silver', u'skyblue', u'slateblue', u'slategray', u'slategrey', u'snow', u'springgreen', u'steelblue',
u'tan', u'teal', u'thistle', u'tomato', u'transparent', u'turquoise', u'violet', u'wheat', u'white',
u'whitesmoke', u'yellow', u'yellowgreen']
"""
# If we rename or delete the file, try to get it by state_id
# print( "Updating file %s with id %s to color/state %s" %(name,state_id,color) )
fullName = name
baseName = os.path.basename( name )
if command == 'rename':
if self.skulk.DEBUG:
print( "RENAME: (%s) %s " % (state_id, self.stateIds[state_id]) )
try:
oldName = self.stateIds.pop( state_id )
self.reverseIds.pop( oldName )
            except KeyError:
                raise KeyError( "Automator: Could not find state# %s in ID dict" % state_id )
            listItem = self.listFiles.findItems( oldName, QtCore.Qt.MatchFixedString )
            if len( listItem ) > 0:
                listItem = listItem[0]
                oldListName = listItem.text()
                self.listFiles.takeItem( self.listFiles.row( listItem ) )
            else:
                oldListName = oldName
                print( "DEBUG RENAME: Failed to find oldName: %s" % oldName )
            # We really have to remove the listItem as it seems Qt passes us a
            # copy instead of a pointer, i.e. updates by setText don't work.
            listItem = QtGui.QListWidgetItem( baseName )
            self.listFiles.addItem( listItem )
            newListName = listItem.text()
            if self.skulk.DEBUG:
                print( "DEBUG RENAME: (%s) from: %s, to: %s" % ( state_id, oldListName, newListName ) )
# We need to update our dicts
self.stateIds[state_id] = baseName
self.reverseIds[baseName] = state_id
# List should self sort
return
elif command == 'delete':
if self.skulk.DEBUG:
print( "DELETE: (%s) %s " % (state_id, self.stateIds[state_id]) )
try:
oldName = self.stateIds.pop( state_id )
self.reverseIds.pop( oldName )
            except KeyError:
                raise KeyError( "Automator: Could not find state# %s in ID dict" % state_id )
listItem = self.listFiles.findItems( baseName, QtCore.Qt.MatchFixedString )
if len( listItem ) > 0:
listItem = listItem[0]
self.listFiles.takeItem( self.listFiles.row( listItem ) )
return
elif command == u'indigo': # u'Finished'
self.statusbar.showMessage( "Finished processing: " + fullName )
if name != None:
# Update the id - key combination
self.stateIds[state_id] = baseName
self.reverseIds[baseName] = state_id
else:
# If name == None, we want to ID the key by its id number
baseName = self.stateIds[state_id]
# Check to see if the item exists already
listItem = self.listFiles.findItems( baseName, QtCore.Qt.MatchFixedString )
if len(listItem) == 0:
# New id-key pair
listItem = QtGui.QListWidgetItem( baseName )
self.listFiles.addItem( listItem )
else:
listItem = listItem[0]
# Can't compare QtGui.QListItem to None, so just use a try
try:
if command != None:
listItem.setForeground( QtGui.QBrush( QtGui.QColor( u""+command ) ) )
listItem.setToolTip( "%s: %s" % (TOOLTIP_STATUS[command],fullName) )
else:
listItem.setForeground( QtGui.QBrush( QtGui.QColor( u"black" ) ) )
listItem.setToolTip( u"Unknown" )
pass
except:
pass
        # Sort it? Should be automatic
# sizeFini = len( self.skulk.completeCounter )
sizeTotal = len( self.skulk )
self.labelMovie.setText( "Stack browser: %d / %d tot " % (self.skulk.completedCount, sizeTotal) )
def displaySelectedFile( self, item ):
# Get the ZorroObj from the stack browser
name = item.text()
# print( "Search for %s" % name + " in %s " % self.stateIds )
reverseState = {v: k for k, v in self.stateIds.items()}
if name in reverseState:
#if self.skulk.DEBUG:
# print( "Trying to update name: " + str(name) + ", " + str(reverseState[name]) )
self.updateAllViews( zorroObj = self.skulk[reverseState[name]].zorroObj )
def deleteSelected( self ):
itemList = self.listFiles.selectedItems()
confirmBox = QtGui.QMessageBox()
filenameString = ""
for item in itemList:
filenameString += item.text() + "\n"
confirmBox.setText( "Are you sure you want to delete all files related to: %s" % filenameString )
confirmBox.addButton( QtGui.QMessageBox.Cancel )
deleteButton = confirmBox.addButton( "Delete", QtGui.QMessageBox.ActionRole )
confirmBox.setDefaultButton( QtGui.QMessageBox.Cancel )
confirmBox.exec_()
if confirmBox.clickedButton() == deleteButton:
reverseState = {v: k for k, v in self.stateIds.items()}
for item in itemList:
#item = self.listFiles.currentItem()
if item is None:
continue
state_id = reverseState[item.text()]
# Delete everything
self.skulk.remove( state_id )
# The skulk will remove the item with a signal
def reprocessSelected( self ):
# item = self.listFiles.currentItem()
itemList = self.listFiles.selectedItems()
reverseState = {v: k for k, v in self.stateIds.items()}
for item in itemList:
if item is None: continue
self.skulk.reprocess( reverseState[item.text()] )
if self.skulk.DEBUG:
print( "DEBUG: stateIds = " + str(self.stateIds) )
print( "DEBUG: reverseIds = " + str(self.reverseIds) )
def particlePick( self ):
from . import Gautoauto
itemList = self.listFiles.selectedItems()
if len( itemList ) == 0:
return
sumList = []
pngFronts = []
for item in itemList:
if item is None: continue
# Get the zorro obj and feed the imageSum or filtSum to execGautoMatch
            # Maybe it would be safer to load the zorroObj explicitly?
zorroState = self.skulk[ self.reverseIds[ item.text() ] ]
if 'filt' in zorroState.zorroObj.files:
sumName = zorroState.zorroObj.files['filt']
sumList.append( sumName )
elif 'sum' in zorroState.zorroObj.files:
sumName = zorroState.zorroObj.files['sum']
sumList.append( sumName )
else:
print( "Could not find image sum for %s to particle pick" % item.text() )
try:
print( "zorroState.zorroObj.files: " + str(zorroState.zorroObj.files) )
except:
pass
return
stackBase = os.path.basename( os.path.splitext(zorroState.zorroObj.files['stack'])[0] )
pngFileFront = os.path.join( zorroState.zorroObj.files['figurePath'], stackBase )
pngFronts.append( pngFileFront )
automatchName = os.path.splitext( sumName )[0] + "_automatch.star"
rejectedName = os.path.splitext( sumName )[0] + "_rejected.star"
# Consider adding boxMask to the zorroObj files by default?
zorroState.zorroObj.files['figBoxMask'] = pngFileFront + "_boxMask.png"
zorroState.zorroObj.files['automatchBox'] = automatchName
zorroState.zorroObj.files['rejectedBox'] = rejectedName
# Update the config file on disk to reflect the boxes
zorroState.zorroObj.saveConfig()
# Submit job, this should be a new thread
print( "===== TODO: launch Gautomatch in seperate thread as it blocks =====" )
self.cfgGplot['edge'] = 64
        self.cfgGauto['apixM'] = 10.0*zorroState.zorroObj.pixelsize # This assumes all micrographs in the same directory have the same pixel size
self.cfgGplot['colorMap'] = 'viridis'
self.cfgGplot['boxAlpha'] = 0.5
self.cfgGplot['shapeOriginal'] = zorroState.zorroObj.shapeOriginal
self.cfgGplot['binning'] = 4
# DEBUG
self.cfgGplot['write_bg_mic'] = True
self.cfgGauto['diameter'] = 260
# Multiprocessed batch mode
#Gautoauto.batchProcess( sumList, pngFronts, self.cfgGauto, self.cfgGplot, n_processes=self.sbGautoNProcesses.value() )
# DEBUG: don't use multiprocessing, as error messages are useless
for J, sumName in enumerate(sumList):
# params = [mrcName, mode, optInit, optPlot, optRefine]
# mrcNames, pngFronts, optInit, optPlot, optRefine=None, n_processes=4 ):
Gautoauto.runGauto( [sumList[J],pngFronts[J],'batch', self.cfgGauto, self.cfgGplot, None] )
def checkValidPaths( self ):
errorState, errorText = self.skulk.paths.validate()
if bool( errorState ):
errorBox = QtGui.QMessageBox()
errorBox.setText( errorText )
errorBox.addButton( QtGui.QMessageBox.Ok )
errorBox.setDefaultButton( QtGui.QMessageBox.Ok )
errorBox.exec_()
return not errorState
def updateAllViews( self, zorroObj = None ):
for viewWidg in self.viewWidgetList:
viewWidg.updateZorroObj( zorroObj = zorroObj )
pass
def binningControl( self, command, funcHandle, funcArg=None ):
        # Basically this is just to allow us to set shapeBinned = None when the user
        # doesn't want binning, because that is the check inside Zorro
if command == 'enable':
value = funcHandle()
if bool(value):
self.sbBinCropY.setEnabled(True); self.sbBinCropX.setEnabled(True)
self.zorroDefault.shapeBinned = [self.sbBinCropY.value(), self.sbBinCropX.value()]
else:
self.sbBinCropY.setEnabled(False); self.sbBinCropX.setEnabled(False)
self.zorroDefault.shapeBinned = None
if command == 'shapeBin':
self.zorroDefault.shapeBinned = [self.sbBinCropY.value(), self.sbBinCropX.value()]
def updateDict( self, dictHandle, key, funcHandle, funcarg = None ):
# This is not mydict, this is a copy of mydict! Ergh...
if type(dictHandle) == str or ( sys.version_info.major == 2 and type(dictHandle) == unicode):
parts = dictHandle.split('.')
partHandle = self
for part in parts:
partHandle = getattr( partHandle, part )
dictHandle = partHandle
dictHandle[key] = funcHandle()
if key == u"DEBUG":
self.skulk.setDEBUG( self.cbDebuggingOutput.isChecked() )
if self.skulk.DEBUG:
print( "updateDict: [ %s ] : %s " % (key, dictHandle[key] ) )
#if key == u'n_threads':
# for hostName, hostObj in self.skulk.procHosts:
# hostObj.n_threads = funcHandle()
# pass
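        # Usage sketch (values hypothetical): a dotted string handle is resolved
        # attribute by attribute, so
        #   self.updateDict( u"skulk.paths", u'input_dir', lambda: u'/data/raw' )
        # walks self.skulk.paths via getattr() and then assigns paths['input_dir'].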
def updateZorroDefault( self, zorroAttrName, funcHandle, funcArg = None ):
if isinstance( funcHandle, tuple):
newVal = list( func() for func in funcHandle )
else:
newVal = funcHandle()
        # Check if we have a dict by splitting on '.', e.g. plotDict.multiprocess => plotDict['multiprocess']
tokens = zorroAttrName.split('.')
if self.skulk.DEBUG:
            try:
                print( "Changing zorroDefault." + zorroAttrName + " from: " +
                      str(self.zorroDefault.__getattribute__(tokens[0])) + " to: " + str(newVal) )
            except: pass
if newVal == 'none':
newVal = None
if len(tokens) == 1: # Class attribute
self.zorroDefault.__setattr__( tokens[0], newVal )
elif len(tokens) == 2: # Class dict
# Get the dict and set it by the provided key
handle = getattr( self.zorroDefault, tokens[0] )
handle[tokens[1]] = newVal
# Stealing reference
self.skulk.zorroDefault = self.zorroDefault
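        # Example (hypothetical): updateZorroDefault( u'plotDict.multiprocess', cb.isChecked )
        # splits on '.' and assigns self.zorroDefault.plotDict['multiprocess'], whereas a
        # single token such as u'pixelsize' sets the attribute directly.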
    def run_OrienGainRef( self ):
        """
        Calls zorro.scripts.orientGainReference.orientGainRef( gainRefName, stackName,
                    stackIsInAHole=True, applyHotPixFilter=True, doNoiseCorrelation=True,
                    relax=0.95, n_threads=None )
        """
        from zorro.scripts import orientGainReference
self.ui_OrienGainRefDialog.progressBar.setMaximum(0)
self.ui_OrienGainRefDialog.progressBar.show()
self.ui_OrienGainRefDialog.progressBar.setValue(0)
# Maybe this will have to be a subprocess with argv, if you want to have
# a progress bar? Ugh, what a pain...
try:
orientation = orientGainReference.orientGainRef(
self.ui_OrienGainRefDialog.leInputPath.text(),
self.ui_OrienGainRefDialog.leGainRefPath.text(),
stackIsInAHole = self.ui_OrienGainRefDialog.cbStackInHole.isChecked(),
applyHotPixFilter = self.ui_OrienGainRefDialog.cbApplyHotpixFilt.isChecked(),
doNoiseCorrelation = self.ui_OrienGainRefDialog.cbDoCorrel.isChecked(),
relax = self.ui_OrienGainRefDialog.sbHotpixRelax.value(),
n_threads = self.sbNThreads.value() )
self.ui_FileLocDialog.cbGainRot.setChecked( orientation[0] )
self.ui_FileLocDialog.cbGainVertFlip.setChecked( orientation[1] )
self.ui_FileLocDialog.cbGainHorzFlip.setChecked( orientation[2] )
except Exception as E:
print( E )
self.ui_OrienGainRefDialog.progressBar.setMaximum(100)
self.ui_OrienGainRefDialog.progressBar.hide()
def quitApp( self, event = None ):
print( "Shutting down: " + str(event) )
self.killSkulk()
# Try and save config if it was saved previously in the CWD
if self.cfgfilename in os.listdir('.'):
self.saveConfig( self.cfgfilename )
self.MainWindow.close()
self.FileLocDialog.close()
self.exit()
try:
sys.exit(0)
except SystemExit as e:
sys.exit(e)
except Exception:
raise
def loadConfig( self, cfgfilename ):
if cfgfilename is None:
# open a dialog and ask user to pick a file
cfgfilename = QtGui.QFileDialog.getOpenFileName( parent=self.MainWindow, caption="Load Initialization File",
dir="", filter="Ini files (*.ini)", selectedFilter="*.ini")[0]
if cfgfilename == '':
return
else:
self.cfgfilename = cfgfilename
self.centralwidget.blockSignals(True)
self.statusbar.showMessage( "Loaded config file: " + self.cfgfilename )
config = configparser.RawConfigParser(allow_no_value = True)
try:
config.optionxform = unicode # Python 2
except:
config.optionxform = str # Python 3
# Load all the zorro parameters into zorroDefault
self.zorroDefault.loadConfig( self.cfgfilename )
config.read( self.cfgfilename )
##### Common configuration ####
try:
self.cfgCommon = json.loads( config.get( u'automator', u'common' ) )
except: pass
try:
self.cbDebuggingOutput.setChecked( self.cfgCommon['DEBUG'] )
except: pass
if u"version" in self.cfgCommon and __version__ > self.cfgCommon[u"version"]:
print( "WARNING: Automator (%s) is not backward compatible with %s, version %s" %
(__version__, cfgfilename,self.cfgCommon[u"version"] ) )
return
##### Paths #####
try:
# Cannot do straight assignment with this because it's not a dict
# and we have no constructor with a dict.
norm_paths = json.loads( config.get(u'automator', u'paths' ) )
for key in norm_paths:
self.skulk.paths[key] = norm_paths[key]
except: pass
try:
self.ui_FileLocDialog.leInputPath.setText( self.skulk.paths.get_real(u'input_dir') )
except: pass
try:
self.ui_FileLocDialog.leOutputPath.setText( self.skulk.paths.get_real(u'output_dir') )
except: pass
# try:
# self.ui_FileLocDialog.leRawPath.setText( self.skulk.paths.get_real(u'cwd') )
# except: pass
try:
self.ui_FileLocDialog.leRawPath.setText( self.skulk.paths.get_real(u'raw_subdir') )
except: pass
try:
self.ui_FileLocDialog.leSumPath.setText( self.skulk.paths.get_real(u'sum_subdir') )
except: pass
try:
self.ui_FileLocDialog.leAlignPath.setText( self.skulk.paths.get_real(u'align_subdir') )
except: pass
try:
self.ui_FileLocDialog.leFiguresPath.setText( self.skulk.paths.get_real(u'fig_subdir') )
for viewWdgt in self.viewWidgetList:
viewWdgt.viewCanvas.param['figurePath'] = self.skulk.paths.paths[u'fig_subdir']
except: pass
try:
self.ui_FileLocDialog.leGainRefPath.setText( self.skulk.paths.get_real(u'gainRef') )
except: pass
try:
self.ui_FileLocDialog.comboCompressor.setCurrentIndex(
self.ui_FileLocDialog.comboCompressor.findText(
self.zorroDefault.files['compressor'] ) )
except: pass
try:
self.ui_FileLocDialog.cbGainHorzFlip.setChecked(
self.zorroDefault.gainInfo['Horizontal'])
self.ui_FileLocDialog.cbGainVertFlip.setChecked(
self.zorroDefault.gainInfo['Vertical'])
self.ui_FileLocDialog.cbGainRot.setChecked(
self.zorroDefault.gainInfo['Diagonal'])
except: pass
try:
self.ui_FileLocDialog.sbCLevel.setValue( int(self.zorroDefault.files['cLevel']) )
except: pass
##### views #####
for viewWidg in self.viewWidgetList:
try:
viewWidg.loadConfig( config )
pass
except: pass
###### Common configuration #####
###### Cluster configuration #####
try:
self.cfgCluster = json.loads( config.get( u'automator', u'cluster' ) )
except:
pass
try:
# self.cfgCluster["cluster_type"] = config.get( 'automator_cluster', 'cluster_type' )
self.comboClusterType.setCurrentIndex( self.comboClusterType.findText( self.cfgCluster[u"cluster_type"] ) )
except: print( "Failed to set cluster_type: " )
try:
# self.cfgCluster["n_processes"] = config.getint( 'automator_cluster', 'n_processes' )
self.sbNProcesses.setValue( self.cfgCluster[u"n_processes"] )
except: print( "Failed to set n_processes " )
try:
# self.cfgCluster["n_syncs"] = config.getint( 'automator_cluster', 'n_syncs' )
self.sbNSyncs.setValue( self.cfgCluster[u"n_syncs"] )
except: print( "Failed to set n_syncs " )
try:
# self.cfgCluster["n_threads"] = config.getint( 'automator_cluster', 'n_threads' )
self.sbNThreads.setValue( self.cfgCluster[u"n_threads"] )
except: print( "Failed to set n_threads " )
try:
# self.cfgCluster["qsubHeader"] = config.get( 'automator_cluster', 'qsubHeader' )
self.leQsubHeaderFile.setText( self.cfgCluster[u"qsubHeader"] )
except: print( "Failed to set qsubHeader " )
try: self.leCachePath.setText( self.zorroDefault.cachePath )
except: pass
try: self.comboFFTWEffort.setCurrentIndex( self.comboFFTWEffort.findText( self.zorroDefault.fftw_effort ) )
except: pass
try: self.cbMultiprocessPlots.setChecked( self.zorroDefault.plotDict[u"multiprocess"] )
except: print( "Failed to set multiprocessing option for plots." )
###### Gautomatch configuration #####
# Update all the GUI elements
try: self.comboTriMode.setCurrentIndex( self.comboTriMode.findText( self.zorroDefault.triMode ) )
except: pass
try: self.sbPeaksigThres.setValue( self.zorroDefault.peaksigThres )
except: pass
#try: self.sbStartFrame.setValue( self.zorroDefault.startFrame )
#except: pass
#try: self.sbEndFrame.setValue( self.zorroDefault.endFrame )
#except: pass
try: self.sbDiagWidth.setValue( self.zorroDefault.diagWidth )
except: pass
try: self.sbAutomax.setValue( self.zorroDefault.autoMax )
except: pass
try: self.cbSuppressOrigin.setChecked( self.zorroDefault.suppressOrigin )
except: pass
try: self.comboCtfProgram.setCurrentIndex( self.comboCtfProgram.findText( self.zorroDefault.CTFProgram ) )
except: print( "Unknown CTF tool: " + str(self.zorroDefault.CTFProgram) )
try: self.comboFilterMode.setCurrentIndex( self.comboFilterMode.findText( self.zorroDefault.filterMode ) )
except: print( "Unknown filter mode: " + str(self.zorroDefault.filterMode) )
try: self.cbSavePNG.setChecked( self.zorroDefault.savePNG )
except: pass
try: self.cbSaveMovie.setChecked( self.zorroDefault.saveMovie )
except: pass
        try: # Easier to copy-over both values than it is to block the signals
shapePadded = np.copy( self.zorroDefault.shapePadded )
self.sbShapePadX.setValue( shapePadded[1] )
self.sbShapePadY.setValue( shapePadded[0] )
except: print( "Failed to set sbShapePadX-Y" )
        try: # This is easier than blocking all the signals...
            if self.zorroDefault.shapeBinned is None:
self.cbDoBinning.setChecked( False )
else:
shapeBinned = np.copy( self.zorroDefault.shapeBinned )
self.sbBinCropX.setValue( shapeBinned[1] )
self.sbBinCropY.setValue( shapeBinned[0] )
self.cbDoBinning.setChecked( True )
except: print( "Failed to set sbBinCropX-Y" )
try:
fouCrop = np.copy( self.zorroDefault.fouCrop )
self.sbFouCropX.setValue( fouCrop[1] )
self.sbFouCropY.setValue( fouCrop[0] )
except: pass
try: self.sbPixelsize.setValue( self.zorroDefault.pixelsize )
except: pass
try: self.sbVoltage.setValue( self.zorroDefault.voltage )
except: pass
try: self.sbC3.setValue( self.zorroDefault.C3 )
except: pass
try: self.sbGain.setValue( self.zorroDefault.gain )
except: pass
try: self.sbMaxShift.setValue( self.zorroDefault.maxShift )
except: pass
try: self.comboOriginMode.setCurrentIndex( self.comboOriginMode.findText( self.zorroDefault.originMode ) )
except: pass
try: self.cbPreshift.setChecked( self.zorroDefault.preShift )
except: pass
try: self.cbSaveC.setChecked( self.zorroDefault.saveC )
except: pass
try: self.comboBmode.setCurrentIndex( self.comboBmode.findText( self.zorroDefault.Bmode ) )
except: pass
try: self.sbBrad.setValue( self.zorroDefault.Brad )
except: pass
try: self.comboWeightMode.setCurrentIndex( self.comboWeightMode.findText( self.zorroDefault.weightMode ) )
except: pass
try: self.sbSubpixReg.setValue( self.zorroDefault.subPixReg )
except: pass
try: self.comboShiftMethod.setCurrentIndex( self.comboShiftMethod.findText( self.zorroDefault.shiftMethod ) )
except: pass
# Gautomatch
try:
self.cfgGauto = json.loads( config.get( u'automator', u'gauto' ) )
except:
pass
try:
self.leGautoBoxsize.setText( self.cfgGauto[u'boxsize'] )
except: pass
try:
self.leGautoDiameter.setText( self.cfgGauto[u'diameter'] )
except: pass
try:
self.leGautoMin_Dist.setText( self.cfgGauto[u'min_dist'] )
except: pass
try:
self.leGautoTemplates.setText( self.cfgGauto[u'T'] )
except: pass
try:
self.leGautoAng_Step.setText( self.cfgGauto[u'ang_step'] )
except: pass
try:
self.leGautoSpeed.setText( self.cfgGauto[u'speed'] )
except: pass
try:
            self.leGautoCCCutoff.setText( self.cfgGauto[u'cc_cutoff'] )
except: pass
try:
self.leGautoLsigma_D.setText( self.cfgGauto[u'lsigma_D'] )
except: pass
try:
self.leGautoLsigma_Cutoff.setText( self.cfgGauto[u'lsigma_cutoff'] )
except: pass
try:
self.leGautoLave_D.setText( self.cfgGauto[u'lave_D'] )
except: pass
try:
self.leGautoLave_Min.setText( self.cfgGauto[u'lave_min'] )
except: pass
try:
self.leGautoLave_Max.setText( self.cfgGauto[u'lave_max'] )
except: pass
try:
self.leGautoLP.setText( self.cfgGauto[u'lp'] )
except: pass
try:
self.leGautoHP.setText( self.cfgGauto[u'hp'] )
except: pass
try:
self.leGautoLPPre.setText( self.cfgGauto[u'pre_lp'] )
except: pass
try:
self.leGautoHPPre.setText( self.cfgGauto[u'pre_hp'] )
except: pass
        # Plotting for Gautomatch; mirror the 'gplot' key written by saveConfig
        try:
            self.cfgGplot = json.loads( config.get( u'automator', u'gplot' ) )
        except: pass
try:
self.cbGautoDoprefilter.setChecked( self.cfgGplot[u'do_pre_filter'] )
except: pass
try:
self.cbGautoPlotCCMax.setChecked( self.cfgGplot[u'write_ccmax_mic'] )
except: pass
try:
self.cbGautoPlotPref.setChecked( self.cfgGplot[u"write_pref_mic"] )
except: pass
try:
self.cbGautoPlotBG.setChecked( self.cfgGplot[u"write_bg_mic"] )
except: pass
try:
self.cbGautoPlotBGFree.setChecked( self.cfgGplot[u"write_bgfree_mic"] )
except: pass
try:
            self.cbGautoPlotLsigma.setChecked( self.cfgGplot[u"write_lsigma_mic"] )
except: pass
try:
            self.cbGautoPlotMask.setChecked( self.cfgGplot[u"write_mic_mask"] )
except: pass
self.centralwidget.blockSignals(False)
# End of Automator.loadConfig
def saveConfig( self, cfgfilename ):
if cfgfilename is None:
cfgfilename = QtGui.QFileDialog.getSaveFileName(
parent=self.MainWindow,caption="Save Initialization File", dir="", filter="Ini files (*.ini)",
selectedFilter="*.ini")[0]
if cfgfilename == '':
return
else:
# Force extension to .ini
cfgfilename = os.path.splitext( cfgfilename )[0] + ".ini"
self.cfgfilename = cfgfilename
self.statusbar.showMessage( "Saving configuration: " + cfgfilename )
self.cfgCommon[u"version"] = __version__
# # Overwrite the file if it already exists
self.zorroDefault.saveConfig( cfgfilename )
# Read in the config prepared by Zorro
config = configparser.RawConfigParser(allow_no_value = True)
try:
config.optionxform = unicode # Python 2
except:
config.optionxform = str # Python 3
config.read( cfgfilename ) # Read back in everything from Zorro
### PATH ###
config.add_section( u'automator' )
config.set( u'automator', u'paths', json.dumps( self.skulk.paths.to_json() ) )
config.set( u'automator', u'common', json.dumps(self.cfgCommon ) )
config.set( u'automator', u'cluster', json.dumps(self.cfgCluster ) )
config.set( u'automator', u'gauto', json.dumps(self.cfgGauto ) )
config.set( u'automator', u'gplot', json.dumps(self.cfgGplot ) )
# config.add_section('automator_paths')
# for key in self.skulk.paths:
# config.set( 'automator_paths', key, self.skulk.paths[key] )
#
# config.add_section('automator_common')
# for key in self.cfgCommon:
# config.set( 'automator_common', key, self.cfgCommon[key] )
#
# config.add_section('automator_cluster')
# for key in self.cfgCluster:
# config.set( 'automator_cluster', key, self.cfgCluster[key] )
#
# config.add_section('automator_gauto')
# for key in self.cfgGauto:
# config.set( 'automator_gauto', key, self.cfgGauto[key] )
#
# config.add_section('automator_gplot')
# for key in self.cfgGplot:
# config.set( 'automator_gplot', key, self.cfgGplot[key] )
# Add viewWidgets using their built-in config writers
for viewWidg in self.viewWidgetList:
viewWidg.saveConfig( config )
        # Overwrite the merged configuration on disk
        with open( self.cfgfilename, 'w' ) as cfgfh:
            config.write( cfgfh )
def openFileDialog( self, name, openDialog ):
if bool(openDialog):
pathDialog = QtGui.QFileDialog()
pathDialog.setFileMode( QtGui.QFileDialog.AnyFile )
newFile = str( pathDialog.getOpenFileName(self.MainWindow, name, "")[0] )
else:
# Get directory from the lineedit object
print( "TODO" )
if name == u'OrientGain_GainRef':
self.ui_OrienGainRefDialog.leGainRefPath.setText( newFile )
elif name == u'OrientGain_TargetStack':
self.ui_OrienGainRefDialog.leInputPath.setText( newFile )
elif name == 'qsubHeader':
self.cfgCluster[u'qsubHeader'] = newFile # Would prefer to generate a signal here.
self.leQsubHeaderFile.setText( newFile )
elif name == u'gautoTemplates':
self.cfgGauto[u'T'] = newFile
self.leGautoTemplates.setText( newFile )
elif name == u'gainRef':
# self.zorroDefault.files['gainRef'] = newFile
print( "openFileDialog: Setting gainRef to %s" % newFile )
self.skulk.paths['gainRef'] = newFile
self.ui_FileLocDialog.leGainRefPath.setText( newFile )
def openPathDialog( self, pathname, openDialog ):
# Comments on how to handle updates from a lineedit Qt object
# http://stackoverflow.com/questions/12182133/pyqt4-combine-textchanged-and-editingfinished-for-qlineedit
if bool(openDialog):
pathDialog = QtGui.QFileDialog()
pathDialog.setFileMode( QtGui.QFileDialog.Directory )
pathDialog.setOption( QtGui.QFileDialog.ShowDirsOnly, True )
newPath = str(pathDialog.getExistingDirectory(self.MainWindow,pathname, ""))
print( "New path for "+ pathname + " : " + newPath )
else:
# Reading the directory back from the lineedit object is not
# implemented yet; return early so newPath is never used undefined.
print( "TODO: openPathDialog from lineedit" )
return
#self.FileLocDialog.raise_()
self.FileLocDialog.activateWindow()
if pathname == u'input_dir':
self.skulk.paths[pathname] = newPath
self.ui_FileLocDialog.leInputPath.setText( self.skulk.paths.get_real(pathname) )
elif pathname == u'output_dir':
self.skulk.paths[pathname] = newPath
self.ui_FileLocDialog.leOutputPath.setText( self.skulk.paths.get_real(pathname) )
elif pathname == u'raw_subdir':
self.skulk.paths[pathname] = newPath
self.ui_FileLocDialog.leRawPath.setText( self.skulk.paths.get_real(pathname) )
elif pathname == u'sum_subdir':
self.skulk.paths[pathname] = newPath
self.ui_FileLocDialog.leSumPath.setText( self.skulk.paths.get_real(pathname) )
elif pathname == u'align_subdir':
self.skulk.paths[pathname] = newPath
self.ui_FileLocDialog.leAlignPath.setText( self.skulk.paths.get_real(pathname) )
elif pathname == u'fig_subdir':
self.skulk.paths[pathname] = newPath
self.ui_FileLocDialog.leFiguresPath.setText( self.skulk.paths.get_real(pathname) )
elif pathname == u'cachePath':
print( "TODO: Automator.openPathDialog; manage cachePath" )
pass
def showCitationsDialog(self):
citations = u"""
Zorro:
McLeod, R.A., Kowal, J., Ringler, P., Stahlberg, H., Submitted.
CTFFIND4:
Rohou, A., Grigorieff, N., 2015. CTFFIND4: Fast and accurate defocus estimation from electron micrographs. Journal of Structural Biology, Recent Advances in Detector Technologies and Applications for Molecular TEM 192, 216-221. doi:10.1016/j.jsb.2015.08.008
GCTF:
Zhang, K., 2016. Gctf: Real-time CTF determination and correction. Journal of Structural Biology 193, 1-12. doi:10.1016/j.jsb.2015.11.003
SerialEM:
Mastronarde, D.N. 2005. Automated electron microscope tomography using robust prediction of specimen movements. J. Struct. Biol. 152:36-51.
"""
citBox = QtGui.QMessageBox()
citBox.setText( citations )
citBox.exec_()
def showImsHelpDialog(self):
citBox = QtGui.QMessageBox()
citBox.setText( zorro.zorro_plotting.IMS_HELPTEXT )
citBox.exec_()
def main():
# Instantiate the main GUI class
try:
mainGui = Automator()
except SystemExit:
# mainGui was never bound if the constructor raised, so don't del it
sys.exit()
if __name__ == '__main__': # Windows multiprocessing safety
main() | zorroautomator | /zorroautomator-0.7.4b0.tar.gz/zorroautomator-0.7.4b0/automator/Automator.py | Automator.py |
from zorro import util
import mrcz
import numpy as np
import matplotlib.pyplot as plt
import os, os.path, glob
import subprocess as sp
import multiprocessing as mp
import time
import skimage.io
###### USER OPTIONS FOR Gautomatch ######
# Benchmarking with bs-dw36: 24 physical cores Xeon(R) CPU E5-2680 v3 @ 2.50GHz, one GTX980 GPU
# cina-haf02 (white computer) is probably of similar-speed
# 16 proc = broke
# 8 proc = 23.0 s (breaks sometimes)
# 4 proc = 23.7 s
# Benchmarking with bs-gpu01: 4 physical CPU cores Xeon CPU E5-2603 0 @ 1.80GHz, four Tesla C2075 GPUs
# 4 proc = 143 s
# 2 proc = 263 s
# Benchmarking with bs-gpu04: 4 physical CPU cores Xeon CPU E5-2603 0 @ 1.80GHz, four Tesla C2075 GPUs
# 8 proc = 90 s
# 4 proc = 42 s
# 2 proc = 100 s
def runGauto( params ):
"""
params = [mrcName, pngFrontName, mode, optInit, optPlot, optRefine]
mode can be 'batch' or 'inspect'
"""
mrcName = params[0]
pngFrontName = params[1]
GautoMode = params[2]
optInit = params[3]
optPlot = params[4]
optRefine = params[5]
if optRefine is None:
optRefine = { 'doRefine':False }
if GautoMode == 'batch':
initRunGauto( mrcName, optInit, optPlot, optRefine )
if bool(optRefine['doRefine']):
optRefine['thresCC'] = refineRunGauto( mrcName, optInit, optPlot, optRefine )
else:
if 'cc_cutoff' in optInit:
optRefine['thresCC'] = optInit['cc_cutoff']
else:
optRefine['thresCC'] = 0.1 # Default value
if not bool(optRefine['thresCC']): optRefine['thresCC'] = 0.0
pass
# saveDiagnosticImages( mrcName, pngFrontName, optInit, optPlot, optRefine )
elif GautoMode == 'inspect':
optPlot['write_pref_mic'] = True; optPlot['write_ccmax_mic'] = True; optPlot['write_bg_mic'] = True;
optPlot['write_bgfree_mic'] = True; optPlot['write_lsigma_mic'] = True; optPlot['write_mic_mask'] = True;
initRunGauto( mrcName, optInit, optPlot, optRefine )
saveDiagnosticImages( mrcName, pngFrontName, optInit, optPlot, optRefine )
else:
print( "Unknown mode: %s"%GautoMode )
def initRunGauto( mrcName, optInit, optPlot, optRefine ):
try:
Gauto_exec = util.which( "gautomatch" )
except:
try:
Gauto_exec = util.which( "Gautomatch-v0.53_sm_20_cu7.5_x86_64" )
except:
raise SystemError( "Gautomatch not found in system path" )
devnull = open(os.devnull, 'w' )
extra_options = ""
if 'do_pre_filter' in optPlot and bool(optPlot['do_pre_filter']):
extra_options += " --do_pre_filter"
if 'write_pref_mic' in optPlot and bool(optPlot['write_pref_mic']):
extra_options += " --write_pref_mic"
if ('write_ccmax_mic' in optPlot and bool(optPlot['write_ccmax_mic'])) or bool(optRefine['doRefine']):
extra_options += " --write_ccmax_mic"
if 'write_bg_mic' in optPlot and bool(optPlot['write_bg_mic']):
extra_options += " --write_bg_mic"
if 'write_bgfree_mic' in optPlot and bool(optPlot['write_bgfree_mic']):
extra_options += " --write_bgfree_mic"
if 'write_lsigma_mic' in optPlot and bool(optPlot['write_lsigma_mic']):
extra_options += " --write_lsigma_mic"
if 'write_mic_mask' in optPlot and bool(optPlot['write_mic_mask']):
extra_options += " --write_mic_mask"
optGauto = " "
for (key,val) in optInit.items():
if bool(val):
optGauto += " --%s"%key + " " + str(val)
print( "********** GAUTOMATCH *****************" )
print( Gauto_exec + " " + mrcName + optGauto + extra_options )
sp.call( Gauto_exec + " " + mrcName + optGauto + extra_options, shell=True, stdout=devnull, stderr=devnull )
print( "***************************************" )
pass
def refineRunGauto( mrcName, optInit, optPlot, optRefine ):
try:
Gauto_exec = util.which( "Gautomatch" )
except:
try:
Gauto_exec = util.which( "Gautomatch-v0.53_sm_20_cu7.0_x86_64" )
except:
raise SystemError( "Gautomatch not found in system path" )
devnull = open(os.devnull, 'w' )
ccMaxName = os.path.splitext( mrcName )[0] + "_ccmax.mrc"
ccMax, _ = mrcz.readMRC( ccMaxName )
pdfCC, hX = np.histogram( ccMax[optPlot['edge']:-optPlot['edge'],optPlot['edge']:-optPlot['edge']], bins=512 )
pdfCC = pdfCC.astype('float32'); hX = hX[:-1]
cdfCC = np.cumsum( pdfCC )
cdfCC /= cdfCC[-1] # Normalize the cumulative sum to a CDF
# Find the threshold value for the cross-correlation cutoff
thresCC = hX[ np.argwhere( cdfCC > optRefine['cdfThres'] )[0][0] ]
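# For example, with cdfThres = 0.90 the threshold is the CC value below
# which 90 % of the edge-cropped ccmax pixels fall, so the refined run
# keeps only peaks in the brightest ~10 % of the cross-correlation map.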
extra_options = "" # No extra options for refine at present
optGauto = " "
copyOptInit = optInit.copy()
copyOptInit['cc_cutoff'] = thresCC
for (key,val) in copyOptInit.items():
if bool(val):
optGauto += " --%s"%key + " " + str(val)
# print( "Refinement Command: " + Gauto_exec + " " + mrcName + optGauto + extra_options )
sp.call( Gauto_exec + " " + mrcName + optGauto + extra_options, shell=True, stdout=devnull, stderr=devnull )
return thresCC
def saveDiagnosticImages( mrcName, pngFrontName, optInit, optPlot, optRefine ):
mrcFront = os.path.splitext( mrcName )[0]
print( "saveDiag looking for : " + mrcFront + "_automatch.box" )
boxName = mrcFront + "_automatch.star"
autoBoxes = np.loadtxt( boxName, comments="_", skiprows=4 )
if autoBoxes.size == 0: # Empty box
return
goodnessMetric = util.normalize( autoBoxes[:,4] )
boxes = (autoBoxes[:,0:2] ).astype( 'int' )
if 'thresCC' in optRefine:
print( "For %s picked %d boxes with CC_threshold of %.3f" %(mrcName, boxes.shape[0], np.float32(optRefine['thresCC'])) )
else:
print( "For %s picked %d boxes" %(mrcName, boxes.shape[0]) )
if not 'boxsize' in optInit:
if not 'diameter' in optInit:
# Ok, so we have no idea on the box size, so diameter is default of 400
optInit['diameter'] = 400.0
optInit['boxsize'] = optInit['diameter'] / optInit['apixM']
maskName = pngFrontName + "_boxMask.png"
generateAlphaMask( maskName, boxes.copy(), optInit['boxsize'], goodnessMetric.copy(), optPlot )
#print( optPlot )
if 'write_pref_mic' in optPlot and bool(optPlot['write_pref_mic']):
diagName = mrcFront + "_pref.mrc"
pngName = pngFrontName + "_pref.png"
generatePNG( diagName, pngName, boxes.copy(), optInit['boxsize'], goodnessMetric.copy(), optPlot )
if 'write_ccmax_mic' in optPlot and bool(optPlot['write_ccmax_mic']):
diagName = mrcFront + "_ccmax.mrc"
pngName = pngFrontName + "_ccmax.png"
generatePNG( diagName, pngName, boxes.copy(), optInit['boxsize'], goodnessMetric.copy(), optPlot)
if 'write_bg_mic' in optPlot and bool(optPlot['write_bg_mic']):
diagName = mrcFront + "_bg.mrc"
pngName = pngFrontName + "_bg.png"
generatePNG( diagName, pngName, boxes.copy(), optInit['boxsize'], goodnessMetric.copy(), optPlot)
if 'write_bgfree_mic' in optPlot and bool(optPlot['write_bgfree_mic']):
diagName = mrcFront + "_bgfree.mrc"
pngName = pngFrontName + "_bgfree.png"
generatePNG( diagName, pngName, boxes.copy(), optInit['boxsize'], goodnessMetric.copy(), optPlot )
if 'write_lsigma_mic' in optPlot and bool(optPlot['write_lsigma_mic']):
diagName = mrcFront + "_lsigma.mrc"
pngName = pngFrontName + "_lsigma.png"
generatePNG( diagName, pngName, boxes.copy(), optInit['boxsize'], goodnessMetric.copy(), optPlot)
if 'write_mic_mask' in optPlot and bool(optPlot['write_mic_mask']):
diagName = mrcFront + "_mask.mrc"
pngName = pngFrontName + "_mask.png"
generatePNG( diagName, pngName, boxes.copy(), optInit['boxsize'], goodnessMetric.copy(), optPlot )
def generateAlphaMask( maskName, boxes, boxWidth, goodnessMetric, optPlot ):
"""
Generate a PNG that is mostly transparent except for the boxes which are also high alpha. To be
plotted on top of diagnostic images.
"""
# print( "optPlot['binning'] = %s, type: %s" % (str(optPlot['binning']),type(optPlot['binning']) ))
binShape = np.array( optPlot['shapeOriginal'] ) / optPlot['binning']
# print( "boxes: %s, type: %s" % (boxes, type(boxes)))
boxes = np.floor_divide( boxes, optPlot['binning'] )
boxMask = np.zeros( [binShape[0], binShape[1], 4], dtype='float32' )
# boxAlpha = 255*optPlot['boxAlpha']
boxAlpha = optPlot['boxAlpha']
colorMap = plt.get_cmap( optPlot['colorMap'] )
boxWidth2 = int( boxWidth // 2 ) # np.int was removed in newer numpy
print( "DEBUG: writing box alpha mask for %d boxes" % boxes.shape[0] )
if boxes.ndim > 1:
for J in np.arange( boxes.shape[0] ):
color = np.array( colorMap( goodnessMetric[J] ) )
#color[:3] *= (255 * color[:3] )
color[3] = boxAlpha
print( "Box at %s has color %s" %(boxes[J,:],color))
# X-Y coordinates
try:
boxMask[ boxes[J,1]-boxWidth2:boxes[J,1]+boxWidth2, boxes[J,0]-boxWidth2:boxes[J,0]+boxWidth2,:3] += color[:3]
# Force even alpha even with overlapping boxes
boxMask[ boxes[J,1]-boxWidth2:boxes[J,1]+boxWidth2, boxes[J,0]-boxWidth2:boxes[J,0]+boxWidth2, 3] = color[3]
except:
pass # Don't draw anything
# Save diagnostic image
# We don't flip this because it's being plotted by matplotlib on top of our other diagnostic images.
plt.figure()
plt.imshow( boxMask )
plt.title( "boxMask before clipping" )
boxMask = (255* np.clip( boxMask, 0.0, 1.0) ).astype('uint8')
plt.figure()
plt.imshow( boxMask )
plt.title( "boxMask after clipping" )
plt.show( block=True )
# Maybe the default skimage plugin can't handle alpha?
skimage.io.imsave( maskName, boxMask )
def generatePNG( diagName, pngName, boxes, boxWidth, goodnessMetric, optPlot ):
###############################################
boxWidth2 = int( boxWidth // 2 )
if not os.path.isfile( diagName ):
print( "File not found: %s"%diagName )
return
Mage, _ = mrcz.readMRC( diagName )
if Mage.shape[0] <= 512:
binning = 8
elif Mage.shape[0] <= 682:
binning = 6
elif Mage.shape[0] <= 1024:
binning = 4
elif Mage.shape[0] <= 2048:
binning = 2
else:
binning = 1
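# This ladder maps the (already down-sampled) diagnostic image back to
# original-pixel box coordinates: e.g. a ~927 px ccmax rendered from a
# ~3708 px micrograph needs binning = 4, a full-size image binning = 1.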
boxes = np.floor_divide( boxes, binning )
print( "DEBUG: binning = " + str(binning) )
# Cut off the edges where the images may be uneven
edge = np.floor_divide( optPlot['edge'], binning )
# Gautomatch outputs 927 x 927, which is 3708 x 3708 binned by 4
x_special = 0
boxes[:,0] -= (edge + x_special)
# TODO: why is the Y-axis origin of the boxes weird? Ah-ha! Gautomatch is making every image rectangular!
# How to know this special value without loading original image?
y_special = np.floor_divide( (3838-3708) , (4*binning) )
boxes[:,1] += (edge + y_special)
cropMage = Mage[edge:-edge,edge:-edge]
# Stretch contrast limits
cutoff = 1E-3
clim = util.histClim( cropMage, cutoff=cutoff )
cropMage = ( 255 * util.normalize( np.clip( cropMage, clim[0], clim[1] ) ) ).astype('uint8')
# Make into grayscale RGB
cropMage = np.dstack( [cropMage, cropMage, cropMage] )
# Make a origin box for debugging
# cropMage[:edge,:edge,:] = np.zeros( 3, dtype='uint8' )
# Write in colored boxes for particle positions
colorMap = plt.get_cmap( optPlot['colorMap'] )
if boxes.ndim > 1:
for J in np.arange( boxes.shape[0] ):
color = np.array( colorMap( goodnessMetric[J] )[:-1] )
color /= np.sum( color )
# X-Y coordinates
# Did these somehow become box centers?
# boxElem = cropMage[ boxes[J,0]:boxes[J,0]+boxes[J,2], boxes[J,1]:boxes[J,1]+boxes[J,3], : ]
# boxElem = (1.0-optPlot['boxAlpha'])*boxElem + optPlot['boxAlpha']*color*boxElem
# cropMage[ boxes[J,0]:boxes[J,0]+boxes[J,2], boxes[J,1]:boxes[J,1]+boxes[J,3], : ] = boxElem.astype('uint8')
# Y-X coordinates
try:
boxElem = cropMage[ boxes[J,1]-boxWidth2:boxes[J,1]+boxWidth2, boxes[J,0]-boxWidth2:boxes[J,0]+boxWidth2,:]
boxElem = (1.0-optPlot['boxAlpha'])*boxElem + optPlot['boxAlpha']*color*boxElem
cropMage[ boxes[J,1]-boxWidth2:boxes[J,1]+boxWidth2, boxes[J,0]-boxWidth2:boxes[J,0]+boxWidth2,:] = boxElem.astype('uint8')
# Draw origin
# cropMage[ boxes[J,1],boxes[J,0],:] = np.array( [255, 0, 0], dtype='uint8' )
except:
pass # Don't draw anything
# Save diagnostic image
cropMage = np.flipud(cropMage)
skimage.io.imsave( pngName, cropMage )
# Remove the MRC
os.remove( diagName )
pass
def batchProcess( mrcNames, pngFronts, optInit, optPlot, optRefine=None, n_processes=4 ):
#outQueue = mp.Queue()
pool = mp.Pool( processes=n_processes )
GautoParam = [None] * len(mrcNames)
for J, mrcName in enumerate(mrcNames):
GautoParam[J] = [mrcNames[J],pngFronts[J],'batch', optInit, optPlot, optRefine]
pool.map( runGauto, GautoParam )
pool.close()
pool.join()
if __name__ == "__main__":
import sys
# None for any entry in the dictionary will not be passed to Gautomatch
optInit = {}
optInit['apixM'] = 1.326
optInit['diameter'] = 180
optInit['boxsize']= 224
optInit['min_dist'] = 240
optInit['T'] = None # Templates.mrc
optInit['apixT'] = None
optInit['ang_step'] = None
optInit['speed'] = None
optInit['cc_cutoff'] = 0.1
optInit['lsigma_D'] = 200
optInit['lsigma_cutoff'] = None
optInit['lave_D'] = 300
optInit['lave_max'] = None
optInit['lave_min'] = None
optInit['hp'] = 400
optInit['lp'] = None
optInit['pre_lp'] = None
optInit['pre_hp'] = None
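# Note: 'diameter' and 'min_dist' appear to be in Angstroms, since
# saveDiagnosticImages later derives the pixel box size as
# diameter / apixM; treat the values above as illustrative defaults.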
##### OPTIONS FOR Gautomatch REFINEMENT ######
optRefine = {}
optRefine['doRefine'] = False
optRefine['cdfThres'] = 0.90 # varies in range [0,1], for example 0.95 is 95th percentile
##### Plotting options #####
optPlot = {}
optPlot['edge'] = 64 # edge in pixels to chop off ccmax to avoid edge artifacts
optPlot['boxAlpha'] = 0.25
optPlot['colorMap'] = plt.cm.gist_rainbow
optPlot['pref'] = False
optPlot['ccmax'] = False
optPlot['lsigma'] = False
optPlot['bg'] = False
optPlot['bgfree'] = True
optPlot['mask'] = False
mrcGlob = "*filt.mrc" # Try not to use *.mrc as it will match *ccmax.mrc for example
t0 = time.time()
mrcList = glob.glob( mrcGlob )
if len( sys.argv ) > 1 and (sys.argv[1] == "--inspect" or sys.argv[1] == "-i"):
import random
if len( sys.argv ) >= 3 and bool( sys.argv[2] ):
icnt = int( sys.argv[2] )
else:
icnt = 3
indices = random.sample( range( len(mrcList) ), icnt )
print( "Inspecting %d micrographs : %s" % (icnt,indices) )
mrcList = [ mrcList[index] for index in indices ]
# Derive the PNG name prefixes from the micrograph names (assumed convention)
pngFronts = [ os.path.splitext( mrcName )[0] for mrcName in mrcList ]
for J, mrcName in enumerate(mrcList):
runGauto( [mrcName, pngFronts[J], 'inspect', optInit, optPlot, optRefine] )
else: #default behaviour is batch processing of entire directory.
pngFronts = [ os.path.splitext( mrcName )[0] for mrcName in mrcList ]
batchProcess( mrcList, pngFronts, optInit, optPlot, optRefine )
# print( output )
t1 = time.time()
print( "Finished auto-Gautomatch in %.2f s" %(t1-t0) ) | zorroautomator | /zorroautomator-0.7.4b0.tar.gz/zorroautomator-0.7.4b0/automator/Gautoauto.py | Gautoauto.py |
from __future__ import division, print_function, absolute_import, unicode_literals
import os
import matplotlib
matplotlib.use( 'Qt4Agg' )
try:
from PySide import QtGui
matplotlib.rcParams['backend.qt4']='PySide'
os.environ.setdefault('QT_API','pyside')
except ImportError:
# TODO: fall back to PyQt4 as a backup binding?
print( "MplCanvas: PySide not found." )
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
#from matplotlib.figure import Figure
import numpy as np
#from itertools import cycle
#from collections import OrderedDict
import skimage.io
from zorro import plot as zplt
import subprocess
import tempfile
# How to design custom controls with PyQT:
# http://doc.qt.digia.com/qq/qq26-pyqtdesigner.html
class MplCanvas(FigureCanvas,object):
"""This is an empty QWidget of type FigureCanvasQTAgg. Uses a zorro_plotting.zorroPlot object to do all
the live plotting, or it can load graphics files from disk."""
@property
def zorroObj(self):
return self._zorroObj
@zorroObj.setter
def zorroObj(self, newZorroObj ):
#print( "Set _zorroObj" )
if not bool( newZorroObj ):
return
self._zorroObj = newZorroObj
# Used for mapping combo box text to files in the zorroObj
# baseName should be location of the config file
baseDir = ''
# if 'config' in self._zorroObj.files:
# baseDir = os.path.split( self._zorroObj.files['config'] )[0]
if 'figBoxMask' in self._zorroObj.files:
# This isn't here... it's next to sum...
self.pixmapDict[u'Box Mask'] = os.path.join( baseDir, self._zorroObj.files['figBoxMask'] )
if 'figStats' in self._zorroObj.files:
self.pixmapDict[u'Statistics'] = os.path.join( baseDir, self._zorroObj.files['figStats'] )
if 'figTranslations' in self._zorroObj.files:
self.pixmapDict[u'Drift'] = os.path.join( baseDir, self._zorroObj.files['figTranslations'] )
if 'figPixRegError' in self._zorroObj.files:
self.pixmapDict[u'Drift error'] = os.path.join( baseDir, self._zorroObj.files['figPixRegError'] )
if 'figPeaksigTriMat' in self._zorroObj.files:
self.pixmapDict[u'Peak significance'] = os.path.join( baseDir, self._zorroObj.files['figPeaksigTriMat'] )
if 'figCorrTriMat' in self._zorroObj.files:
self.pixmapDict[u'Correlation coefficient'] = os.path.join( baseDir, self._zorroObj.files['figCorrTriMat'] )
if 'figCTFDiag' in self._zorroObj.files:
self.pixmapDict[u'CTF diagnostic'] = os.path.join( baseDir, self._zorroObj.files['figCTFDiag'] )
if 'figLogisticWeights' in self._zorroObj.files:
self.pixmapDict[u'Logistic weights'] = os.path.join( baseDir, self._zorroObj.files['figLogisticWeights'] )
if 'figImageSum' in self._zorroObj.files:
self.pixmapDict[u'Image sum'] = os.path.join( baseDir, self._zorroObj.files['figImageSum'] )
if 'figFFTSum' in self._zorroObj.files:
self.pixmapDict[u'Fourier mag'] = os.path.join( baseDir, self._zorroObj.files['figFFTSum'] )
if 'figPolarFFTSum' in self._zorroObj.files:
self.pixmapDict[u'Polar mag'] = os.path.join( baseDir, self._zorroObj.files['figPolarFFTSum'] )
if 'figFiltSum' in self._zorroObj.files:
self.pixmapDict[u'Dose filtered sum'] = os.path.join( baseDir, self._zorroObj.files['figFiltSum'] )
if 'figFRC' in self._zorroObj.files:
self.pixmapDict[u'Fourier Ring Correlation'] = os.path.join( baseDir, self._zorroObj.files['figFRC'] )
def __init__(self, parent=None, width=4, height=4, plot_dpi=72, image_dpi=250):
object.__init__(self)
self.plotObj = zplt.zorroPlot( width=width, height=height,
plot_dpi=plot_dpi, image_dpi=image_dpi,
facecolor=[0,0,0,0], MplCanvas=self )
FigureCanvas.__init__(self, self.plotObj.fig)
self.currPlotFunc = self.plotObj.plotTranslations
self.cmap = 'gray'
self._zorroObj = None
self.plotName = None
self.live = True # Whether to re-render the plots with each update event or use a rendered graphics-file loaded from disk
self.PixmapName = None
self.Pixmap = None
# plotFuncs is a hash to function mapping
# These may need to add the appropriate data to plotDict? I could use functools.partial?
self.plotFuncs = {}
self.plotFuncs[""] = None
self.plotFuncs[u'Statistics'] = self.plotObj.plotStats
self.plotFuncs[u'Drift'] = self.plotObj.plotTranslations
self.plotFuncs[u'Drift error'] = self.plotObj.plotPixRegError
self.plotFuncs[u'Peak significance'] = self.plotObj.plotPeaksigTriMat
self.plotFuncs[u'Correlation coefficient'] = self.plotObj.plotCorrTriMat
self.plotFuncs[u'CTF diagnostic'] = self.plotObj.plotCTFDiag
self.plotFuncs[u'Logistic weights'] = self.plotObj.plotLogisticWeights
self.plotFuncs[u'Stack'] = self.plotObj.plotStack
self.plotFuncs[u'Image sum'] = self.plotObj.plotImage
self.plotFuncs[u'Fourier mag'] = self.plotObj.plotFFT
self.plotFuncs[u'Polar mag'] = self.plotObj.plotPolarFFT
self.plotFuncs[u'Cross correlations'] = self.plotObj.plotStack # TODO
self.plotFuncs[u'Dose filtered sum'] = self.plotObj.plotImage
self.plotFuncs[u'Fourier Ring Correlation'] = self.plotObj.plotFRC
self.liveFuncs = {}
self.liveFuncs[u'Statistics'] = self.liveStats
self.liveFuncs[u'Image sum'] = self.liveImageSum
self.liveFuncs[u'Dose filtered sum'] = self.liveFiltSum
self.liveFuncs[u'Drift'] = self.liveTranslations
self.liveFuncs[u'Drift error'] = self.livePixRegError
self.liveFuncs[u'Peak significance'] = self.livePeaksigTriMat
self.liveFuncs[u'Correlation coefficient'] = self.livePeaksigTriMat
self.liveFuncs[u'Logistic weights'] = self.liveLogisticWeights
self.liveFuncs[u'Fourier Ring Correlation'] = self.liveFRC
self.liveFuncs[u'CTF diagnostic'] = self.liveCTFDiag
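# Dispatch sketch: updatePlotFunc(u'Drift', zorroObj) looks up
# plotFuncs[u'Drift']; in live mode livePlot() then runs
# liveFuncs[u'Drift'] to populate plotObj.plotDict before rendering,
# otherwise loadPixmap() shows the pre-rendered file from pixmapDict.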
self.pixmapDict = {}
# WARNING WITH SPYDER: Make sure PySide is the default in the console
# self.setSizePolicy(self, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
self.updateGeometry()
##### 2DX VIEW #####
def exportTo2dx( self ):
# Write a params file
#paramFile = tempfile.mktemp()
#with open( paramFile, 'w' ):
# pass
# Temporary directory that we can delete? We could use tempfile
# Invoke
#subprocess.Popen( "2dx_viewer -p %s %s" % (paramFile) )
# When to delete paramFile?
if self.plotName == u'Dose filtered sum':
realPath = os.path.realpath( self._zorroObj.files['filt'] )
subprocess.Popen( "2dx_viewer %s" % (realPath), shell=True )
elif self.plotName == u'Image sum':
realPath = os.path.realpath( self._zorroObj.files['sum'] )
subprocess.Popen( "2dx_viewer %s" % (realPath), shell=True )
else:
print( "Unsupported plot function for 2dx_viewer" )
pass
def exportToIms( self ):
if self.plotName == u'Dose filtered sum':
realPath = os.path.realpath( self._zorroObj.files['filt'] )
subprocess.Popen( "ims %s" % (realPath), shell=True )
elif self.plotName == u'Image sum':
realPath = os.path.realpath( self._zorroObj.files['sum'] )
subprocess.Popen( "ims %s" % (realPath), shell=True )
else:
print( "Unsupported plot function for ims" )
pass
##### LIVE VIEW #####
def livePlot(self, plotName ):
print( "called livePlot" )
# Check the plotObj's plotDict for correct fields
# Do seperate sub-functions for each plot type?
if self._zorroObj is None:
return
if plotName in self.liveFuncs:
self.liveFuncs[plotName]()
else:
print( "Live function: %s not found." % plotName )
self.currPlotFunc = self.plotObj.plotEmpty
# Plot
self.currPlotFunc()
self.redraw()
def liveStats( self ):
self.plotObj.plotDict['pixelsize'] = self._zorroObj.pixelsize
self.plotObj.plotDict['voltage'] = self._zorroObj.voltage
self.plotObj.plotDict['c3'] = self._zorroObj.C3
if len( self._zorroObj.errorDictList ) > 0 and 'peaksigTriMat' in self._zorroObj.errorDictList[-1]:
peaksig = self._zorroObj.errorDictList[-1]['peaksigTriMat']
peaksig = peaksig[ peaksig > 0.0 ]
self.plotObj.plotDict['meanPeaksig'] = np.mean( peaksig )
self.plotObj.plotDict['stdPeaksig'] = np.std( peaksig )
if np.any( self._zorroObj.CTFInfo['DefocusU'] ):
self.plotObj.plotDict['CTFInfo'] = self._zorroObj.CTFInfo
self.currPlotFunc = self.plotObj.plotStats
def liveImageSum( self ):
try:
if not np.any(self._zorroObj.imageSum): # Try to load it
self._zorroObj.loadData( stackNameIn = self._zorroObj.files['sum'], target="sum" )
self.plotObj.plotDict['image'] = self._zorroObj.getSumCropToLimits()
self.plotObj.plotDict['image_cmap'] = self.cmap
self.currPlotFunc = self.plotObj.plotImage
except:
self.currPlotFunc = self.plotObj.plotEmpty
def liveFiltSum( self ):
try:
if not np.any(self._zorroObj.filtSum): # Try to load it
self._zorroObj.loadData( stackNameIn = self._zorroObj.files['filt'], target="filt" )
self.plotObj.plotDict['image'] = self._zorroObj.getFiltSumCropToLimits()
self.plotObj.plotDict['image_cmap'] = self.cmap
self.currPlotFunc = self.plotObj.plotImage
except:
self.currPlotFunc = self.plotObj.plotEmpty
def liveTranslations( self ):
if np.any( self._zorroObj.translations ):
self.plotObj.plotDict['translations'] = self._zorroObj.translations
try:
self.plotObj.plotDict['errorX'] = self._zorroObj.errorDictList[0]['errorX']
self.plotObj.plotDict['errorY'] = self._zorroObj.errorDictList[0]['errorY']
except: pass
self.currPlotFunc = self.plotObj.plotTranslations
else:
self.currPlotFunc = self.plotObj.plotEmpty
def livePixRegError( self ):
try:
self.plotObj.plotDict['errorX'] = self._zorroObj.errorDictList[0]['errorX']
self.plotObj.plotDict['errorY'] = self._zorroObj.errorDictList[0]['errorY']
self.plotObj.plotDict['errorXY'] = self._zorroObj.errorDictList[0]['errorXY']
self.currPlotFunc = self.plotObj.plotPixRegError
except:
self.currPlotFunc = self.plotObj.plotEmpty
def livePeaksigTriMat( self ):
try:
self.plotObj.plotDict['peaksigTriMat'] = self._zorroObj.errorDictList[0]['peaksigTriMat']
self.plotObj.plotDict['graph_cmap'] = self.cmap
self.currPlotFunc = self.plotObj.plotPeaksigTriMat
except:
self.currPlotFunc = self.plotObj.plotEmpty
def liveCorrTriMat( self ):
try:
self.plotObj.plotDict['corrTriMat'] = self._zorroObj.errorDictList[0]['corrTriMat']
self.plotObj.plotDict['graph_cmap'] = self.cmap
self.currPlotFunc = self.plotObj.plotCorrTriMat
except:
self.currPlotFunc = self.plotObj.plotEmpty
def liveLogisticWeights( self ):
try:
if self._zorroObj.weightMode == 'autologistic' or self._zorroObj.weightMode == 'logistic':
self.plotObj.plotDict['peaksigThres'] = self._zorroObj.peaksigThres
self.plotObj.plotDict['logisticK'] = self._zorroObj.logisticK
self.plotObj.plotDict['logisticNu'] = self._zorroObj.logisticNu
self.plotObj.plotDict['errorXY'] = self._zorroObj.errorDictList[0]["errorXY"]
self.plotObj.plotDict['peaksigVect'] = self._zorroObj.errorDictList[0]["peaksigTriMat"][ self._zorroObj.errorDictList[0]["peaksigTriMat"] > 0.0 ]
if 'cdfPeaks' in self._zorroObj.errorDictList[0]:
self.plotObj.plotDict['cdfPeaks'] = self._zorroObj.errorDictList[0]['cdfPeaks']
self.plotObj.plotDict['hSigma'] = self._zorroObj.errorDictList[0]['hSigma']
self.currPlotFunc = self.plotObj.plotLogisticWeights
except Exception as e:
print( "MplCanvas.liveLogisticWeights received exception " + str(e) )
self.currPlotFunc = self.plotObj.plotEmpty
def liveFRC( self ):
try:
self.plotObj.plotDict['FRC'] = self._zorroObj.FRC
self.plotObj.plotDict['pixelsize'] = self._zorroObj.pixelsize
if bool( self.zorroObj.doEvenOddFRC ):
self.plotObj.plotDict['labelText'] = "Even-odd frame independent FRC"
else:
self.plotObj.plotDict['labelText'] = "Non-independent FRC is not a resolution estimate"
self.currPlotFunc = self.plotObj.plotFRC
except:
self.currPlotFunc = self.plotObj.plotEmpty
def liveCTFDiag( self ):
try:
self.plotObj.plotDict['CTFDiag'] = self._zorroObj.CTFDiag
self.plotObj.plotDict['CTFInfo'] = self._zorroObj.CTFInfo
self.plotObj.plotDict['pixelsize'] = self._zorroObj.pixelsize
self.plotObj.plotDict['image_cmap'] = self.cmap
self.currPlotFunc = self.plotObj.plotCTFDiag
except:
self.currPlotFunc = self.plotObj.plotEmpty
##### DEAD VIEW #####
def loadPixmap( self, plotName, filename = None ):
if not bool(filename):
# Pull the filename from the zorro log
try:
# print( plotName )
filename = self.pixmapDict[plotName]
print( "Pulling figure name: %s"%filename )
except KeyError:
self.currPlotFunc = self.plotObj.plotEmpty # Assign the function, not its return value
self.currPlotFunc()
self.redraw()
return
if not bool( filename ): # Probably an unprocessed stack
return
if not os.path.isfile(filename):
raise IOError("automator.MplCanvas.loadPixmap: file not found: %s" % filename )
self.PixmapName = filename
self.Pixmap = skimage.io.imread( filename )
self.plotObj.plotDict['pixmap'] = self.Pixmap
self.currPlotFunc = self.plotObj.plotPixmap # Assign the function, not its return value
self.currPlotFunc()
self.redraw()
def updatePlotFunc(self, plotName, newZorroObj = None ):
# print( "plotName = " + str(plotName) +", zorroObj = " + str(newZorroObj) )
try:
self.plotName = plotName
self.currPlotFunc = self.plotFuncs[ plotName ]
except KeyError:
raise KeyError( "automator.MplCanvas.updatePlotFunc: Plot type not found in plotDict: %s" % plotName )
self.zorroObj = newZorroObj # setter auto-checks validity... the setter isn't working right...
if bool( self.live ):
self.plotObj.axes2 = None
self.livePlot( plotName )
else:
self.loadPixmap( plotName )
def redraw(self):
#self.plotObj.updateCanvas()
self.draw() | zorroautomator | /zorroautomator-0.7.4b0.tar.gz/zorroautomator-0.7.4b0/automator/MplCanvas.py | MplCanvas.py |
from __future__ import division, print_function, absolute_import, unicode_literals
from PySide import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
import os.path
import numpy as np
#from copy import copy
import functools
# Import static types from MplCanvas
#from . import MplCanvas
import matplotlib.image
from . import Ui_ViewWidget
from zorro.zorro_util import which
class ViewWidget(QtGui.QWidget, Ui_ViewWidget.Ui_ViewWidget, object):
@property
def live( self ):
return self._live
@live.setter
def live( self, value ):
value = bool(value)
if value != self._live:
self._live = value
self.tbLive.setChecked(value)
self.viewCanvas.live = value
self.tbChangeColormap.setEnabled(value)
self.tbLogIntensity.setEnabled(value)
self.tbToggleColorbar.setEnabled(value)
self.tbToggleHistogramContrast.setEnabled(value)
self.viewCanvas.updatePlotFunc( self.comboView.currentText() )
def __init__( self, parent=None ):
"""
QtGui.QWidget.__init__(self)
self.ui = Ui_ViewWidget()
self.ui.setupUi( self )
"""
object.__init__(self)
QtGui.QWidget.__init__(self)
# Using multiple inheritence setup Ui from QtDesigner
self.setupUi(self)
self.parent = parent # This is generally not what I need, what I need is the Automator object
self.autoParent = None # Set by hand in the Automator.__init__() function
self.popout = "2dx_viewer"
self.__popoutObj = None
self.viewNumber = 0
self._live = True
# Re-direct keyPress functions
self.keyPressEvent = self.grabKey
# Run through all the widgets and redirect the key-presses to everything BUT the spinboxes
# widgetlist = self.ui.centralwidget.findChildren( QtGui.QWidget )
# print "TO DO: release focus from spinboxes on ENTER key press"
# for mywidget in widgetlist:
# # print "Pause"
# if not mywidget.__class__ is QtGui.QDoubleSpinBox:
# mywidget.keyPressEvent = self.grabKey
# Set paths to icons to absolute paths
self.joinIconPaths()
# Connect slots
self.comboView.currentIndexChanged.connect( self.updatePlotType )
self.tbNextImage.clicked.connect( functools.partial( self.shiftImageIndex, 1 ) )
self.tbPrevImage.clicked.connect( functools.partial( self.shiftImageIndex, -1 ) )
self.tbShowBoxes.toggled.connect( self.toggleShowBoxes )
# self.leImageIndex.textChanged( self.updateImageIndex )
self.leImageIndex.textEdited.connect( self.updateImageIndex )
self.tbToggleColorbar.toggled.connect( self.toggleColorbar )
self.tbChangeColormap.clicked.connect( self.cycleColormap )
self.tbLogIntensity.toggled.connect( self.toggleLogInt )
self.tbPopoutView.clicked.connect( self.popoutViewDialog )
self.tbLive.toggled.connect( self.toggleLiveView )
self.sbHistogramCutoff.valueChanged.connect( self.updateHistClim )
# This doesn't work because there's two types of valueChanged sent
# BUT ONLY WHEN AN IMAGE IS LOADED...
# Has another example with new types
# http://pyqt.sourceforge.net/Docs/PyQt4/new_style_signals_slots.html
# Try oldschool connect, deprecated not working?
# self.connect( self.sbHistogramCutoff, QtCore.SIGNAL('valueChanged(double)'), self.updateHistClim )
# self.connect
# This says it's a timer thing due to mouse presses:
# http://www.qtcentre.org/threads/43078-QSpinBox-Timer-Issue
self.sbHistogramCutoff.validate = None
def grabKey( self, event ):
# I think some of these key presses aren't being intercepted?
print( "ViewWidget"+str(self.viewNumber)+"::grabKey : " + str(event.key()) )
if( event.key() == QtCore.Qt.Key_Down ):
print( "Down" )
elif( event.key() == QtCore.Qt.Key_Up ):
print( "Up" )
elif( event.key() == QtCore.Qt.Key_Left ):
print( "Left" )
elif( event.key() == QtCore.Qt.Key_Right ):
print( "Right" )
elif( event.key() == QtCore.Qt.Key_PageUp ):
print( "PageUp" )
elif( event.key() == QtCore.Qt.Key_PageDown ):
print( "PageDown" )
else:
return
def joinIconPaths(self):
# Icons aren't pathed properly if the CWD is somewhere other than the source folder, so...
self.source_dir = os.path.dirname( os.path.realpath(__file__) )
# Join all the icons and reload them
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/application-resize.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tbPopoutView.setIcon(icon)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/monitor-dead.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
icon1.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/monitor-live.png")), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.tbLive.setIcon(icon1)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/color.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tbChangeColormap.setIcon(icon2)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/colorbar.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tbToggleColorbar.setIcon(icon3)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/magnifier-zoom-in.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tbZoomIn.setIcon(icon4)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/magnifier-zoom-out.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tbZoomOut.setIcon(icon5)
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/boxes.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tbShowBoxes.setIcon(icon6)
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/logscale.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tbLogIntensity.setIcon(icon7)
icon8 = QtGui.QIcon()
icon8.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/histogram.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tbToggleHistogramContrast.setIcon(icon8)
icon9 = QtGui.QIcon()
icon9.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/arrow-180.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tbPrevImage.setIcon(icon9)
icon10 = QtGui.QIcon()
icon10.addPixmap(QtGui.QPixmap(os.path.join( self.source_dir, "icons/arrow.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tbNextImage.setIcon(icon10)
def toggleLiveView( self ):
self.live = self.tbLive.isChecked()
def toggleShowBoxes( self ):
print( "Trying box overlay" )
self.viewCanvas.plotObj.plotDict['boxMask'] = None
if self.tbShowBoxes.isChecked():
# Load from the zorroObj
try:
print( "figBoxMask: " + str(self.viewCanvas.zorroObj.files['figBoxMask']) )
except:
pass
if 'figBoxMask' in self.viewCanvas.zorroObj.files:
self.viewCanvas.plotObj.plotDict['boxMask'] = matplotlib.image.imread( self.viewCanvas.zorroObj.files['figBoxMask'] )
self.viewCanvas.updatePlotFunc( self.comboView.currentText() )
# Decorators to stop multiple events (doesn't work damnit, everything is float)
# @QtCore.Slot(float)
# @QtCore.Slot(str)
def updateHistClim( self, value ):
self.viewCanvas.param['cutoff'] = np.power( 10.0, self.sbHistogramCutoff.value())
self.viewCanvas.updatePlotFunc( self.comboView.currentText() )
pass
def toggleLogInt( self ):
# This should call updateHistClim, not sure if I want too
self.viewCanvas.param['logScale'] = not self.viewCanvas.param['logScale'] # '~' on a Python bool never yields False
self.viewCanvas.updatePlotFunc( self.comboView.currentText() )
def popoutViewDialog( self ):
"""
So the logic here has
"""
if (self.viewCanvas.plotName == u'Dose filtered sum'
or self.viewCanvas.plotName == u'Image sum'):
if self.popout == "2dx_viewer" and which( "2dx_viewer" ):
self.viewCanvas.exportTo2dx()
return
elif self.popout == "ims" and which( "ims" ):
self.viewCanvas.exportToIms()
return
# Fallback mode
self.__popoutObj = ViewDialog()
self.copyDeep( self.__popoutObj.view ) # ViewDialog is just a wrapper around ViewWidget 'view'
# Unfortunately the copy library doesn't work nicely with Qt, so we have to implement this.
def copyDeep( self, thecopy ):
thecopy.viewNumber = self.viewNumber + 100
thecopy.parent = self.parent
thecopy.autoParent = self.autoParent
# thecopy.viewCanvas = copy( self.viewCanvas )
thecopy.updateZorroObj( self.viewCanvas.zorroObj )
# No copy of popout
# Turn events OFF
thecopy.blockSignals( True )
print( "BLOCKING SIGNALS" )
thecopy.tbToggleColorbar.setChecked( self.tbToggleColorbar.isChecked() )
thecopy.tbLogIntensity.setChecked( self.tbLogIntensity.isChecked() )
thecopy.tbToggleHistogramContrast.setChecked( self.tbToggleHistogramContrast.isChecked() )
thecopy.leImageIndex.setText( self.leImageIndex.text() )
thecopy.sbHistogramCutoff.blockSignals( True )
thecopy.sbHistogramCutoff.setValue( self.sbHistogramCutoff.value() )
thecopy.sbHistogramCutoff.blockSignals( False )
thecopy.comboView.setCurrentIndex( self.comboView.currentIndex() )
thecopy.updatePlotType(0)
thecopy.blockSignals( False )
print( "UNBLOCKING SIGNALS" )
def toggleColorbar( self ):
self.viewCanvas.plotObj.plotDict['colorbar'] = self.tbToggleColorbar.isChecked()
self.viewCanvas.updatePlotFunc( self.comboView.currentText() )
def cycleColormap( self ):
# This is sort of dumb, just have a function inside zorroPlot for this.
self.viewCanvas.cmap = next( self.viewCanvas.plotObj.cmaps_cycle ) # Python 3: use next(), not .next()
self.viewCanvas.plotObj.plotDict['image_cmap'] = self.viewCanvas.cmap
self.viewCanvas.plotObj.plotDict['graph_cmap'] = self.viewCanvas.cmap
self.viewCanvas.updatePlotFunc( self.comboView.currentText() )
def shiftImageIndex( self, shift=1 ):
newIndex = np.int32( self.leImageIndex.text() )
newIndex += shift
self.updateImageIndex( imageIndex = newIndex )
def updateImageIndex( self, imageIndex=None ):
if imageIndex is None:
imageIndex = int( self.leImageIndex.text() )
self.viewCanvas.param['imageIndex'] = imageIndex
self.refreshCanvas()
self.leImageIndex.blockSignals( True )
self.leImageIndex.setText( "%s"%imageIndex )
self.leImageIndex.blockSignals( False )
def updateZorroObj( self, zorroObj = None ):
self.viewCanvas.updatePlotFunc( self.comboView.currentText(), zorroObj )
def updatePlotType( self, index ):
# This function is called when you need to update the underlying data of the canvas
self.viewCanvas.updatePlotFunc( self.comboView.currentText() )
def loadConfig( self, config ):
groupstring = u"view%d" % self.viewNumber
try: self.comboView.setCurrentIndex( self.comboView.findText( config.get( groupstring, u'plotType' ) ) )
except: print( "Failed to set plotType for view %d"%self.viewNumber )
try:
self.live = config.getboolean( groupstring, u'live')
except: pass
try: self.tbToggleColorbar.setChecked( config.getboolean( groupstring, u'colorBar') )
except: pass
try: self.tbToggleHistogramContrast.setChecked( config.getboolean( groupstring, u'histogramContrast') )
except: pass
try: self.tbLogIntensity.setChecked( config.getboolean( groupstring, u'logIntensity') )
except: pass
try: self.sbHistogramCutoff.setValue( config.getint( groupstring, u'histogramCutoff') )
except: pass
try: self.tbShowBoxes.setChecked( config.getboolean( groupstring, u'showBoxes') )
except: pass
except: pass
try: self.viewCanvas.plotObj.plotDict['image_cmap'] = config.get( groupstring, u'image_cmap' )
except: pass
try: self.viewCanvas.plotObj.plotDict['graph_cmap'] = config.get( groupstring, u'graph_cmap' )
except: pass
def saveConfig( self, config ):
groupstring = u"view%d" % self.viewNumber
config.add_section(groupstring)
config.set( groupstring, u'plotType', self.comboView.currentText() )
config.set( groupstring, u'live', self.tbLive.isChecked() )
config.set( groupstring, u'colorBar', self.tbToggleColorbar.isChecked() )
config.set( groupstring, u'histogramContrast', self.tbToggleHistogramContrast.isChecked() )
config.set( groupstring, u'logIntensity', self.tbLogIntensity.isChecked() )
config.set( groupstring, u'histogramCutoff', self.sbHistogramCutoff.value() )
config.set( groupstring, u'showBoxes', self.tbShowBoxes.isChecked() )
# We can only save some plotDict keys because it might contain a lot of data!
try: config.set( groupstring, u'image_cmap', self.viewCanvas.plotObj.plotDict['image_cmap'] )
except: pass
try: config.set( groupstring, u'graph_cmap', self.viewCanvas.plotObj.plotDict['graph_cmap'] )
except: pass
pass
from . import Ui_ViewDialog
class ViewDialog(QtGui.QDialog, Ui_ViewDialog.Ui_ViewDialog):
def __init__(self):
QtGui.QDialog.__init__(self)
self.setupUi(self)
# Set up the user interface from Designer.
self.show() | zorroautomator | /zorroautomator-0.7.4b0.tar.gz/zorroautomator-0.7.4b0/automator/ViewWidget.py | ViewWidget.py |
__all__ = ['E']
import operator
import sys
import threading
import numpy
# Declare a double type that does not exist in Python space
double = numpy.double
# The default kind for undeclared variables
default_kind = 'double'
# 32-bit ints map to kind 'int' and 64-bit ints to kind 'long' on both
# Python 2 and 3, so no version branch is needed here.
int_ = numpy.int32
long_ = numpy.int64
complex64 = numpy.complex64
type_to_kind = {bool: 'bool', int_: 'int', long_: 'long', float: 'float',
double: 'double', complex: 'complex', complex64: 'complex64',
bytes: 'bytes'}
kind_to_type = {'bool': bool, 'int': int_, 'long': long_, 'float': float,
'double': double, 'complex': complex, 'complex64' : complex64,
'bytes': bytes}
# RAM: IN SJP's branch, this was letting complex64 cast to double, which is not good behaviour either.
kind_rank = ['bool', 'int', 'long', 'float', 'double', 'complex64', 'complex', 'none']
scalar_constant_types = [bool, int, numpy.int64, float, complex64, double, complex, bytes]
# Final corrections for Python 3 (mainly for PyTables needs)
if sys.version_info[0] > 2:
type_to_kind[str] = 'str'
kind_to_type['str'] = str
scalar_constant_types.append(str)
scalar_constant_types = tuple(scalar_constant_types)
if sys.version_info[0] < 3:
from numexprz import interpreter
else:
from . import interpreter
class Expression(object):
def __init__(self):
object.__init__(self)
def __getattr__(self, name):
if name.startswith('_'):
try:
self.__dict__[name]
except KeyError:
raise AttributeError
else:
return VariableNode(name, default_kind)
E = Expression()
class Context(threading.local):
initialized = False
def __init__(self, dict_):
if self.initialized:
raise SystemError('__init__ called too many times')
self.initialized = True
self.__dict__.update(dict_)
def get(self, value, default):
return self.__dict__.get(value, default)
def get_current_context(self):
return self.__dict__
def set_new_context(self, dict_):
self.__dict__.update(dict_)
# This will be called each time the local object is used in a separate thread
_context = Context({})
def get_optimization():
return _context.get('optimization', 'none')
# helper functions for creating __magic__ methods
def ophelper(f):
def func(*args):
args = list(args)
for i, x in enumerate(args):
if isConstant(x):
args[i] = x = ConstantNode(x)
if not isinstance(x, ExpressionNode):
raise TypeError("unsupported object type: %s" % type(x))
return f(*args)
func.__name__ = f.__name__
func.__doc__ = f.__doc__
func.__dict__.update(f.__dict__)
return func
def allConstantNodes(args):
"returns True if args are all ConstantNodes."
for x in args:
if not isinstance(x, ConstantNode):
return False
return True
def isConstant(ex):
"Returns True if ex is a constant scalar of an allowed type."
return isinstance(ex, scalar_constant_types)
def commonKind(nodes):
node_kinds = [node.astKind for node in nodes]
str_count = node_kinds.count('bytes') + node_kinds.count('str')
if 0 < str_count < len(node_kinds): # some args are strings, but not all
raise TypeError("strings can only be operated with strings")
if str_count > 0: # if there are some, all of them must be
return 'bytes'
n = -1
for x in nodes:
n = max(n, kind_rank.index(x.astKind))
return kind_rank[n]
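# E.g. commonKind([<float node>, <complex64 node>]) == 'complex64',
# since 'complex64' outranks 'float' in kind_rank above.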
max_int32 = 2147483647
min_int32 = -max_int32 - 1
def bestConstantType(x):
# ``numpy.string_`` is a subclass of ``bytes``
if isinstance(x, (bytes, str)):
return bytes
# Numeric conversion to boolean values is not tried because
# ``bool(1) == True`` (same for 0 and False), so 0 and 1 would be
# interpreted as booleans when ``False`` and ``True`` are already
# supported.
if isinstance(x, (bool, numpy.bool_)):
return bool
# ``long`` objects are kept as is to allow the user to force
# promotion of results by using long constants, e.g. by operating
# a 32-bit array with a long (64-bit) constant.
if isinstance(x, (long_, numpy.int64)):
return long_
# ``double`` objects are kept as is to allow the user to force
# promotion of results by using double constants, e.g. by operating
# a float (32-bit) array with a double (64-bit) constant.
if isinstance(x, double):
return double
if isinstance(x, (int, numpy.integer)):
# Constants needing more than 32 bits are always
# considered ``long``, *regardless of the platform*, so we
# can clearly tell 32- and 64-bit constants apart.
if not (min_int32 <= x <= max_int32):
return long_
return int_
if isinstance(x, complex64):
return complex64
# The duality of float and double in Python avoids that we have to list
# ``double`` too.
for converter in float, complex:
try:
y = converter(x)
except Exception: # StandardError does not exist in Python 3
continue
if y == x:
return converter
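# E.g. bestConstantType(3) -> int_, bestConstantType(2**40) -> long_
# (it does not fit in 32 bits), and bestConstantType(1.5) -> float,
# which ConstantNode below then promotes to the 'double' kind.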
def getKind(x):
converter = bestConstantType(x)
return type_to_kind[converter]
def binop(opname, reversed=False, kind=None):
# Getting the named method from self (after reversal) does not
# always work (e.g. int constants do not have a __lt__ method).
opfunc = getattr(operator, "__%s__" % opname)
@ophelper
def operation(self, other):
if reversed:
self, other = other, self
if allConstantNodes([self, other]):
return ConstantNode(opfunc(self.value, other.value))
else:
return OpNode(opname, (self, other), kind=kind)
return operation
def func(func, minkind=None, maxkind=None):
@ophelper
def function(*args):
if allConstantNodes(args):
return ConstantNode(func(*[x.value for x in args]))
kind = commonKind(args)
if kind in ('int', 'long'):
# Exception for following NumPy casting rules
#FIXME: this is not always desirable. The following
# functions which return ints (for int inputs) on numpy
# but not on numexpr: copy, abs, fmod, ones_like
kind = 'double'
else:
# Apply regular casting rules
if minkind and kind_rank.index(minkind) > kind_rank.index(kind):
kind = minkind
if maxkind and kind_rank.index(maxkind) < kind_rank.index(kind):
kind = maxkind
return FuncNode(func.__name__, args, kind)
return function
@ophelper
def where_func(a, b, c):
if isinstance(a, ConstantNode):
#FIXME: This prevents where(True, a, b)
raise ValueError("too many dimensions")
if allConstantNodes([a, b, c]):
return ConstantNode(numpy.where(a.value, b.value, c.value))
return FuncNode('where', [a, b, c])
def encode_axis(axis):
if isinstance(axis, ConstantNode):
axis = axis.value
if axis is None:
axis = interpreter.allaxes
else:
if axis < 0:
raise ValueError("negative axis are not supported")
if axis > 254:
raise ValueError("cannot encode axis")
return RawNode(axis)
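# E.g. encode_axis(None) -> RawNode(interpreter.allaxes) for a full
# reduction, while encode_axis(1) -> RawNode(1) reduces along axis 1.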
def sum_func(a, axis=None):
axis = encode_axis(axis)
if isinstance(a, ConstantNode):
return a
if isinstance(a, (bool, int_, long_, float, double, complex)):
a = ConstantNode(a)
return FuncNode('sum', [a, axis], kind=a.astKind)
def prod_func(a, axis=None):
axis = encode_axis(axis)
if isinstance(a, (bool, int_, long_, float, double, complex)):
a = ConstantNode(a)
if isinstance(a, ConstantNode):
return a
return FuncNode('prod', [a, axis], kind=a.astKind)
@ophelper
def contains_func(a, b):
return FuncNode('contains', [a, b], kind='bool')
@ophelper
def div_op(a, b):
if get_optimization() in ('moderate', 'aggressive'):
if (isinstance(b, ConstantNode) and
(a.astKind == b.astKind) and
a.astKind in ('float', 'double', 'complex')):
return OpNode('mul', [a, ConstantNode(1. / b.value)])
return OpNode('div', [a, b])
@ophelper
def truediv_op(a, b):
if get_optimization() in ('moderate', 'aggressive'):
if (isinstance(b, ConstantNode) and
(a.astKind == b.astKind) and
a.astKind in ('float', 'double', 'complex')):
return OpNode('mul', [a, ConstantNode(1. / b.value)])
kind = commonKind([a, b])
if kind in ('bool', 'int', 'long'):
kind = 'double'
return OpNode('div', [a, b], kind=kind)
@ophelper
def rtruediv_op(a, b):
return truediv_op(b, a)
@ophelper
def pow_op(a, b):
if allConstantNodes([a, b]):
return ConstantNode(a.value ** b.value)
if isinstance(b, ConstantNode):
x = b.value
if get_optimization() == 'aggressive':
RANGE = 50 # Approximate break even point with pow(x,y)
# Optimize all integral and half integral powers in [-RANGE, RANGE]
# Note: for complex numbers RANGE could be larger.
if (int(2 * x) == 2 * x) and (-RANGE <= abs(x) <= RANGE):
n = int_(abs(x))
ishalfpower = int_(abs(2 * x)) % 2
def multiply(x, y):
if x is None: return y
return OpNode('mul', [x, y])
r = None
p = a
mask = 1
while True:
if (n & mask):
r = multiply(r, p)
mask <<= 1
if mask > n:
break
p = OpNode('mul', [p, p])
if ishalfpower:
kind = commonKind([a])
if kind in ('int', 'long'):
kind = 'double'
# RAM: typo here
r = multiply(r, OpNode('sqrt', [a], kind))
if r is None:
r = OpNode('ones_like', [a])
if x < 0:
r = OpNode('div', [ConstantNode(1), r])
return r
if get_optimization() in ('moderate', 'aggressive'):
if x == -1:
return OpNode('div', [ConstantNode(1), a])
if x == 0:
return OpNode('ones_like', [a])
if x == 0.5:
kind = a.astKind
if kind in ('int', 'long'): kind = 'double'
return FuncNode('sqrt', [a], kind=kind)
if x == 1:
return a
if x == 2:
return OpNode('mul', [a, a])
return OpNode('pow', [a, b])
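# Illustration of the aggressive expansion above: for a**5 (n = 0b101)
# the loop yields mul(a, mul(a2, a2)) with a2 = mul(a, a), i.e. binary
# exponentiation over the set bits of n; a half power such as a**2.5
# additionally multiplies the result by sqrt(a).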
# The functions and the minimum and maximum types accepted
functions = {
'copy': func(numpy.copy),
'ones_like': func(numpy.ones_like),
'sqrt': func(numpy.sqrt, 'float'),
'sin': func(numpy.sin, 'float'),
'cos': func(numpy.cos, 'float'),
'tan': func(numpy.tan, 'float'),
'arcsin': func(numpy.arcsin, 'float'),
'arccos': func(numpy.arccos, 'float'),
'arctan': func(numpy.arctan, 'float'),
'sinh': func(numpy.sinh, 'float'),
'cosh': func(numpy.cosh, 'float'),
'tanh': func(numpy.tanh, 'float'),
'arcsinh': func(numpy.arcsinh, 'float'),
'arccosh': func(numpy.arccosh, 'float'),
'arctanh': func(numpy.arctanh, 'float'),
'fmod': func(numpy.fmod, 'float'),
'arctan2': func(numpy.arctan2, 'float'),
'log': func(numpy.log, 'float'),
'log1p': func(numpy.log1p, 'float'),
'log10': func(numpy.log10, 'float'),
'exp': func(numpy.exp, 'float'),
'expm1': func(numpy.expm1, 'float'),
'abs': func(numpy.absolute, 'float'),
'where': where_func,
# RAM: we can't get proper casting behavior because we don't have a
# seperation between kind and order
# i.e. c8 <-> f4 and c16 <-> f8
# RAM: This block works with double and complex
#'real': func(numpy.real, 'double', 'double'),
#'imag': func(numpy.imag, 'double', 'double'),
#'complex': func(complex, 'complex'),
#'conj': func(numpy.conj, 'complex'),
# RAM: This works with float32 and complex64
'real': func(numpy.real, 'float', 'float'),
'imag': func(numpy.imag, 'float', 'float'),
'complex': func(complex, 'complex'),
'conj': func(numpy.conj, 'complex64'),
'sum': sum_func,
'prod': prod_func,
'contains': contains_func,
}
class ExpressionNode(object):
"""An object that represents a generic number object.
This implements the number special methods so that we can keep
track of how this object has been used.
"""
astType = 'generic'
def __init__(self, value=None, kind=None, children=None):
object.__init__(self)
self.value = value
if kind is None:
kind = 'none'
self.astKind = kind
if children is None:
self.children = ()
else:
self.children = tuple(children)
def get_real(self):
if self.astType == 'constant':
return ConstantNode(complex(self.value).real)
return OpNode('real', (self,), 'double')
real = property(get_real)
def get_imag(self):
if self.astType == 'constant':
return ConstantNode(complex(self.value).imag)
return OpNode('imag', (self,), 'double')
imag = property(get_imag)
def __str__(self):
return '%s(%s, %s, %s)' % (self.__class__.__name__, self.value,
self.astKind, self.children)
def __repr__(self):
return self.__str__()
def __neg__(self):
return OpNode('neg', (self,))
def __invert__(self):
return OpNode('invert', (self,))
def __pos__(self):
return self
# Forbid implicit truth-testing of expression nodes; see #24 for more info.
def __nonzero__(self):
raise TypeError("You can't use Python's standard boolean operators in "
"NumExpr expressions. You should use their bitwise "
"counterparts instead: '&' instead of 'and', "
"'|' instead of 'or', and '~' instead of 'not'.")
__bool__ = __nonzero__ # Python 3 looks for __bool__ instead
__add__ = __radd__ = binop('add')
__sub__ = binop('sub')
__rsub__ = binop('sub', reversed=True)
__mul__ = __rmul__ = binop('mul')
if sys.version_info[0] < 3:
__div__ = div_op
__rdiv__ = binop('div', reversed=True)
__truediv__ = truediv_op
__rtruediv__ = rtruediv_op
__pow__ = pow_op
__rpow__ = binop('pow', reversed=True)
__mod__ = binop('mod')
__rmod__ = binop('mod', reversed=True)
__lshift__ = binop('lshift')
__rlshift__ = binop('lshift', reversed=True)
__rshift__ = binop('rshift')
__rrshift__ = binop('rshift', reversed=True)
# boolean operations
__and__ = binop('and', kind='bool')
__or__ = binop('or', kind='bool')
__gt__ = binop('gt', kind='bool')
__ge__ = binop('ge', kind='bool')
__eq__ = binop('eq', kind='bool')
__ne__ = binop('ne', kind='bool')
__lt__ = binop('gt', reversed=True, kind='bool')
__le__ = binop('ge', reversed=True, kind='bool')
class LeafNode(ExpressionNode):
leafNode = True
class VariableNode(LeafNode):
astType = 'variable'
def __init__(self, value=None, kind=None, children=None):
LeafNode.__init__(self, value=value, kind=kind)
class RawNode(object):
"""Used to pass raw integers to interpreter.
For instance, for selecting what function to use in func1.
Purposely don't inherit from ExpressionNode, since we don't wan't
this to be used for anything but being walked.
"""
astType = 'raw'
astKind = 'none'
def __init__(self, value):
self.value = value
self.children = ()
def __str__(self):
return 'RawNode(%s)' % (self.value,)
__repr__ = __str__
class ConstantNode(LeafNode):
astType = 'constant'
def __init__(self, value=None, children=None):
kind = getKind(value)
# Python float constants are double precision by default
if kind == 'float':
kind = 'double'
LeafNode.__init__(self, value=value, kind=kind)
def __neg__(self):
return ConstantNode(-self.value)
def __invert__(self):
return ConstantNode(~self.value)
class OpNode(ExpressionNode):
astType = 'op'
def __init__(self, opcode=None, args=None, kind=None):
if (kind is None) and (args is not None):
kind = commonKind(args)
ExpressionNode.__init__(self, value=opcode, kind=kind, children=args)
class FuncNode(OpNode):
def __init__(self, opcode=None, args=None, kind=None):
if (kind is None) and (args is not None):
kind = commonKind(args)
OpNode.__init__(self, opcode, args, kind) | zorroautomator | /zorroautomator-0.7.4b0.tar.gz/zorroautomator-0.7.4b0/numexprz/expressions.py | expressions.py |
import os
import subprocess
from numexprz.interpreter import _set_num_threads
from numexprz import use_vml
import numexprz
if use_vml:
from numexprz.interpreter import (
_get_vml_version, _set_vml_accuracy_mode, _set_vml_num_threads)
def get_vml_version():
"""Get the VML/MKL library version."""
if use_vml:
return _get_vml_version()
else:
return None
def set_vml_accuracy_mode(mode):
"""
Set the accuracy mode for VML operations.
The `mode` parameter can take the values:
- 'high': high accuracy mode (HA), <1 least significant bit
- 'low': low accuracy mode (LA), typically 1-2 least significant bits
- 'fast': enhanced performance mode (EP)
- None: mode settings are ignored
This call is equivalent to the `vmlSetMode()` in the VML library.
See:
http://www.intel.com/software/products/mkl/docs/webhelp/vml/vml_DataTypesAccuracyModes.html
for more info on the accuracy modes.
Returns old accuracy settings.
"""
if use_vml:
acc_dict = {None: 0, 'low': 1, 'high': 2, 'fast': 3}
acc_reverse_dict = {1: 'low', 2: 'high', 3: 'fast'}
if mode not in acc_dict.keys():
raise ValueError(
"mode argument must be one of: None, 'high', 'low', 'fast'")
retval = _set_vml_accuracy_mode(acc_dict.get(mode, 0))
return acc_reverse_dict.get(retval)
else:
return None
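# Hedged usage sketch (illustration only, not part of the original API):
# set_vml_accuracy_mode returns the previous setting, so a temporary switch
# to the enhanced-performance mode can be undone afterwards.
def _vml_accuracy_demo():
    old_mode = set_vml_accuracy_mode('fast')
    restored = set_vml_accuracy_mode(old_mode)
    return old_mode, restored  # both are None on builds without VML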
def set_vml_num_threads(new_nthreads):
"""
Suggests a maximum number of threads to be used in VML operations.
This function is equivalent to the call
`mkl_domain_set_num_threads(nthreads, MKL_DOMAIN_VML)` in the MKL
library. See:
http://www.intel.com/software/products/mkl/docs/webhelp/support/functn_mkl_domain_set_num_threads.html
for more info about it.
"""
if use_vml:
_set_vml_num_threads(new_nthreads)
def set_num_threads(new_nthreads):
"""
    Sets the number of threads to be used in operations.
Returns the previous setting for the number of threads.
During initialization time Numexpr sets this number to the number
of detected cores in the system (see `detect_number_of_cores()`).
If you are using Intel's VML, you may want to use
`set_vml_num_threads(nthreads)` to perform the parallel job with
VML instead. However, you should get very similar performance
    with VML-optimized functions, and VML's parallelizer cannot deal
    with common expressions like `(x+1)*(x-2)`, while Numexpr's can.
"""
old_nthreads = _set_num_threads(new_nthreads)
numexprz.nthreads = new_nthreads
return old_nthreads
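# Hedged usage sketch (illustration only): set_num_threads returns the old
# value, so a temporary override can be scoped and then restored.
def _num_threads_demo(new_nthreads=2):
    old_nthreads = set_num_threads(new_nthreads)
    try:
        pass  # ... run evaluations with the temporary thread count ...
    finally:
        set_num_threads(old_nthreads)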
def detect_number_of_cores():
"""
Detects the number of cores on a system. Cribbed from pp.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
return int(subprocess.check_output(["sysctl", "-n", "hw.ncpu"]))
# Windows:
if "NUMBER_OF_PROCESSORS" in os.environ:
        ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
if ncpus > 0:
return ncpus
return 1 # Default
def detect_number_of_physical_cores():
    """
    Detects the number of physical (not hyperthreaded/logical) cores.
    RAM: benchmarking shows hyperthreading hurts performance, so we want a
    cross-platform way to count physical cores rather than virtual ones.
    Requires the optional hwloc module; falls back to 1 if it is unavailable.
    """
    try:
        import hwloc
        topology = hwloc.Topology()
        topology.load()
        return topology.get_nbobjs_by_type(hwloc.OBJ_CORE)
    except Exception:
        print( "Could not load hwloc module to check physical core count" )
        return 1
def detect_number_of_threads():
"""
If this is modified, please update the note in: https://github.com/pydata/numexpr/wiki/Numexpr-Users-Guide
"""
try:
nthreads = int(os.environ['NUMEXPR_NUM_THREADS'])
except KeyError:
nthreads = int(os.environ.get('OMP_NUM_THREADS', detect_number_of_cores()))
# Check that we don't activate too many threads at the same time.
# 16 seems a sensible value.
max_sensible_threads = 16
if nthreads > max_sensible_threads:
nthreads = max_sensible_threads
# Check that we don't surpass the MAX_THREADS in interpreter.cpp
if nthreads > 4096:
nthreads = 4096
return nthreads
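# Illustrative sketch (assumes only standard os.environ behaviour): the
# thread count can be pinned via the NUMEXPR_NUM_THREADS environment
# variable, which detect_number_of_threads() checks first.
def _pin_threads_demo(n=4):
    os.environ['NUMEXPR_NUM_THREADS'] = str(n)
    return detect_number_of_threads()  # == n, as long as n <= 16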
class CacheDict(dict):
"""
A dictionary that prevents itself from growing too much.
"""
    def __init__(self, maxentries):
        self.maxentries = maxentries
        super(CacheDict, self).__init__()
    def __setitem__(self, key, value):
        # Protection against growing the cache too much
        # RAM: checked how many entries this generates; not very many in fact,
        # so we don't seem to have any potential speed-ups here.
        # print( "CacheDict length = " + str(len(self) ) )
        if len(self) > self.maxentries:
            # Remove 10% of the (arbitrary) elements from the cache
            entries_to_remove = self.maxentries // 10
            # list() is required in Python 3, where keys() is a non-indexable view
            for k in list(self.keys())[:entries_to_remove]:
                super(CacheDict, self).__delitem__(k)
        super(CacheDict, self).__setitem__(key, value)
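# Minimal sketch (illustration only) of the eviction behaviour implemented in
# __setitem__ above: once the dict exceeds maxentries, roughly 10% of the
# entries are dropped, so the cache stays bounded.
def _cachedict_demo():
    cache = CacheDict(maxentries=100)
    for i in range(150):
        cache[i] = i * i
    return len(cache)  # stays near maxentries instead of reaching 150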
###################################################################
# cpuinfo - Get information about CPU
#
# License: BSD
# Author: Pearu Peterson <[email protected]>
#
# See LICENSES/cpuinfo.txt for details about copyright and
# rights to use.
####################################################################
"""
cpuinfo
Copyright 2002 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Pearu Peterson
"""
__all__ = ['cpu']
import sys, re, types
import os
import subprocess
import warnings
import platform
import inspect
def getoutput(cmd, successful_status=(0,), stacklevel=1):
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
output, _ = p.communicate()
status = p.returncode
except EnvironmentError as e:
warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
return False, ''
    # subprocess.Popen.returncode is already a decoded exit status, and
    # os.WIFEXITED is unavailable on Windows, so compare the code directly
    if status in successful_status:
        return True, output
    return False, output
def command_info(successful_status=(0,), stacklevel=1, **kw):
info = {}
for key in kw:
ok, output = getoutput(kw[key], successful_status=successful_status,
stacklevel=stacklevel + 1)
if ok:
info[key] = output.strip()
return info
def command_by_line(cmd, successful_status=(0,), stacklevel=1):
ok, output = getoutput(cmd, successful_status=successful_status,
stacklevel=stacklevel + 1)
if not ok:
return
# XXX: check
output = output.decode('ascii')
for line in output.splitlines():
yield line.strip()
def key_value_from_command(cmd, sep, successful_status=(0,),
stacklevel=1):
d = {}
for line in command_by_line(cmd, successful_status=successful_status,
stacklevel=stacklevel + 1):
l = [s.strip() for s in line.split(sep, 1)]
if len(l) == 2:
d[l[0]] = l[1]
return d
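# Hedged usage sketch (illustration only; 'lscpu' is an assumed external
# command, present on most Linux systems): the helper above splits each
# line of command output on the separator to build a dict.
def _key_value_demo():
    return key_value_from_command(['lscpu'], sep=':')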
class CPUInfoBase(object):
"""Holds CPU information and provides methods for requiring
the availability of various CPU features.
"""
def _try_call(self, func):
try:
return func()
except:
pass
    def __getattr__(self, name):
        if not name.startswith('_'):
            if hasattr(self, '_' + name):
                attr = getattr(self, '_' + name)
                if inspect.ismethod(attr):
                    return lambda func=self._try_call, attr=attr: func(attr)
            else:
                # unknown is_*/has_* queries harmlessly return None
                return lambda: None
        raise AttributeError( name )
def _getNCPUs(self):
return 1
def __get_nbits(self):
abits = platform.architecture()[0]
        nbits = re.compile(r'(\d+)bit').search(abits).group(1)
return nbits
def _is_32bit(self):
return self.__get_nbits() == '32'
def _is_64bit(self):
return self.__get_nbits() == '64'
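# Illustrative sketch (added for clarity, illustration only): public
# is_*/has_* queries are dispatched through __getattr__ to the private
# _is_*/_has_* methods, wrapped in _try_call so missing data yields None.
def _cpuinfobase_demo():
    info = CPUInfoBase()
    return info.is_32bit(), info.is_64bit(), info.getNCPUs()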
class LinuxCPUInfo(CPUInfoBase):
info = None
def __init__(self):
if self.info is not None:
return
info = [{}]
ok, output = getoutput(['uname', '-m'])
if ok:
info[0]['uname_m'] = output.strip()
try:
fo = open('/proc/cpuinfo')
except EnvironmentError as e:
warnings.warn(str(e), UserWarning)
else:
for line in fo:
name_value = [s.strip() for s in line.split(':', 1)]
if len(name_value) != 2:
continue
name, value = name_value
if not info or name in info[-1]: # next processor
info.append({})
info[-1][name] = value
fo.close()
self.__class__.info = info
def _not_impl(self):
pass
# Athlon
def _is_AMD(self):
return self.info[0]['vendor_id'] == 'AuthenticAMD'
def _is_AthlonK6_2(self):
return self._is_AMD() and self.info[0]['model'] == '2'
def _is_AthlonK6_3(self):
return self._is_AMD() and self.info[0]['model'] == '3'
def _is_AthlonK6(self):
return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None
def _is_AthlonK7(self):
return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None
def _is_AthlonMP(self):
return re.match(r'.*?Athlon\(tm\) MP\b',
self.info[0]['model name']) is not None
def _is_AMD64(self):
return self.is_AMD() and self.info[0]['family'] == '15'
def _is_Athlon64(self):
return re.match(r'.*?Athlon\(tm\) 64\b',
self.info[0]['model name']) is not None
def _is_AthlonHX(self):
return re.match(r'.*?Athlon HX\b',
self.info[0]['model name']) is not None
def _is_Opteron(self):
return re.match(r'.*?Opteron\b',
self.info[0]['model name']) is not None
def _is_Hammer(self):
return re.match(r'.*?Hammer\b',
self.info[0]['model name']) is not None
# Alpha
def _is_Alpha(self):
return self.info[0]['cpu'] == 'Alpha'
def _is_EV4(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4'
def _is_EV5(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5'
def _is_EV56(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56'
def _is_PCA56(self):
return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56'
# Intel
#XXX
_is_i386 = _not_impl
def _is_Intel(self):
return self.info[0]['vendor_id'] == 'GenuineIntel'
def _is_i486(self):
return self.info[0]['cpu'] == 'i486'
def _is_i586(self):
return self.is_Intel() and self.info[0]['cpu family'] == '5'
def _is_i686(self):
return self.is_Intel() and self.info[0]['cpu family'] == '6'
def _is_Celeron(self):
return re.match(r'.*?Celeron',
self.info[0]['model name']) is not None
def _is_Pentium(self):
return re.match(r'.*?Pentium',
self.info[0]['model name']) is not None
def _is_PentiumII(self):
return re.match(r'.*?Pentium.*?II\b',
self.info[0]['model name']) is not None
def _is_PentiumPro(self):
return re.match(r'.*?PentiumPro\b',
self.info[0]['model name']) is not None
def _is_PentiumMMX(self):
return re.match(r'.*?Pentium.*?MMX\b',
self.info[0]['model name']) is not None
def _is_PentiumIII(self):
return re.match(r'.*?Pentium.*?III\b',
self.info[0]['model name']) is not None
def _is_PentiumIV(self):
return re.match(r'.*?Pentium.*?(IV|4)\b',
self.info[0]['model name']) is not None
def _is_PentiumM(self):
return re.match(r'.*?Pentium.*?M\b',
self.info[0]['model name']) is not None
def _is_Prescott(self):
return self.is_PentiumIV() and self.has_sse3()
def _is_Nocona(self):
return self.is_Intel() \
and (self.info[0]['cpu family'] == '6' \
or self.info[0]['cpu family'] == '15' ) \
and (self.has_sse3() and not self.has_ssse3()) \
and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None
def _is_Core2(self):
return self.is_64bit() and self.is_Intel() and \
re.match(r'.*?Core\(TM\)2\b', \
self.info[0]['model name']) is not None
def _is_Itanium(self):
return re.match(r'.*?Itanium\b',
self.info[0]['family']) is not None
def _is_XEON(self):
return re.match(r'.*?XEON\b',
self.info[0]['model name'], re.IGNORECASE) is not None
_is_Xeon = _is_XEON
# Varia
def _is_singleCPU(self):
return len(self.info) == 1
def _getNCPUs(self):
return len(self.info)
def _has_fdiv_bug(self):
return self.info[0]['fdiv_bug'] == 'yes'
def _has_f00f_bug(self):
return self.info[0]['f00f_bug'] == 'yes'
def _has_mmx(self):
return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None
def _has_sse(self):
return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None
def _has_sse2(self):
return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None
def _has_sse3(self):
return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None
def _has_ssse3(self):
return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None
def _has_3dnow(self):
return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None
def _has_3dnowext(self):
return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None
class IRIXCPUInfo(CPUInfoBase):
info = None
def __init__(self):
if self.info is not None:
return
info = key_value_from_command('sysconf', sep=' ',
successful_status=(0, 1))
self.__class__.info = info
def _not_impl(self):
pass
def _is_singleCPU(self):
return self.info.get('NUM_PROCESSORS') == '1'
def _getNCPUs(self):
return int(self.info.get('NUM_PROCESSORS', 1))
def __cputype(self, n):
return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n)
def _is_r2000(self):
return self.__cputype(2000)
def _is_r3000(self):
return self.__cputype(3000)
def _is_r3900(self):
return self.__cputype(3900)
def _is_r4000(self):
return self.__cputype(4000)
def _is_r4100(self):
return self.__cputype(4100)
def _is_r4300(self):
return self.__cputype(4300)
def _is_r4400(self):
return self.__cputype(4400)
def _is_r4600(self):
return self.__cputype(4600)
def _is_r4650(self):
return self.__cputype(4650)
def _is_r5000(self):
return self.__cputype(5000)
def _is_r6000(self):
return self.__cputype(6000)
def _is_r8000(self):
return self.__cputype(8000)
def _is_r10000(self):
return self.__cputype(10000)
def _is_r12000(self):
return self.__cputype(12000)
def _is_rorion(self):
return self.__cputype('orion')
def get_ip(self):
try:
return self.info.get('MACHINE')
except:
pass
def __machine(self, n):
return self.info.get('MACHINE').lower() == 'ip%s' % (n)
def _is_IP19(self):
return self.__machine(19)
def _is_IP20(self):
return self.__machine(20)
def _is_IP21(self):
return self.__machine(21)
def _is_IP22(self):
return self.__machine(22)
def _is_IP22_4k(self):
return self.__machine(22) and self._is_r4000()
def _is_IP22_5k(self):
return self.__machine(22) and self._is_r5000()
def _is_IP24(self):
return self.__machine(24)
def _is_IP25(self):
return self.__machine(25)
def _is_IP26(self):
return self.__machine(26)
def _is_IP27(self):
return self.__machine(27)
def _is_IP28(self):
return self.__machine(28)
def _is_IP30(self):
return self.__machine(30)
def _is_IP32(self):
return self.__machine(32)
def _is_IP32_5k(self):
return self.__machine(32) and self._is_r5000()
def _is_IP32_10k(self):
return self.__machine(32) and self._is_r10000()
class DarwinCPUInfo(CPUInfoBase):
info = None
def __init__(self):
if self.info is not None:
return
info = command_info(arch='arch',
machine='machine')
info['sysctl_hw'] = key_value_from_command(['sysctl', 'hw'], sep='=')
self.__class__.info = info
def _not_impl(self): pass
def _getNCPUs(self):
return int(self.info['sysctl_hw'].get('hw.ncpu', 1))
def _is_Power_Macintosh(self):
return self.info['sysctl_hw']['hw.machine'] == 'Power Macintosh'
def _is_i386(self):
return self.info['arch'] == 'i386'
def _is_ppc(self):
return self.info['arch'] == 'ppc'
def __machine(self, n):
return self.info['machine'] == 'ppc%s' % n
def _is_ppc601(self): return self.__machine(601)
def _is_ppc602(self): return self.__machine(602)
def _is_ppc603(self): return self.__machine(603)
def _is_ppc603e(self): return self.__machine('603e')
def _is_ppc604(self): return self.__machine(604)
def _is_ppc604e(self): return self.__machine('604e')
def _is_ppc620(self): return self.__machine(620)
def _is_ppc630(self): return self.__machine(630)
def _is_ppc740(self): return self.__machine(740)
def _is_ppc7400(self): return self.__machine(7400)
def _is_ppc7450(self): return self.__machine(7450)
def _is_ppc750(self): return self.__machine(750)
def _is_ppc403(self): return self.__machine(403)
def _is_ppc505(self): return self.__machine(505)
def _is_ppc801(self): return self.__machine(801)
def _is_ppc821(self): return self.__machine(821)
def _is_ppc823(self): return self.__machine(823)
def _is_ppc860(self): return self.__machine(860)
class SunOSCPUInfo(CPUInfoBase):
info = None
def __init__(self):
if self.info is not None:
return
        info = command_info(arch='arch',
                            mach='mach',
                            uname_i=['uname', '-i'],
                            isainfo_b=['isainfo', '-b'],
                            isainfo_n=['isainfo', '-n'],
                            )
        info['uname_X'] = key_value_from_command(['uname', '-X'], sep='=')
for line in command_by_line(['psrinfo', '-v', '0']):
m = re.match(r'\s*The (?P<p>[\w\d]+) processor operates at', line)
if m:
info['processor'] = m.group('p')
break
self.__class__.info = info
def _not_impl(self):
pass
def _is_i386(self):
return self.info['isainfo_n'] == 'i386'
def _is_sparc(self):
return self.info['isainfo_n'] == 'sparc'
def _is_sparcv9(self):
return self.info['isainfo_n'] == 'sparcv9'
def _getNCPUs(self):
return int(self.info['uname_X'].get('NumCPU', 1))
def _is_sun4(self):
return self.info['arch'] == 'sun4'
def _is_SUNW(self):
return re.match(r'SUNW', self.info['uname_i']) is not None
def _is_sparcstation5(self):
return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None
def _is_ultra1(self):
return re.match(r'.*Ultra-1', self.info['uname_i']) is not None
def _is_ultra250(self):
return re.match(r'.*Ultra-250', self.info['uname_i']) is not None
def _is_ultra2(self):
return re.match(r'.*Ultra-2', self.info['uname_i']) is not None
def _is_ultra30(self):
return re.match(r'.*Ultra-30', self.info['uname_i']) is not None
def _is_ultra4(self):
return re.match(r'.*Ultra-4', self.info['uname_i']) is not None
def _is_ultra5_10(self):
return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None
def _is_ultra5(self):
return re.match(r'.*Ultra-5', self.info['uname_i']) is not None
def _is_ultra60(self):
return re.match(r'.*Ultra-60', self.info['uname_i']) is not None
def _is_ultra80(self):
return re.match(r'.*Ultra-80', self.info['uname_i']) is not None
def _is_ultraenterprice(self):
return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None
def _is_ultraenterprice10k(self):
return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None
def _is_sunfire(self):
return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None
def _is_ultra(self):
return re.match(r'.*Ultra', self.info['uname_i']) is not None
def _is_cpusparcv7(self):
return self.info['processor'] == 'sparcv7'
def _is_cpusparcv8(self):
return self.info['processor'] == 'sparcv8'
def _is_cpusparcv9(self):
return self.info['processor'] == 'sparcv9'
class Win32CPUInfo(CPUInfoBase):
info = None
pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor"
# XXX: what does the value of
# HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0
# mean?
def __init__(self):
if self.info is not None:
return
info = []
        try:
            #XXX: Bad style to use so long `try:...except:...`. Fix it!
            try:
                import _winreg as winreg  # Python 2
            except ImportError:
                import winreg  # Python 3
            prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"
                              r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE)
            chnd = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self.pkey)
            pnum = 0
            while True:
                try:
                    proc = winreg.EnumKey(chnd, pnum)
                except winreg.error:
                    break
                else:
                    pnum += 1
                    info.append({"Processor": proc})
                    phnd = winreg.OpenKey(chnd, proc)
                    pidx = 0
                    while True:
                        try:
                            name, value, vtpe = winreg.EnumValue(phnd, pidx)
                        except winreg.error:
                            break
                        else:
                            pidx = pidx + 1
                            info[-1][name] = value
                            if name == "Identifier":
                                srch = prgx.search(value)
                                if srch:
                                    info[-1]["Family"] = int(srch.group("FML"))
                                    info[-1]["Model"] = int(srch.group("MDL"))
                                    info[-1]["Stepping"] = int(srch.group("STP"))
        except Exception:
            print( str(sys.exc_info()) + '(ignoring)' )
self.__class__.info = info
def _not_impl(self):
pass
# Athlon
def _is_AMD(self):
return self.info[0]['VendorIdentifier'] == 'AuthenticAMD'
def _is_Am486(self):
return self.is_AMD() and self.info[0]['Family'] == 4
def _is_Am5x86(self):
return self.is_AMD() and self.info[0]['Family'] == 4
def _is_AMDK5(self):
return self.is_AMD() and self.info[0]['Family'] == 5 \
and self.info[0]['Model'] in [0, 1, 2, 3]
def _is_AMDK6(self):
return self.is_AMD() and self.info[0]['Family'] == 5 \
and self.info[0]['Model'] in [6, 7]
def _is_AMDK6_2(self):
return self.is_AMD() and self.info[0]['Family'] == 5 \
and self.info[0]['Model'] == 8
def _is_AMDK6_3(self):
return self.is_AMD() and self.info[0]['Family'] == 5 \
and self.info[0]['Model'] == 9
def _is_AMDK7(self):
return self.is_AMD() and self.info[0]['Family'] == 6
# To reliably distinguish between the different types of AMD64 chips
# (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.) would
# require looking at the 'brand' from cpuid
def _is_AMD64(self):
return self.is_AMD() and self.info[0]['Family'] == 15
# Intel
def _is_Intel(self):
return self.info[0]['VendorIdentifier'] == 'GenuineIntel'
def _is_i386(self):
return self.info[0]['Family'] == 3
def _is_i486(self):
return self.info[0]['Family'] == 4
def _is_i586(self):
return self.is_Intel() and self.info[0]['Family'] == 5
def _is_i686(self):
return self.is_Intel() and self.info[0]['Family'] == 6
def _is_Pentium(self):
return self.is_Intel() and self.info[0]['Family'] == 5
def _is_PentiumMMX(self):
return self.is_Intel() and self.info[0]['Family'] == 5 \
and self.info[0]['Model'] == 4
def _is_PentiumPro(self):
return self.is_Intel() and self.info[0]['Family'] == 6 \
and self.info[0]['Model'] == 1
def _is_PentiumII(self):
return self.is_Intel() and self.info[0]['Family'] == 6 \
and self.info[0]['Model'] in [3, 5, 6]
def _is_PentiumIII(self):
return self.is_Intel() and self.info[0]['Family'] == 6 \
and self.info[0]['Model'] in [7, 8, 9, 10, 11]
def _is_PentiumIV(self):
return self.is_Intel() and self.info[0]['Family'] == 15
def _is_PentiumM(self):
return self.is_Intel() and self.info[0]['Family'] == 6 \
and self.info[0]['Model'] in [9, 13, 14]
def _is_Core2(self):
return self.is_Intel() and self.info[0]['Family'] == 6 \
and self.info[0]['Model'] in [15, 16, 17]
# Varia
def _is_singleCPU(self):
return len(self.info) == 1
def _getNCPUs(self):
return len(self.info)
def _has_mmx(self):
if self.is_Intel():
return (self.info[0]['Family'] == 5 and self.info[0]['Model'] == 4) \
or (self.info[0]['Family'] in [6, 15])
elif self.is_AMD():
return self.info[0]['Family'] in [5, 6, 15]
else:
return False
def _has_sse(self):
if self.is_Intel():
return (self.info[0]['Family'] == 6 and \
self.info[0]['Model'] in [7, 8, 9, 10, 11]) \
or self.info[0]['Family'] == 15
elif self.is_AMD():
return (self.info[0]['Family'] == 6 and \
self.info[0]['Model'] in [6, 7, 8, 10]) \
or self.info[0]['Family'] == 15
else:
return False
    def _has_sse2(self):
        if self.is_Intel():
            # note: must match the _is_PentiumIV method defined above
            return self.is_PentiumIV() or self.is_PentiumM() \
                   or self.is_Core2()
elif self.is_AMD():
return self.is_AMD64()
else:
return False
def _has_3dnow(self):
return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15]
def _has_3dnowext(self):
return self.is_AMD() and self.info[0]['Family'] in [6, 15]
if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?)
cpuinfo = LinuxCPUInfo
elif sys.platform.startswith('irix'):
cpuinfo = IRIXCPUInfo
elif sys.platform == 'darwin':
cpuinfo = DarwinCPUInfo
elif sys.platform.startswith('sunos'):
cpuinfo = SunOSCPUInfo
elif sys.platform.startswith('win32'):
cpuinfo = Win32CPUInfo
elif sys.platform.startswith('cygwin'):
cpuinfo = LinuxCPUInfo
#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices.
else:
cpuinfo = CPUInfoBase
cpu = cpuinfo()
if __name__ == "__main__":
cpu.is_blaa()
cpu.is_Intel()
cpu.is_Alpha()
print( 'CPU information:' )
for name in dir(cpuinfo):
if name[0] == '_' and name[1] != '_':
r = getattr(cpu, name[1:])()
if r:
if r != 1:
print( '%s=%s' % (name[1:], r) )
else:
print( name[1:] )
    print()
import __future__
import sys
import numpy
from numexprz import interpreter, expressions, use_vml
# RAM: pyTables imports is_cpu_amd_intel indirectly from here...
#from numexpr.utils import CacheDict, is_cpu_amd_intel
from numexprz.utils import CacheDict
# Declare a double type that does not exist in Python space
double = numpy.double
complex64 = numpy.complex64
int_ = numpy.int32
long_ = numpy.int64
typecode_to_kind = {'b': 'bool', 'i': 'int', 'l': 'long', 'f': 'float',
'd': 'double', 'c': 'complex', 'x' : 'complex64',
's': 'bytes', 'n': 'none'}
kind_to_typecode = {'bool': 'b', 'int': 'i', 'long': 'l', 'float': 'f',
'double': 'd', 'complex': 'c', 'complex64' : 'x',
'bytes': 's', 'none': 'n'}
type_to_typecode = {bool: 'b', int_: 'i', long_: 'l', float: 'f',
double: 'd', complex: 'c', complex64: 'x', bytes: 's'}
type_to_kind = expressions.type_to_kind
kind_to_type = expressions.kind_to_type
default_type = kind_to_type[expressions.default_kind]
# Final additions for Python 3 (mainly for PyTables needs)
if sys.version_info[0] > 2:
typecode_to_kind['s'] = 'str'
kind_to_typecode['str'] = 's'
type_to_typecode[str] = 's'
unichr = chr
scalar_constant_kinds = kind_to_typecode.keys()
class ASTNode(object):
"""Abstract Syntax Tree node.
Members:
astType -- type of node (op, constant, variable, raw, or alias)
astKind -- the type of the result (bool, float, etc.)
value -- value associated with this node.
An opcode, numerical value, a variable name, etc.
children -- the children below this node
reg -- the register assigned to the result for this node.
"""
cmpnames = ['astType', 'astKind', 'value', 'children']
def __init__(self, astType='generic', astKind='unknown',
value=None, children=()):
object.__init__(self)
self.astType = astType
self.astKind = astKind
self.value = value
self.children = tuple(children)
self.reg = None
def __eq__(self, other):
if self.astType == 'alias':
self = self.value
if other.astType == 'alias':
other = other.value
if not isinstance(other, ASTNode):
return False
for name in self.cmpnames:
if getattr(self, name) != getattr(other, name):
return False
return True
def __hash__(self):
if self.astType == 'alias':
self = self.value
return hash((self.astType, self.astKind, self.value, self.children))
def __str__(self):
return 'AST(%s, %s, %s, %s, %s)' % (self.astType, self.astKind,
self.value, self.children, self.reg)
def __repr__(self):
return '<AST object at %s>' % id(self)
def key(self):
return (self.astType, self.astKind, self.value, self.children)
def typecode(self):
return kind_to_typecode[self.astKind]
def postorderWalk(self):
for c in self.children:
for w in c.postorderWalk():
yield w
yield self
def allOf(self, *astTypes):
astTypes = set(astTypes)
for w in self.postorderWalk():
if w.astType in astTypes:
yield w
def expressionToAST(ex):
"""Take an expression tree made out of expressions.ExpressionNode,
and convert to an AST tree.
This is necessary as ExpressionNode overrides many methods to act
like a number.
"""
return ASTNode(ex.astType, ex.astKind, ex.value,
[expressionToAST(c) for c in ex.children])
def sigPerms(s):
"""Generate all possible signatures derived by upcasting the given
signature.
"""
codes = 'bilfdcx'
if not s:
yield ''
elif s[0] in codes:
start = codes.index(s[0])
for x in codes[start:]:
for y in sigPerms(s[1:]):
yield x + y
elif s[0] == 's': # numbers shall not be cast to strings
for y in sigPerms(s[1:]):
yield 's' + y
else:
yield s
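# Minimal sketch (illustration only): sigPerms enumerates upcast variants of
# a signature along the 'bilfdcx' chain, e.g. 'if' (int, float) yields
# 'if', 'id', 'ic', 'ix', then 'lf', 'ld', ... so typeCompileAst below can
# search for the first opcode that actually exists in the interpreter.
def _sigperms_demo():
    return list(sigPerms('if'))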
def typeCompileAst(ast):
"""Assign appropiate types to each node in the AST.
Will convert opcodes and functions to appropiate upcast version,
and add "cast" ops if needed.
"""
children = list(ast.children)
if ast.astType == 'op':
retsig = ast.typecode()
basesig = ''.join(x.typecode() for x in list(ast.children))
# Find some operation that will work on an acceptable casting of args.
for sig in sigPerms(basesig):
value = (ast.value + '_' + retsig + sig).encode('latin-1')
if value in interpreter.opcodes:
break
else:
for sig in sigPerms(basesig):
funcname = (ast.value + '_' + retsig + sig).encode('latin-1')
if funcname in interpreter.funccodes:
value = ('func_%sn' % (retsig + sig)).encode('latin-1')
children += [ASTNode('raw', 'none',
interpreter.funccodes[funcname])]
break
else:
raise NotImplementedError(
"couldn't find matching opcode for '%s'"
% (ast.value + '_' + retsig + basesig))
# First just cast constants, then cast variables if necessary:
for i, (have, want) in enumerate(zip(basesig, sig)):
if have != want:
kind = typecode_to_kind[want]
if children[i].astType == 'constant':
children[i] = ASTNode('constant', kind, children[i].value)
else:
opname = "cast"
children[i] = ASTNode('op', kind, opname, [children[i]])
else:
value = ast.value
children = ast.children
return ASTNode(ast.astType, ast.astKind, value,
[typeCompileAst(c) for c in children])
class Register(object):
"""Abstraction for a register in the VM.
Members:
node -- the AST node this corresponds to
temporary -- True if this isn't an input or output
immediate -- not a register, but an immediate value
n -- the physical register number.
None if no number assigned yet.
"""
def __init__(self, astnode, temporary=False):
self.node = astnode
self.temporary = temporary
self.immediate = False
self.n = None
def __str__(self):
if self.temporary:
name = 'Temporary'
else:
name = 'Register'
return '%s(%s, %s, %s)' % (name, self.node.astType,
self.node.astKind, self.n,)
def __repr__(self):
return self.__str__()
class Immediate(Register):
"""Representation of an immediate (integer) operand, instead of
a register.
"""
def __init__(self, astnode):
Register.__init__(self, astnode)
self.immediate = True
def __str__(self):
return 'Immediate(%d)' % (self.node.value,)
def stringToExpression(s, types, context):
"""Given a string, convert it to a tree of ExpressionNode's.
"""
old_ctx = expressions._context.get_current_context()
try:
expressions._context.set_new_context(context)
# first compile to a code object to determine the names
if context.get('truediv', False):
flags = __future__.division.compiler_flag
else:
flags = 0
c = compile(s, '<expr>', 'eval', flags)
# make VariableNode's for the names
names = {}
for name in c.co_names:
if name == "None":
names[name] = None
elif name == "True":
names[name] = True
elif name == "False":
names[name] = False
else:
t = types.get(name, default_type)
names[name] = expressions.VariableNode(name, type_to_kind[t])
names.update(expressions.functions)
# now build the expression
ex = eval(c, names)
if expressions.isConstant(ex):
ex = expressions.ConstantNode(ex, expressions.getKind(ex))
elif not isinstance(ex, expressions.ExpressionNode):
raise TypeError("unsupported expression type: %s" % type(ex))
finally:
expressions._context.set_new_context(old_ctx)
return ex
def isReduction(ast):
return ast.value.startswith(b'sum_') or ast.value.startswith(b'prod_')
def getInputOrder(ast, input_order=None):
"""Derive the input order of the variables in an expression.
"""
variables = {}
for a in ast.allOf('variable'):
variables[a.value] = a
variable_names = set(variables.keys())
if input_order:
if variable_names != set(input_order):
raise ValueError(
"input names (%s) don't match those found in expression (%s)"
% (input_order, variable_names))
ordered_names = input_order
else:
ordered_names = list(variable_names)
ordered_names.sort()
ordered_variables = [variables[v] for v in ordered_names]
return ordered_variables
def convertConstantToKind(x, kind):
# Exception for 'float' types that will return the NumPy float32 type
if kind == 'float':
return numpy.float32(x)
return kind_to_type[kind](x)
def getConstants(ast):
const_map = {}
for a in ast.allOf('constant'):
const_map[(a.astKind, a.value)] = a
ordered_constants = list( const_map.keys() )
ordered_constants.sort()
constants_order = [const_map[v] for v in ordered_constants]
constants = [convertConstantToKind(a.value, a.astKind)
for a in constants_order]
return constants_order, constants
def sortNodesByOrder(nodes, order):
order_map = {}
for i, (_, v, _) in enumerate(order):
order_map[v] = i
dec_nodes = [(order_map[n.value], n) for n in nodes]
dec_nodes.sort()
return [a[1] for a in dec_nodes]
def assignLeafRegisters(inodes, registerMaker):
"""Assign new registers to each of the leaf nodes.
"""
leafRegisters = {}
for node in inodes:
key = node.key()
if key in leafRegisters:
node.reg = leafRegisters[key]
else:
node.reg = leafRegisters[key] = registerMaker(node)
def assignBranchRegisters(inodes, registerMaker):
"""Assign temporary registers to each of the branch nodes.
"""
for node in inodes:
node.reg = registerMaker(node, temporary=True)
def collapseDuplicateSubtrees(ast):
"""Common subexpression elimination.
"""
seen = {}
aliases = []
for a in ast.allOf('op'):
if a in seen:
target = seen[a]
a.astType = 'alias'
a.value = target
a.children = ()
aliases.append(a)
else:
seen[a] = a
# Set values and registers so optimizeTemporariesAllocation
# doesn't get confused
for a in aliases:
while a.value.astType == 'alias':
a.value = a.value.value
return aliases
def optimizeTemporariesAllocation(ast):
"""Attempt to minimize the number of temporaries needed, by
reusing old ones.
"""
nodes = [n for n in ast.postorderWalk() if n.reg.temporary]
users_of = dict((n.reg, set()) for n in nodes)
# node_regs = dict((n, set(c.reg for c in n.children if c.reg.temporary))
# for n in nodes)
if nodes and nodes[-1] is not ast:
nodes_to_check = nodes + [ast]
else:
nodes_to_check = nodes
for n in nodes_to_check:
for c in n.children:
if c.reg.temporary:
users_of[c.reg].add(n)
unused = dict([(tc, set()) for tc in scalar_constant_kinds])
for n in nodes:
for c in n.children:
reg = c.reg
if reg.temporary:
users = users_of[reg]
users.discard(n)
if not users:
unused[reg.node.astKind].add(reg)
if unused[n.astKind]:
reg = unused[n.astKind].pop()
users_of[reg] = users_of[n.reg]
n.reg = reg
def setOrderedRegisterNumbers(order, start):
"""Given an order of nodes, assign register numbers.
"""
for i, node in enumerate(order):
node.reg.n = start + i
return start + len(order)
def setRegisterNumbersForTemporaries(ast, start):
"""Assign register numbers for temporary registers, keeping track of
aliases and handling immediate operands.
"""
seen = 0
signature = ''
aliases = []
for node in ast.postorderWalk():
if node.astType == 'alias':
aliases.append(node)
node = node.value
if node.reg.immediate:
node.reg.n = node.value
continue
reg = node.reg
if reg.n is None:
reg.n = start + seen
seen += 1
signature += reg.node.typecode()
for node in aliases:
node.reg = node.value.reg
return start + seen, signature
def convertASTtoThreeAddrForm(ast):
"""Convert an AST to a three address form.
Three address form is (op, reg1, reg2, reg3), where reg1 is the
destination of the result of the instruction.
I suppose this should be called three register form, but three
address form is found in compiler theory.
"""
return [(node.value, node.reg) + tuple([c.reg for c in node.children])
for node in ast.allOf('op')]
def compileThreeAddrForm(program):
"""Given a three address form of the program, compile it a string that
the VM understands.
"""
def nToChr(reg):
if reg is None:
return b'\xff'
elif reg.n < 0:
raise ValueError("negative value for register number %s" % reg.n)
else:
if sys.version_info[0] < 3:
return chr(reg.n)
else:
# int.to_bytes is not available in Python < 3.2
#return reg.n.to_bytes(1, sys.byteorder)
return bytes([reg.n])
def quadrupleToString(opcode, store, a1=None, a2=None):
# This has been changed above to redefine to chr in Python 3
cop = unichr(interpreter.opcodes[opcode]).encode('latin-1')
# cop = str(interpreter.opcodes[opcode]).encode('latin-1')
cs = nToChr(store)
ca1 = nToChr(a1)
ca2 = nToChr(a2)
return cop + cs + ca1 + ca2
def toString(args):
while len(args) < 4:
args += (None,)
opcode, store, a1, a2 = args[:4]
s = quadrupleToString(opcode, store, a1, a2)
l = [s]
args = args[4:]
while args:
s = quadrupleToString(b'noop', *args[:3])
l.append(s)
args = args[3:]
return b''.join(l)
prog_str = b''.join([toString(t) for t in program])
return prog_str
context_info = [
('optimization', ('none', 'moderate', 'aggressive'), 'aggressive'),
('truediv', (False, True, 'auto'), 'auto')
]
def getContext(kwargs, frame_depth=1):
d = kwargs.copy()
context = {}
for name, allowed, default in context_info:
value = d.pop(name, default)
if value in allowed:
context[name] = value
else:
raise ValueError("'%s' must be one of %s" % (name, allowed))
if d:
raise ValueError("Unknown keyword argument '%s'" % d.popitem()[0])
if context['truediv'] == 'auto':
caller_globals = sys._getframe(frame_depth + 1).f_globals
context['truediv'] = \
caller_globals.get('division', None) == __future__.division
return context
def precompile(ex, signature=(), context={}):
"""Compile the expression to an intermediate form.
"""
types = dict(signature)
input_order = [name for (name, type_) in signature]
    if sys.version_info.major == 2 and isinstance( ex, (str,unicode)):
        ex = stringToExpression(ex, types, context)
    elif isinstance( ex, (str,bytes)):
        ex = stringToExpression(ex, types, context)
# the AST is like the expression, but the node objects don't have
# any odd interpretations
ast = expressionToAST(ex)
if ex.astType != 'op':
ast = ASTNode('op', value='copy', astKind=ex.astKind, children=(ast,))
ast = typeCompileAst(ast)
aliases = collapseDuplicateSubtrees(ast)
assignLeafRegisters(ast.allOf('raw'), Immediate)
assignLeafRegisters(ast.allOf('variable', 'constant'), Register)
assignBranchRegisters(ast.allOf('op'), Register)
# assign registers for aliases
for a in aliases:
a.reg = a.value.reg
input_order = getInputOrder(ast, input_order)
constants_order, constants = getConstants(ast)
if isReduction(ast):
ast.reg.temporary = False
optimizeTemporariesAllocation(ast)
ast.reg.temporary = False
r_output = 0
ast.reg.n = 0
r_inputs = r_output + 1
r_constants = setOrderedRegisterNumbers(input_order, r_inputs)
r_temps = setOrderedRegisterNumbers(constants_order, r_constants)
r_end, tempsig = setRegisterNumbersForTemporaries(ast, r_temps)
threeAddrProgram = convertASTtoThreeAddrForm(ast)
input_names = tuple([a.value for a in input_order])
signature = ''.join(type_to_typecode[types.get(x, default_type)]
for x in input_names)
return threeAddrProgram, signature, tempsig, constants, input_names
def NumExpr(ex, signature=(), **kwargs):
"""
Compile an expression built using E.<variable> variables to a function.
ex can also be specified as a string "2*a+3*b".
The order of the input variables and their types can be specified using the
signature parameter, which is a list of (name, type) pairs.
Returns a `NumExpr` object containing the compiled function.
"""
# NumExpr can be called either directly by the end-user, in which case
# kwargs need to be sanitized by getContext, or by evaluate,
# in which case kwargs are in already sanitized.
# In that case frame_depth is wrong (it should be 2) but it doesn't matter
# since it will not be used (because truediv='auto' has already been
# translated to either True or False).
context = getContext(kwargs, frame_depth=1)
threeAddrProgram, inputsig, tempsig, constants, input_names = \
precompile(ex, signature, context)
program = compileThreeAddrForm(threeAddrProgram)
# print( inputsig )
# print( tempsig )
# print( program )
# print( constants )
# print( input_names )
return interpreter.NumExpr(inputsig.encode('latin-1'),
tempsig.encode('latin-1'),
program, constants, input_names)
def disassemble(nex):
"""
Given a NumExpr object, return a list which is the program disassembled.
"""
rev_opcodes = {}
for op in interpreter.opcodes:
rev_opcodes[interpreter.opcodes[op]] = op
r_constants = 1 + len(nex.signature)
r_temps = r_constants + len(nex.constants)
def getArg(pc, offset):
if sys.version_info[0] < 3:
arg = ord(nex.program[pc + offset])
op = rev_opcodes.get(ord(nex.program[pc]))
else:
arg = nex.program[pc + offset]
op = rev_opcodes.get(nex.program[pc])
try:
code = op.split(b'_')[1][offset - 1]
except IndexError:
return None
if sys.version_info[0] > 2:
# int.to_bytes is not available in Python < 3.2
#code = code.to_bytes(1, sys.byteorder)
code = bytes([code])
if arg == 255:
return None
if code != b'n':
if arg == 0:
return b'r0'
elif arg < r_constants:
return ('r%d[%s]' % (arg, nex.input_names[arg - 1])).encode('latin-1')
elif arg < r_temps:
return ('c%d[%s]' % (arg, nex.constants[arg - r_constants])).encode('latin-1')
else:
return ('t%d' % (arg,)).encode('latin-1')
else:
return arg
source = []
for pc in range(0, len(nex.program), 4):
if sys.version_info[0] < 3:
op = rev_opcodes.get(ord(nex.program[pc]))
else:
op = rev_opcodes.get(nex.program[pc])
dest = getArg(pc, 1)
arg1 = getArg(pc, 2)
arg2 = getArg(pc, 3)
source.append((op, dest, arg1, arg2))
return source
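# Hedged usage sketch (illustration only): compile a simple expression and
# inspect the resulting program as (op, dest, arg1, arg2) tuples.
def _disassemble_demo():
    func = NumExpr("2*a + 3*b", signature=[('a', double), ('b', double)])
    return disassemble(func)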
def getType(a):
kind = a.dtype.kind
if kind == 'b':
return bool
if kind in 'iu':
if a.dtype.itemsize > 4:
return long_ # ``long`` is for integers of more than 32 bits
if kind == 'u' and a.dtype.itemsize == 4:
return long_ # use ``long`` here as an ``int`` is not enough
return int_
if kind == 'f':
if a.dtype.itemsize > 4:
return double # ``double`` is for floats of more than 32 bits
return float
if kind == 'c':
        # RAM: need to distinguish between complex64 and complex128 here
if a.dtype.itemsize > 8:
return complex
return complex64
if kind == 'S':
return bytes
raise ValueError("unknown type %s" % a.dtype.name)
def getExprNames(text, context):
ex = stringToExpression(text, {}, context)
ast = expressionToAST(ex)
input_order = getInputOrder(ast, None)
#try to figure out if vml operations are used by expression
if not use_vml:
ex_uses_vml = False
else:
for node in ast.postorderWalk():
if node.astType == 'op' \
and node.value in ['sin', 'cos', 'exp', 'log',
'expm1', 'log1p',
'pow', 'div',
'sqrt', 'inv',
'sinh', 'cosh', 'tanh',
'arcsin', 'arccos', 'arctan',
'arccosh', 'arcsinh', 'arctanh',
'arctan2', 'abs']:
ex_uses_vml = True
break
else:
ex_uses_vml = False
return [a.value for a in input_order], ex_uses_vml
# Dictionaries for caching variable names and compiled expressions
_names_cache = CacheDict(512)
_numexpr_cache = CacheDict(512)
def evaluate(ex, local_dict=None, global_dict=None,
out=None, order='K', casting='safe', **kwargs):
"""Evaluate a simple array expression element-wise, using the new iterator.
ex is a string forming an expression, like "2*a+3*b". The values for "a"
and "b" will by default be taken from the calling function's frame
    (through use of sys._getframe()). Alternatively, they can be specified
using the 'local_dict' or 'global_dict' arguments.
Parameters
----------
local_dict : dictionary, optional
A dictionary that replaces the local operands in current frame.
global_dict : dictionary, optional
A dictionary that replaces the global operands in current frame.
out : NumPy array, optional
        An existing array where the outcome is going to be stored. Care is
        required so that this array has the same shape and type as the
        actual outcome of the computation. Useful for avoiding unnecessary
new array allocations.
order : {'C', 'F', 'A', or 'K'}, optional
Controls the iteration order for operands. 'C' means C order, 'F'
means Fortran order, 'A' means 'F' order if all the arrays are
Fortran contiguous, 'C' order otherwise, and 'K' means as close to
the order the array elements appear in memory as possible. For
efficient computations, typically 'K'eep order (the default) is
desired.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when making a copy or
buffering. Setting this to 'unsafe' is not recommended, as it can
adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
"""
if sys.version_info.major == 2:
if not isinstance( ex, (str,unicode)):
raise ValueError("must specify expression as a string")
else:
if not isinstance( ex, (str,bytes)):
raise ValueError("must specify expression as a string")
# Get the names for this expression
context = getContext(kwargs, frame_depth=1)
expr_key = (ex, tuple(sorted(context.items())))
if expr_key not in _names_cache:
_names_cache[expr_key] = getExprNames(ex, context)
names, ex_uses_vml = _names_cache[expr_key]
# Get the arguments based on the names.
call_frame = sys._getframe(1)
if local_dict is None:
local_dict = call_frame.f_locals
if global_dict is None:
global_dict = call_frame.f_globals
arguments = []
for name in names:
try:
a = local_dict[name]
except KeyError:
a = global_dict[name]
arguments.append(numpy.asarray(a))
# Create a signature
signature = [(name, getType(arg)) for (name, arg) in zip(names, arguments)]
# Look up numexpr if possible.
numexpr_key = expr_key + (tuple(signature),)
try:
compiled_ex = _numexpr_cache[numexpr_key]
except KeyError:
compiled_ex = _numexpr_cache[numexpr_key] = \
NumExpr(ex, signature, **context)
kwargs = {'out': out, 'order': order, 'casting': casting,
'ex_uses_vml': ex_uses_vml}
return compiled_ex(*arguments, **kwargs) | zorroautomator | /zorroautomator-0.7.4b0.tar.gz/zorroautomator-0.7.4b0/numexprz/necompiler.py | necompiler.py |
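# Hedged usage sketch (illustration only): evaluate() resolves 'a' and 'b'
# from the caller's frame via sys._getframe(), so the locals of this demo
# function are found automatically.
def _evaluate_demo():
    a = numpy.arange(1e4)
    b = numpy.arange(1e4)
    return evaluate("2*a + 3*b")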
# RAM: numexpr seems to be a little challenging to use absolute_imports with
# I get an error in Python 3.4 saying it can't find PyInit_interpreter ...
#from __future__ import (division, absolute_import, print_function)
#from __config__ import show as show_config
from .__config__ import get_info
if get_info('mkl'):
print( "Using Intel Vectory Math Library for Numexprz" )
use_vml = True
else:
use_vml = False
from .cpuinfo import cpu
if cpu.is_AMD() or cpu.is_Intel():
is_cpu_amd_intel = True
else:
is_cpu_amd_intel = False
import os, os.path
import platform
from numexprz.expressions import E
from numexprz.necompiler import NumExpr, disassemble, evaluate
#from numexprz.tests import test, print_versions
from numexprz.utils import (
get_vml_version, set_vml_accuracy_mode, set_vml_num_threads,
set_num_threads, detect_number_of_cores, detect_number_of_threads)
# Detect the number of cores
# RAM: the functions in utils don't update numexpr.ncores or numexpr.nthreads,
# so we detect and set them here at import time.
def countPhysicalProcessors():
    """
    Counts the number of distinct physical CPU packages reported by the
    'physical id' field of /proc/cpuinfo (Linux only).
    """
    cpuInfo = cpu.info
    physicalIDs = []
    for J, cpuDict in enumerate( cpuInfo ):
        if cpuDict['physical id'] not in physicalIDs:
            physicalIDs.append( cpuDict['physical id'] )
    return len( physicalIDs )
try:
ncores = int(cpu.info[0]['cpu cores']) * countPhysicalProcessors()
nthreads = ncores
except Exception: # not Linux, or an unexpected /proc/cpuinfo format
ncores = detect_number_of_cores()
nthreads = detect_number_of_threads()
# Initialize the number of threads to be used
if 'sparc' in platform.machine():
import warnings
    warnings.warn('The number of threads has been set to 1 because problems related '
                  'to threading have been reported on some sparc machines. '
'The number of threads can be changed using the "set_num_threads" '
'function.')
set_num_threads(1)
else:
set_num_threads(nthreads)
# The default for VML is 1 thread (see #39)
set_vml_num_threads(1)
from . import version
dirname = os.path.dirname(__file__)
__version__ = version.version | zorroautomator | /zorroautomator-0.7.4b0.tar.gz/zorroautomator-0.7.4b0/numexprz/__init__.py | __init__.py |
import numpy as np
import math
import scipy.ndimage
import matplotlib.pyplot as plt
# from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
import mpl_toolkits.axes_grid1.anchored_artists
import os, os.path
import subprocess
from multiprocessing.pool import ThreadPool
#### STATIC HANDLE ####
# Not sure if this is used here, useful for storing data in the function as if
# it is a Python object, for repeated operations
def static_var(varname, value):
def decorate(func):
setattr(func, varname, value)
return func
return decorate
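# Hedged usage sketch (illustration only): static_var attaches persistent
# state to a function, mimicking a C static variable, just as rotmean()
# below caches its precomputed meshes between calls.
@static_var("counter", 0)
def _static_var_demo():
    _static_var_demo.counter += 1
    return _static_var_demo.counter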
############### IMAGE UTILITY FUNCTIONS ###############
def ravel_trimat( trimat ):
"""
    Returns a 1-D representation of the non-zero elements of trimat.
    raveled, unravelIndices = ravel_trimat( triangularMatrix )
    The unravelIndices are necessary to unravel the vector back into matrix form.
"""
[M,N] = trimat.shape
triIndices = trimat.astype('bool')
vectorIndices = np.arange(0,triIndices.size)[np.ravel( triIndices )]
unravelIndices = np.unravel_index( vectorIndices, [M,N] )
raveled = np.ravel( trimat[triIndices] )
return raveled, unravelIndices
def unravel_trimat( raveled, unravelIndices, shape=None ):
"""
Undo's a
Note: if shape = None, M,N are taken to be the maximum values in the matrix, so if there's zero columns
on the right, or zero rows on the bottom, they will be cropped in the returned triangular matrix.
"""
if shape == None:
M = np.max( unravelIndices[0] )
N = np.max( unravelIndices[1] )
else:
M = shape[0]; N = shape[1]
unraveled = np.zeros( [M,N] )
unraveled[unravelIndices[0], unravelIndices[1]] = raveled
return unraveled
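# Minimal round-trip sketch (illustration only): ravel the non-zero elements
# of an upper-triangular matrix and rebuild it, passing shape= to preserve
# any trailing zero rows/columns.
def _trimat_demo():
    trimat = np.triu( np.random.rand( 4, 4 ), k=1 )
    raveled, unravelIndices = ravel_trimat( trimat )
    rebuilt = unravel_trimat( raveled, unravelIndices, shape=trimat.shape )
    return np.allclose( trimat, rebuilt )  # True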
def apodization( name = 'butter.32', shape= [2048,2048], radius=None ):
""" apodization( name = 'butter.32', size = [2048,2048], radius=None )
Provides a 2-D filter or apodization window for Fourier filtering or image clamping.
Radius = None defaults to shape/2
Valid names are:
'hann' - von Hann cosine window on radius
'hann_square' as above but on X-Y
'hamming' - good for apodization, nonsense as a filter
'butter.X' Butterworth multi-order filter where X is the order of the Lorentzian
'butter_square.X' Butterworth in X-Y
'gauss_trunc' - truncated gaussian, higher performance (smaller PSF) than hann filter
'gauss' - regular gaussian
NOTE: There are windows in scipy.signal for 1D-filtering...
WARNING: doesn't work properly for odd image dimensions
"""
# Make meshes
shape = np.asarray( shape )
if radius is None:
radius = shape/2.0
else:
radius = np.asarray( radius, dtype='float' )
# DEBUG: Doesn't work right for odd numbers
[xmesh,ymesh] = np.meshgrid( np.arange(-shape[1]/2,shape[1]/2), np.arange(-shape[0]/2,shape[0]/2) )
r2mesh = xmesh*xmesh/( np.double(radius[0])**2 ) + ymesh*ymesh/( np.double(radius[1])**2 )
try:
[name, order] = name.lower().split('.')
order = np.double(order)
except ValueError:
order = 1
if name == 'butter':
window = np.sqrt( 1.0 / (1.0 + r2mesh**order ) )
elif name == 'butter_square':
window = np.sqrt( 1.0 / (1.0 + (xmesh/radius[1])**order))*np.sqrt(1.0 / (1.0 + (ymesh/radius[0])**order) )
elif name == 'hann':
cropwin = ((xmesh/radius[1])**2.0 + (ymesh/radius[0])**2.0) <= 1.0
window = cropwin.astype('float') * 0.5 * ( 1.0 + np.cos( 1.0*np.pi*np.sqrt( (xmesh/radius[1])**2.0 + (ymesh/radius[0])**2.0 ) ) )
elif name == 'hann_square':
window = ( (0.5 + 0.5*np.cos( np.pi*( xmesh/radius[1]) ) ) *
(0.5 + 0.5*np.cos( np.pi*( ymesh/radius[0] ) ) ) )
elif name == 'hamming':
cropwin = ((xmesh/radius[1])**2.0 + (ymesh/radius[0])**2.0) <= 1.0
window = cropwin.astype('float') * ( 0.54 + 0.46*np.cos( 1.0*np.pi*np.sqrt( (xmesh/radius[1])**2.0 + (ymesh/radius[0])**2.0 ) ) )
elif name == 'hamming_square':
window = ( (0.54 + 0.46*np.cos( np.pi*( xmesh/radius[1]) ) ) *
(0.54 + 0.46*np.cos( np.pi*( ymesh/radius[0] ) ) ) )
elif name == 'gauss' or name == 'gaussian':
window = np.exp( -(xmesh/radius[1])**2.0 - (ymesh/radius[0])**2.0 )
elif name == 'gauss_trunc':
cropwin = ((0.5*xmesh/radius[1])**2.0 + (0.5*ymesh/radius[0])**2.0) <= 1.0
window = cropwin.astype('float') * np.exp( -(xmesh/radius[1])**2.0 - (ymesh/radius[0])**2.0 )
elif name == 'lanczos':
print( "TODO: Implement Lanczos window" )
return
else:
print( "Error: unknown filter name passed into apodization" )
return
return window
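# Hedged usage sketch (illustration only): build a soft-edged Butterworth
# window for Fourier filtering; radius defaults to half the shape, so the
# window is ~1.0 at the center and falls toward 0.0 at the corners.
def _apodization_demo():
    window = apodization( name='butter.32', shape=[512,512] )
    return window.shape, window.min(), window.max()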
def edge_mask( maskShape=[2048,2048], edges=[64,64,64,64] ):
"""
    Generate a boolean mask with border pixels zeroed; edges = [top, bottom, left, right] pixel widths.
"""
edgemask = np.ones( maskShape )
[xmesh,ymesh] = np.meshgrid( np.arange(0,maskShape[1]), np.arange(0,maskShape[0]) )
edgemask *= xmesh >= edges[2]
edgemask *= xmesh <= maskShape[1] - edges[3]
edgemask *= ymesh >= edges[0]
edgemask *= ymesh <= maskShape[0] - edges[1]
edgemask = np.reshape( edgemask, [1, edgemask.shape[0], edgemask.shape[1]]).astype( 'bool' )
return edgemask
@static_var( "rfloor", None )
@static_var( "rceil", None )
@static_var( "rmax", 0 )
@static_var( "remain", 0 )
@static_var( "remain_n", 0 )
@static_var( "weights", 0 )
@static_var( "raxis", 0 )
@static_var( "prevN", 0 )
@static_var( "prevM", 0 )
@static_var( "weights", 0 )
@static_var( "raxis", 0 )
def rotmean( mage ):
"""
Computes the rotational mean about the center of the image. Generally used
    on the magnitude of Fourier transforms. Uses static variables that accelerate
the precomputation of the meshes if you call it repeatedly on the same
dimension arrays.
NOTE: returns both rmean, raxis so you must handle the raxis part.
Mage should be a power of two. If it's not, it's padded automatically
"""
if np.mod( mage.shape[1],2 ) == 1 and np.mod( mage.shape[0],2) == 1:
mage = np.pad( mage, ((0,1),(0,1)), 'edge' )
elif np.mod( mage.shape[1],2 ) == 1:
mage = np.pad( mage, ((0,0),(0,1)), 'edge' )
elif np.mod( mage.shape[0],2 ) == 1:
mage = np.pad( mage, ((0,1),(0,0)), 'edge' )
N = int( np.floor( mage.shape[1]/2.0 ) )
M = int( np.floor( mage.shape[0]/2.0 ) )
if N != rotmean.prevN or M != rotmean.prevM:
# Initialize everything
rotmean.prevN = N
rotmean.prevM = M
        rotmean.rmax = int( np.ceil( np.sqrt( N**2 + M**2 ) ) + 1 )
[xmesh, ymesh] = np.meshgrid( np.arange(-N, N), np.arange(-M, M) )
rmesh = np.sqrt( xmesh**2 + ymesh**2 )
rotmean.rfloor = np.floor( rmesh )
rotmean.remain = rmesh - rotmean.rfloor
# Make rfloor into an index look-up table
rotmean.rfloor = rotmean.rfloor.ravel().astype('int')
rotmean.rceil = (rotmean.rfloor+1).astype('int')
# Ravel
rotmean.remain = rotmean.remain.ravel()
rotmean.remain_n = 1.0 - rotmean.remain
# rotmean.weights = np.zeros( [rotmean.rmax] )
# weights_n = np.zeros( [rotmean.rmax] )
#
# weights_n[rotmean.rfloor] += rotmean.remain_n
# rotmean.weights[ (rotmean.rfloor+1) ] = rotmean.remain
# rotmean.weights += weights_n
rotmean.weights = np.bincount( rotmean.rceil, rotmean.remain ) + np.bincount( rotmean.rfloor, rotmean.remain_n, minlength=rotmean.rmax )
rotmean.raxis = np.arange(0,rotmean.weights.size)
else:
# Same size image as previous time
# Excellent now only 150 ms in here for 2k x 2k...
# Rotmean_old was 430 ms on the desktop
pass
# I can flatten remain and mage
mage = mage.ravel()
mage_p = mage * rotmean.remain
mage_n = mage * rotmean.remain_n
# rmean = np.zeros( np.size(rotmean.weights) )
# rmean_n = np.zeros( np.size(rotmean.weights) )
# Find lower ("negative") remainders
#rmean_n = np.bincount( rotmean.rfloor, mage_n )
#rmean_n[rotmean.rfloor] += mage_n
# Add one to indexing array and add positive remainders to next-neighbours in sum
#rmean[ (rotmean.rfloor+1) ] += mage_p
rmean = np.bincount( rotmean.rceil, mage_p ) + np.bincount( rotmean.rfloor, mage_n, minlength=rotmean.rmax )
# sum
# rmean += rmean_n
# and normalize sum to average
rmean /= rotmean.weights
return [rmean, rotmean.raxis]
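# Hedged usage sketch (illustration only): the typical application of
# rotmean is the rotationally-averaged power spectrum of an (even-sized)
# image, via the magnitude of its centered Fourier transform.
def _rotmean_demo( mage ):
    fftMag = np.abs( np.fft.fftshift( np.fft.fft2( mage ) ) )
    rmean, raxis = rotmean( fftMag )
    return rmean, raxis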
def normalize(a):
""" Normalizes the input to the range [0.0,1.0].
Returns floating point if integer data is passed in."""
if np.issubdtype( a.dtype, np.integer ):
a = a.astype( 'float' )
amin = a.min()
arange = (a.max() - amin)
a -= amin
a /= arange
return a
def imageShiftAndCrop( mage, shiftby ):
""" imageShiftAndCrop( mage, shiftby )
This is a relative shift, integer pixel only, pads with zeros to cropped edges
mage = input image
shiftby = [y,x] pixel shifts
"""
# Actually best approach is probably to roll and then zero out the parts we don't want
# The pad function is expensive in comparison
shiftby = np.array( shiftby, dtype='int' )
# Shift X
if(shiftby[1] < 0 ):
mage = np.roll( mage, shiftby[1], axis=1 )
mage[:, shiftby[1]+mage.shape[1]:] = 0.0
elif shiftby[1] == 0:
pass
else: # positive shift
mage = np.roll( mage, shiftby[1], axis=1 )
mage[:, :shiftby[1]] = 0.0
# Shift Y
if( shiftby[0] < 0 ):
mage = np.roll( mage, shiftby[0], axis=0 )
mage[shiftby[0]+mage.shape[0]:,:] = 0.0
elif shiftby[0] == 0:
pass
else: # positive shift
mage = np.roll( mage, shiftby[0], axis=0 )
mage[:shiftby[0],:] = 0.0
return mage
# Incorporate some static vars for the meshes?
# It's fairly trivial compared to the convolve cost, but if we moved the subPixShift
# outside it's possible.
# Best performance improvement would likely be to put it as a member function in
# ImageRegistrator so that it can work on data in-place.
def lanczosSubPixShift( imageIn, subPixShift, kernelShape=3, lobes=None ):
""" lanczosSubPixShift( imageIn, subPixShift, kernelShape=3, lobes=None )
imageIn = input 2D numpy array
subPixShift = [y,x] shift, recommened not to exceed 1.0, should be float
Random values of kernelShape and lobes gives poor performance. Generally the
lobes has to increase with the kernelShape or you'll get a lowpass filter.
Generally lobes = (kernelShape+1)/2
kernelShape=3 and lobes=2 is a lanczos2 kernel, it has almost no-lowpass character
kernelShape=5 and lobes=3 is a lanczos3 kernel, it's the typical choice
Anything with lobes=1 is a low-pass filter, but next to no ringing artifacts
"""
lanczos_filt = lanczosSubPixKernel( subPixShift, kernelShape=kernelShape, lobes=lobes )
# Accelerate this with a threadPool
imageOut = scipy.ndimage.convolve( imageIn, lanczos_filt, mode='reflect' )
return imageOut
def lanczosSubPixKernel( subPixShift, kernelShape=3, lobes=None ):
"""
Generate a kernel suitable for ni.convolve to subpixally shift an image.
"""
    kernelShape = np.atleast_1d( np.asarray( kernelShape, dtype='int' ) )
    if kernelShape.size == 1: # expand a scalar to a square [y,x] kernel shape
        kernelShape = np.array( [kernelShape[0], kernelShape[0]], dtype='int' )
    if lobes is None:
        lobes = (kernelShape[0]+1)/2
    x_range = np.arange(-kernelShape[1]/2,kernelShape[1]/2)+1.0-subPixShift[1]
    x_range = ( 2.0 / kernelShape[1] ) * x_range
    y_range = np.arange(-kernelShape[0]/2,kernelShape[0]/2)+1.0-subPixShift[0]
    y_range = ( 2.0 / kernelShape[0] ) * y_range
[xmesh,ymesh] = np.meshgrid( x_range, y_range )
lanczos_filt = np.sinc(xmesh * lobes) * np.sinc(xmesh) * np.sinc(ymesh * lobes) * np.sinc(ymesh)
lanczos_filt = lanczos_filt / np.sum(lanczos_filt) # Normalize filter output
return lanczos_filt
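# Minimal sketch (illustration only): the kernel is normalized to unit sum,
# so convolving with it shifts the image content without changing its mean
# intensity.
def _lanczos_kernel_demo():
    kernel = lanczosSubPixKernel( [0.25, -0.4], kernelShape=5, lobes=3 )
    return kernel.shape, np.sum( kernel )  # ((5, 5), ~1.0)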
def lanczosSubPixShiftStack( imageStack, translations, n_threads=16 ):
"""
Does subpixel translations shifts for a stack of images using a ThreadPool to distribute the load.
I could make this a general function utility by passing in the function handle.
"""
tPool = ThreadPool( n_threads )
if imageStack.ndim != 3:
raise ValueError( "lanczosSubPixShiftStack() only works on image stacks with Z-axis as the zero dimension" )
slices = imageStack.shape[0]
# Build the parameter list for the threaded processes, consisting of the index
tArgs = [None] * slices
for J in np.arange(slices):
tArgs[J] = (J, imageStack, translations)
# All operations are done 'in-place'
tPool.map( lanczosIndexedShift, tArgs )
tPool.close()
tPool.join()
def lanczosIndexedShift( params ):
""" lanczosIndexedShift( params )
params = (index, imageStack, translations, kernelShape=3, lobes=None)
imageStack = input 3D numpy array
translations = [y,x] shift, recommended not to exceed 1.0, should be float
Random values of kernelShape and lobes give poor performance. Generally the
lobes have to increase with the kernelShape or you'll get a low-pass filter.
Generally lobes = (kernelShape+1)/2
kernelShape=3 and lobes=2 is a lanczos2 kernel; it has almost no low-pass character
kernelShape=5 and lobes=3 is a lanczos3 kernel; it's the typical choice
Anything with lobes=1 is a low-pass filter, but has next to no ringing artifacts
If you cheat and pass in rounded shifts only the roll will be performed, so this can be used to accelerate
roll as well in a parallel environment.
"""
if len( params ) == 3:
[index, imageStack, translations] = params
kernelShape = 3
lobes = None
elif len( params ) == 4:
[index, imageStack, translations, kernelShape] = params
lobes = None
elif len( params ) == 5:
[index, imageStack, translations, kernelShape, lobes] = params
integer_trans = np.round( translations[index,:] ).astype('int')
# Integer shift
imageStack[index,:,:] = np.roll( np.roll( imageStack[index,:,:],
integer_trans[0], axis=0 ),
integer_trans[1], axis=1 )
# Subpixel shift
remain_trans = np.remainder( translations[index,:], 1)
if not (np.isclose( remain_trans[0], 0.0) and np.isclose( remain_trans[1], 0.0) ):
kernel = lanczosSubPixKernel( remain_trans, kernelShape=kernelShape, lobes=lobes )
# RAM: I tried to use the out= keyword but it's perhaps not thread-safe.
imageStack[index,:,:] = scipy.ndimage.convolve( imageStack[index,:,:], kernel, mode='reflect' )
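# Example (a minimal sketch): shift every frame of a stack in-place, one [y,x]
# translation per frame, with the work spread over a ThreadPool. Integer parts
# are applied by np.roll and sub-pixel remainders by the Lanczos kernel above.
#   stack = np.random.normal( size=[10,128,128] ).astype( 'float32' )
#   trans = np.random.uniform( -1.0, 1.0, size=[10,2] )
#   lanczosSubPixShiftStack( stack, trans, n_threads=4 )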
def img2polar(img, center=None, final_radius=None, initial_radius = None, phase_width = None, mode='linear', interpolate='bilinear'):
""" Convert a Cartesian image into polar coordinates.
Center is where to rotate about, typically [M/2, N/2]
final_radius is the maximum r value to interpolate out to, typically N/2
initial_radius is where to start (can chop off the center if desired)
phase_width is the pixel count in angle (x-axis)
mode is whether to operate on the log of the radius or not
'linear' = linear radius
'log' log(radius)
interpolate is the interpolation method,
'nn' = nearest neighbour ( should be smoothed afterward)
'bilinear' = bilinear (recommended)
Can only pass in 2-D images at present.
"""
shapeImage = np.array( img.shape )
if center is None:
center = np.round( shapeImage/2.0 )
if final_radius is None:
final_radius = np.min( np.floor(shapeImage/2.0) )
if initial_radius is None:
initial_radius = 0
if phase_width is None:
phase_width = final_radius - initial_radius
if mode == 'lin' or mode == 'linear':
theta , R = np.meshgrid( np.linspace(0, 2*np.pi, phase_width), np.arange(initial_radius, final_radius))
elif mode == 'log':
theta , R = np.meshgrid( np.linspace(0, 2*np.pi, phase_width),
np.logspace(np.log10(1.0+initial_radius), np.log10(final_radius), final_radius-initial_radius) )
R = np.exp( R /( (final_radius-initial_radius)*2.)*np.log( phase_width ) )
Xcart = R * np.cos(theta) + center[1]
Ycart = R * np.sin(theta) + center[0]
if( interpolate == 'nn'):
Xcart = Xcart.astype(int)
Ycart = Ycart.astype(int)
polar_img = img[Ycart,Xcart]
polar_img = np.reshape(polar_img,(final_radius-initial_radius,phase_width))
elif( interpolate == 'bilinear' ):
Xfloor = np.floor( Xcart )
Yfloor = np.floor( Ycart )
Xremain = Xcart - Xfloor
Yremain = Ycart - Yfloor
Xfloor = Xfloor.astype('int') # Can be used for indexing now
Yfloor = Yfloor.astype('int')
# Need to pad the input array by one pixel on the far edge
img = np.pad( img, ((0,1),(0,1)), mode='symmetric' )
# Index the four points
polar_img = img[Yfloor+1,Xfloor+1] *Xremain*Yremain
polar_img += img[Yfloor,Xfloor] * (1.0 - Xremain)*(1.0 - Yremain)
polar_img += img[Yfloor+1,Xfloor] * (1.0 - Xremain)*Yremain
polar_img += img[Yfloor,Xfloor+1] * Xremain*(1.0 - Yremain)
# Crop the far edge, because we interpolated one pixel too far
# polar_img = polar_img[:-1,:-1]
polar_img = np.reshape(polar_img,( np.int(final_radius-initial_radius), np.int(phase_width) ))
return polar_img
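# Example (a minimal sketch): log-polar transform of a centered FFT magnitude,
# a common pre-processing step for estimating rotation and scale changes.
#   fftMag = np.abs( np.fft.fftshift( np.fft.fft2( img ) ) )
#   polarMag = img2polar( fftMag, mode='log', interpolate='bilinear' )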
def interp2_bilinear(im, x, y):
"""
Ultra-fast interpolation routine for 2-D images. x and y are meshes. The
coordinates of the image are assumed to be 0,shape[0], 0,shape[1]
BUG: This is sometimes skipping the last row and column
"""
x = np.asarray(x)
y = np.asarray(y)
x0 = np.floor(x).astype(int)
x1 = x0 + 1
y0 = np.floor(y).astype(int)
y1 = y0 + 1
# RAM: center this clipping with a roll?
x0 = np.clip(x0, 0, im.shape[1]-1)
x1 = np.clip(x1, 0, im.shape[1]-1)
y0 = np.clip(y0, 0, im.shape[0]-1)
y1 = np.clip(y1, 0, im.shape[0]-1)
Ia = im[ y0, x0 ]
Ib = im[ y1, x0 ]
Ic = im[ y0, x1 ]
Id = im[ y1, x1 ]
wa = (x1-x) * (y1-y)
wb = (x1-x) * (y-y0)
wc = (x-x0) * (y1-y)
wd = (x-x0) * (y-y0)
return wa*Ia + wb*Ib + wc*Ic + wd*Id
def interp2_nn( im, x, y ):
"""
Fast nearest neighbour interpolation, used for more advanced (filtered)
methods such as Lanczos filtering. x and y are meshes. The coordinates of
the image are assumed to be 0,shape[0], 0,shape[1]
"""
# We use floor instead of round because otherwise we end up with a +(0.5,0.5) pixel shift
px = np.floor(x).astype(int)
py = np.floor(y).astype(int)
# Clip checking, could be more efficient because px and py are sorted...
px = np.clip( px, 0, im.shape[1]-1 )
py = np.clip( py, 0, im.shape[0]-1 )
return im[py,px]
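# Example (a minimal sketch): resample an image onto a 2x finer mesh with
# interp2_bilinear; interp2_nn takes the same (im, x, y) arguments.
#   ymesh, xmesh = np.meshgrid( np.arange( 0, img.shape[0]-1, 0.5 ),
#                               np.arange( 0, img.shape[1]-1, 0.5 ), indexing='ij' )
#   img2x = interp2_bilinear( img, xmesh, ymesh )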
def backgroundEstimate( input_image, fitType='gauss2', binFact=128, lpSigma=4.0 ):
"""
Fits a 2D gaussian to a micrograph (which is heavily binned and Gaussian filtered) and returns the estimated
background. In general this is a very robust means to deal with non-uniform illumination or uneven ice.
Uses Powell minimization; the more robust (but slower) basin-hopping call is left commented out below.
If the background is extremely weak (std < 1.0) then the fitting is ignored and just the mean value is reported.
"""
# background fitting
xcrop, ycrop = np.meshgrid( np.arange(0,input_image.shape[1], binFact), np.arange(0,input_image.shape[0], binFact) )
nn_sum = interp2_nn( scipy.ndimage.gaussian_filter( input_image, lpSigma ), xcrop, ycrop )
# Crop 1 from each zero-edge and 2 pixels from each end-edge to avoid edge artifacts in fitting procedure
# This is compensated for in the xback, yback meshes below
xmesh = xcrop.astype('float32') / binFact - xcrop.shape[1]/2.0
ymesh = ycrop.astype('float32') / binFact - ycrop.shape[0]/2.0
xmesh = xmesh[1:-2, 1:-2]
ymesh = ymesh[1:-2, 1:-2]
nn_sum = nn_sum[1:-2, 1:-2]
# Maybe you need to add an x_c*y_c term? On experimentation the cross-term doesn't help
def gauss2( p, x_in, y_in ):
x_c = x_in - p[1]
y_c = y_in - p[2]
return p[0] + p[3]*np.exp(-x_c*x_c/p[4]**2 - y_c*y_c/p[5]**2)
def errorGauss2( p, c ):
x_c = xmesh - p[1]
y_c = ymesh - p[2]
return np.sum( np.abs( c - (p[0] + p[3]*np.exp(-x_c*x_c/p[4]**2 - y_c*y_c/p[5]**2) ) ) )
paramMat = np.ones( 6, dtype='float64' )
paramMat[0] = np.mean( nn_sum )
paramMat[3] = np.mean( nn_sum )
paramMat[4] = np.mean( nn_sum ) * np.std( nn_sum )
paramMat[5] = paramMat[4]
fitGauss2D = scipy.optimize.minimize( errorGauss2, paramMat, method='Powell', args=(nn_sum,) )
# fitGauss2D = scipy.optimize.basinhopping( errorGauss2, paramMat, minimizer_kwargs={'args':(nn_sum,), 'method':"Powell"} )
xback, yback = np.meshgrid( np.arange(input_image.shape[1]), np.arange(input_image.shape[0]) )
xback = xback.astype('float32') - input_image.shape[1]/2.0
xback /= binFact
yback = yback.astype('float32') - input_image.shape[0]/2.0
yback /= binFact
if fitGauss2D.success:
back = gauss2( fitGauss2D.x, xback, yback )
return back
else: # Failure, have no effect
print( "Background estimation failed" )
return np.zeros_like( input_image )
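# Example (a minimal sketch; 'micrograph' is a hypothetical 2-D numpy array):
# flatten uneven illumination by subtracting the fitted background.
#   flat = micrograph - backgroundEstimate( micrograph, binFact=128 )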
def magickernel( imageIn, k=1, direction='down' ):
"""
magickernel( imageIn, k=1, direction='down' )
k = number of binning operations, so k = 3 bins by 8 x 8
Implementation of the magickernel for power of 2 image resampling. Generally
should be used to get two images 'close' in size before using a more aggressive resampling
method like bilinear.
direction is either 'up' (make image 2x bigger) or 'down' (make image 2x smaller)
k is the number of iterations to apply it.
"""
if k > 1:
imageIn = magickernel( imageIn, k=k-1, direction=direction )
if direction == 'up':
h = np.array( [[0.25, 0.75, 0.75, 0.25]] )
h = h* np.transpose(h)
imageOut = np.zeros( [ 2*imageIn.shape[0], 2*imageIn.shape[1] ] )
# Slice the input image interlaced into the larger output image
imageOut[1::2,1::2] = imageIn
# Apply the magic kernel
imageOut = scipy.ndimage.convolve( imageOut, h )
elif direction == 'down':
imageIn = np.pad( imageIn, [1,1], 'reflect' )
h = 0.5*np.array( [[0.25, 0.75, 0.75, 0.25]] )
h = h* np.transpose(h)
# This is computationally a little expensive, we are only using one in four values afterward
imageOut = scipy.ndimage.convolve( imageIn, h)
# Slicing is (start:stop:step)
imageOut = imageOut[0:-2:2,0:-2:2]
else:
raise ValueError( "magickernel: direction must be 'up' or 'down'" )
return imageOut
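# Example (a minimal sketch): bin an image down 4x (k=2 halvings) and back up;
# the round-trip acts as a gentle low-pass filter.
#   small = magickernel( img, k=2, direction='down' )
#   big = magickernel( small, k=2, direction='up' )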
def squarekernel( imageIn, k=1, direction='down' ):
"""
squarekernel( imageIn, k=1, direction='down' )
k = number of binning operations, so k = 3 bins by 8 x 8
Implementation of a square kernel for power of 2 image resampling, i.e. rebinning
direction is either 'up' (make image 2x bigger) or 'down' (make image 2x smaller)
k is the number of iterations to apply it.
"""
if k > 3:
# Recurse for large bin factors; each outer level then bins by a further 2 x 2
imageIn = squarekernel( imageIn, k=(k-1), direction=direction )
k = 1
if k == 1:
h = np.array( [[1.0, 1.0]] )
step = 2
elif k == 2:
h = np.array( [[1.0,1.0,1.0,1.0]] )
step = 4
elif k == 3:
h = np.array( [[1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0]] )
step = 8
h = h * np.transpose(h)
if direction == 'up':
imageOut = np.zeros( [ 2*imageIn.shape[0], 2*imageIn.shape[1] ], dtype=imageIn.dtype )
# Slice the input image interlaced into the larger output image
imageOut[1::2,1::2] = imageIn
# Apply the magic kernel
imageOut = scipy.ndimage.convolve( imageOut, h )
elif direction == 'down':
# This is computationally a little expensive, we are only using one in four values afterward
imageOut = scipy.ndimage.convolve( imageIn, h )
# Slicing is (start:stop:step)
imageOut = imageOut[0:-1:step,0:-1:step]
else:
raise ValueError( "squarekernel: direction must be 'up' or 'down'" )
return imageOut
def imHist(imdata, bins_=256):
'''Compute image histogram.
[histIntensity, histX] = imHist( imageData, bins_=256 )
'''
im_values = np.ravel(imdata)
hh, bins_ = np.histogram( im_values, bins=bins_ )
# np.histogram returns bin edges; trim to match the counts if necessary
if len(bins_) != len(hh):
bins_ = bins_[:-1] # 'bins_' == bin_edges
return hh, bins_
def histClim( imData, cutoff = 0.01, bins_ = 512 ):
'''Compute display range based on a confidence interval-style, from a histogram
(i.e. ignore the 'cutoff' proportion lowest/highest value pixels)'''
if( cutoff <= 0.0 ):
return imData.min(), imData.max()
# compute image histogram
hh, bins_ = imHist(imData, bins_)
hh = hh.astype( 'float' )
# number of pixels
Npx = np.sum(hh)
hh_csum = np.cumsum( hh )
# Find indices where hh_csum is < and > Npx*cutoff
try:
i_forward = np.argwhere( hh_csum < Npx*(1.0 - cutoff) )[-1][0]
i_backward = np.argwhere( hh_csum > Npx*cutoff )[0][0]
except IndexError:
print( "histClim failed, returning confidence interval instead" )
from scipy.special import erfinv
sigma = np.sqrt(2) * erfinv( 1.0 - cutoff )
return ciClim( imData, sigma )
clim = np.array( [bins_[i_backward], bins_[i_forward]] )
if clim[0] > clim[1]:
clim = np.array( [clim[1], clim[0]] )
return clim
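# Example (a minimal sketch): display an image while clipping the brightest and
# darkest 0.1 % of pixels, which would otherwise dominate the color scale.
#   clim = histClim( img, cutoff=1E-3 )
#   plt.imshow( img, vmin=clim[0], vmax=clim[1] )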
def ciClim( imData, sigma = 2.5 ):
"""
Confidence interval color limits, for images. Most useful for highly spikey data.
"""
meanData = np.mean( imData )
stdData = np.std( imData )
return np.array( [meanData - sigma*stdData, meanData + sigma*stdData] )
def plotScalebar( mage, pixelsize, units='nm', color='r', forceWidth=None ):
"""
Pass in an image objectand a pixelsize, and function will add a properly scaled
scalebar to it.
mage is the return from plt.imshow(), i.e. a matplotlib.
pixelsize is what it says it is
units can be any string
color can be any matplotlib recognized color type (array or string)
forceWidth sets the width to forceWidth units
Note: auto-sets the image extent, do not use with plt.axis('image')
"""
# Figure out scalesize from plot extent
magesize = mage.get_size()
# This is scaling the image!
# mage.set_extent( [0.0, magesize[1]*pixelsize, 0.0, magesize[0]*pixelsize] )
if forceWidth is None:
targetwidth = 0.16
targetValue = targetwidth * magesize[1] * pixelsize
pow10 = np.int( np.floor( np.log10( targetValue ) ) )
scalevalue = np.round( targetValue, decimals=-pow10 )
scalesize = scalevalue / pixelsize
else:
scalevalue = forceWidth
scalesize = forceWidth / pixelsize
# Use %g formatter here.
textscale = r'$%2g'%scalevalue + r'\/' + units + "$"
scalebar1 = mpl_toolkits.axes_grid1.anchored_artists.AnchoredSizeBar( mage.axes.transData, scalesize, textscale,
pad=0.2, loc=4, sep=7, borderpad=0.4, frameon=True)
scalebar1.txt_label._text.set_color( color )
scalebar1.txt_label._text.set_weight( 'bold' )
scalebar1.txt_label._text.set_size( 18 )
scalebar1.size_bar.get_children()[0].set( color=color, linewidth=6.0 )
scalebar1.patch.set(alpha=0.66, boxstyle='round')
# plt.gca().add_artist(scalebar1)
mage.axes.add_artist(scalebar1)
plt.pause(0.05) # Often scalebar isn't rendered before plt.show calls.
pass
def plotHistClim( mage, cutoff=1E-3, colorbar=False, cbartitle="" ):
"""
Pass in an image object and a pixelsize, and function will change
mage is the return from plt.imshow(), i.e. a matplotlib.
cutoff is the histogram cutoff passed into histClim
colorbar=True will add a colorbar to the plot
"""
clim = histClim( mage.get_array(), cutoff=cutoff )
mage.set_clim( vmin=clim[0], vmax=clim[1] )
if bool(colorbar):
cbar = plt.colorbar(mage)
cbar.set_label(cbartitle, rotation=270)
pass
############### MISCELLANEOUS ###############
def powerpoly1( x, a1, b1, a2, c1 ):
return a1*(x**b1) + a2*x + c1
def fit( x, y, funchandle='gauss1', estimates=None ):
""" Returns: fitstruct, fitY, Rbest """
from scipy.optimize import curve_fit
from scipy.stats.stats import linregress
if funchandle == 'gauss1':
def fitfunc( x, a1, b1, c1 ):
return a1 * np.exp( -( (x-b1)/ c1)**2 )
# Really arbitrary c1 estimate at basically 25 pixels..
if estimates is None:
estimates = np.array( [np.max(y), x[np.argmax(y)], 25.0*(x[1]-x[0]) ] )
elif funchandle == 'poly1':
def fitfunc( x, a1, b1 ):
return a1 * x + b1
if estimates is None:
slope = (np.max(y)-np.min(y))/(np.max(x)-np.min(x))
intercept = np.min(y) - slope*x[np.argmin(y)]
estimates = [slope, intercept]
elif funchandle == 'poly2':
def fitfunc( x, a1, b1, c1 ):
return a1 * x **2.0 + b1 *x + c1
if estimates is None:
slope = (np.max(y)-np.min(y))/(np.max(x)-np.min(x))
intercept = np.min(y) - slope*x[np.argmin(y)]
estimates = [0.0, slope, intercept]
elif funchandle == 'poly3':
def fitfunc( x, a1, b1, c1, d1 ):
return a1 * x **3.0 + b1 *x**2.0 + c1*x + d1
if estimates is None:
slope = (np.max(y)-np.min(y))/(np.max(x)-np.min(x))
intercept = np.min(y) - slope*x[np.argmin(y)]
estimates = [0.0, 0.0, slope, intercept]
elif funchandle == 'poly5':
def fitfunc( x, a1, b1, c1, d1, e1, f1 ):
return a1 * x **5.0 + b1 *x**4.0 + c1*x**3.0 + d1*x**2.0 + e1*x + f1
if estimates is None:
slope = (np.max(y)-np.min(y))/(np.max(x)-np.min(x))
intercept = np.min(y) - slope*x[np.argmin(y)]
estimates = [0.0, 0.0, 0.0, 0.0, slope, intercept]
elif funchandle == 'abs1':
def fitfunc( x, a1 ):
return a1 * np.abs( x )
if estimates is None:
estimates = np.array( [ (np.max(y)-np.min(y))/(np.max(x)-np.min(x))])
elif funchandle == 'exp':
def fitfunc( x, a1, c1 ):
return a1 * np.exp( c1*x )
if estimates is None:
estimates = np.array( [1.0, -1.0] )
elif funchandle == 'expc':
def fitfunc( x, a1, c1, d1 ):
return a1 * np.exp( c1*x ) + d1
if estimates is None:
estimates = np.array( [1.0, -1.0, 1.0] )
elif funchandle == 'power1':
def fitfunc( x, a1, b1 ):
return a1*(x**b1)
if estimates is None:
estimates = np.array( [1.0, -2.0] )
elif funchandle == 'power2':
def fitfunc( x, a1, b1, c1 ):
return a1*(x**b1) + c1
if estimates is None:
estimates = np.array( [1.0, -2.0, 1.0] )
elif funchandle == 'powerpoly1':
def fitfunc( x, a1, b1, a2, c1 ):
return a1*(x**b1) + a2*x + c1
if estimates is None:
estimates = np.array( [1.0, -2.0, 0.0, 1.0] )
else:
fitfunc = funchandle
try:
fitstruct, pcov = curve_fit( fitfunc, x, y, p0=estimates )
perr = np.sqrt(np.diag(pcov))
print( "Fitting completed with perr = " + str(perr) )
fitY = fitfunc( x, *fitstruct )
goodstruct = linregress( x, fitfunc( x, *fitstruct ) )
Rbest = goodstruct[2]
except RuntimeError:
print( "RAM: Curve fitting failed")
return
return fitstruct, fitY, Rbest
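# Example (a minimal sketch): fit a 1-D Gaussian to noisy data; Rbest is the
# linear-regression correlation coefficient of the fitted curve.
#   x = np.linspace( -5.0, 5.0, 200 )
#   y = 3.0*np.exp( -((x-0.5)/1.2)**2 ) + np.random.normal( scale=0.1, size=200 )
#   pFit, yFit, Rbest = fit( x, y, funchandle='gauss1' )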
def guessCfgType( value ):
# For guessing the data type (bool, integer, float, or string only) from ConfigParser
if value.lower() == 'true':
return True
if value.lower() == 'false':
return False
try:
return np.int( value )
except ValueError:
pass
try:
return np.float32( value )
except ValueError:
pass
return value
def weightedErrorNorm( x, A, b, weights ):
# weighted error to set of shifts
return np.sum( weights * np.abs(np.dot( A, x) - b) )
def errorNorm( x, A, b ):
# No damping
return np.sum( np.abs(np.dot( A, x) - b) )
# Fit a logistic curve to the sigmoid... and use that as the weighting curve???
def logistic( peaksAxis, SigmaThres, K, Nu):
return 1.0 - 1.0 / (1.0 + np.exp( -K*(-peaksAxis + SigmaThres) ) )**Nu
# So peaksig_axis and cdf are immutable, only K, SigmaThres, and Nu should be optimized
def minLogistic( x, hSigma, cdfPeaksig ):
return np.float32( np.sum( np.abs( cdfPeaksig - (1.0 - 1.0 / (1.0 + np.exp( -x[1]*(-hSigma + x[0]) ) )**np.float64(x[2]) )) ) )
def which( program ):
# Tries to locate a program
import os
if os.name == 'nt':
program_ext = os.path.splitext( program )[1]
if program_ext == "":
prog_exe = which( program + ".exe" )
if prog_exe is not None:
return prog_exe
return which( program + ".com" )
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def compressFile( filename, compress_ext = '.bz2', outputDir = None, n_threads=None ):
if os.path.isfile( filename + compress_ext ):
os.remove( filename + compress_ext )
if n_threads is None:
# print( "Warning: zorro_util.compressFile defaulting to 8 threads" )
n_threads = 8
relpath, basename = os.path.split( filename )
if outputDir is None:
outputDir = relpath
newFilename = os.path.join( outputDir, basename + compress_ext )
# WTH is going on here... why is lbzip2 crapping out?
if compress_ext == '.bz2' and which( 'lbzip2' ) is not None:
sub = subprocess.Popen( "lbzip2 -n %d"%n_threads +" -1 -c " + filename + " > " + newFilename, shell=True )
elif compress_ext == '.gz' and which( 'pigz' ) is not None:
sub = subprocess.Popen( "pigz -p %d"%n_threads + " -1 -c " + filename + " > " + newFilename, shell=True )
elif which( '7z' ) is not None:
if compress_ext == '.bz2':
sub = subprocess.Popen( "7z a -tbzip2 " + newFilename + " " + filename, shell=True )
elif compress_ext == '.gz':
sub = subprocess.Popen( "7z a -tgzip " + newFilename + " " + filename, shell=True )
else:
print( "Warning: cannot compress " + filename + compress_ext )
return
sub.wait() # Block until the compressor finishes
#print( "compressFile: Trying to remove: " + filename )
os.remove( filename )
return newFilename
def decompressFile( filename, outputDir = None, n_threads = None ):
relpath, file_front = os.path.split( filename )
[file_front, file_ext] = os.path.splitext( file_front )
file_ext = file_ext.lower()
if n_threads is None:
# print( "Warning: zorro_util.decompressFile defaulting to 8 threads" )
n_threads = 8
if outputDir is None:
outputDir = relpath
newFilename = os.path.join(outputDir,file_front)
#### COMPRESSED FILES ####
if file_ext == '.bz2':
if which('lbzip2') is not None:
sub = subprocess.Popen( "lbzip2 -n %d"%n_threads +" -d -c " + filename +" > " + newFilename, shell=True ) # File is now decompressed
elif which('7z') is not None:
sub = subprocess.Popen( "7z e -o" + outputDir + " " + filename, shell=True )
else:
print( "Neither lbzip2 nor 7z found in path, cannot decompress files.")
return filename
elif file_ext == '.gz':
if which('pigz') is not None:
sub = subprocess.Popen( "pigz -p %d"%n_threads +" -d -c " + filename + " > " + newFilename, shell=True ) # File is now decompressed
# Make new filename from filefront
elif which( '7z' ) is not None:
sub = subprocess.Popen( "7z e -o" + outputDir + " " + filename, shell=True )
else:
print( "Neither pigz nor 7z found in path, cannot decompress files.")
return filename
else:
# Do nothing
return filename
# We can only get here if a decompression subprocess was actually launched
sub.wait() # Block until the decompressor finishes
#print( "decompressFile: Trying to remove: " + filename )
os.remove( filename ) # Remove the original; the shell redirect keeps the compressor from deleting its input, so we do it ourselves
return newFilename # Make new filename from filefront
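# Example (a minimal sketch; requires lbzip2, pigz, or 7z on the system PATH;
# the stack filename is hypothetical):
#   packed = compressFile( "movie_stack.mrcs", compress_ext='.bz2', n_threads=4 )
#   original = decompressFile( packed )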
############### pyFFTW interface ###############
def pyFFTWPlanner( realMage, fouMage=None, wisdomFile = None, effort = 'FFTW_MEASURE', n_threads = None, doForward = True, doReverse = True ):
"""
Appends an FFTW plan for the given realMage to a text file stored in the same
directory as RAMutil, which can then be loaded in the future with pyFFTWLoadWisdom.
NOTE: realMage should be typecast to 'complex64' normally.
NOTE: planning pickle files are hardware dependent, so don't copy them from one
machine to another. wisdomFile allows you to specify a .pkl file with the wisdom
tuple written to it. The wisdomFile is never updated, whereas the default
wisdom _is_ updated with each call. For multiprocessing, it's important to
let FFTW generate its plan from an ideal processor state.
TODO: implement real, half-space fourier transforms rfft2 and irfft2 as built
"""
import pyfftw
import pickle
import os.path
from multiprocessing import cpu_count
utilpath = os.path.dirname(os.path.realpath(__file__))
# First import whatever we already have
if wisdomFile is None:
wisdomFile = os.path.join( utilpath, "pyFFTW_wisdom.pkl" )
if os.path.isfile(wisdomFile):
try:
fh = open( wisdomFile, 'rb')
except IOError:
print( "Util: pyFFTW wisdom plan file: " + str(wisdomFile) + " invalid/unreadable" )
try:
pyfftw.import_wisdom( pickle.load( fh ) )
except Exception:
# This is not normally a problem; the wisdom file might simply be empty
print( "Util: pickle failed to import FFTW wisdom" )
try:
fh.close()
except Exception:
pass
else:
# Touch the file
os.umask(0000) # Everyone should be able to delete scratch files
with open( wisdomFile, 'wb') as fh:
pass
# I think the fouMage array has to be smaller to do the real -> complex FFT?
if fouMage is None:
if realMage.dtype.name == 'float32':
print( "pyFFTW is recommended to work on purely complex data" )
fouShape = np.array( realMage.shape )
fouShape[-1] = realMage.shape[-1]//2 + 1
fouDtype = 'complex64'
fouMage = np.empty( fouShape, dtype=fouDtype )
elif realMage.dtype.name == 'float64':
print( "pyFFTW is recommended to work on purely complex data" )
fouShape = np.array( realMage.shape )
fouShape[-1] = realMage.shape[-1]//2 + 1
fouDtype = 'complex128'
fouMage = np.empty( fouShape, dtype=fouDtype )
else: # Assume dtype is complexXX
fouDtype = realMage.dtype.name
fouMage = np.zeros( realMage.shape, dtype=fouDtype )
if n_threads is None:
n_threads = cpu_count()
print( "FFTW using " + str(n_threads) + " threads" )
if bool(doForward):
#print( "Planning forward pyFFTW for shape: " + str( realMage.shape ) )
FFT2 = pyfftw.builders.fft2( realMage, planner_effort=effort,
threads=n_threads, auto_align_input=True )
else:
FFT2 = None
if bool(doReverse):
#print( "Planning reverse pyFFTW for shape: " + str( realMage.shape ) )
IFFT2 = pyfftw.builders.ifft2( fouMage, planner_effort=effort,
threads=n_threads, auto_align_input=True )
else:
IFFT2 = None
# Setup so that we can call .execute on each one without re-copying arrays
# if FFT2 is not None and IFFT2 is not None:
# FFT2.update_arrays( FFT2.get_input_array(), IFFT2.get_input_array() )
# IFFT2.update_arrays( IFFT2.get_input_array(), FFT2.get_input_array() )
# Something is different in the builders compared to FFTW directly.
# Can also repeat this for pyfftw.builders.rfft2 and .irfft2 if desired, but
# generally it seems slower.
# Opening a file for writing is supposed to truncate it
# if bool(savePlan):
#if wisdomFile is None:
# with open( utilpath + "/pyFFTW_wisdom.pkl", 'wb') as fh:
with open( wisdomFile, 'wb' ) as fh:
pickle.dump( pyfftw.export_wisdom(), fh )
return FFT2, IFFT2
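# Example (a minimal sketch): plan once, then re-use the transform objects;
# update_arrays() plus execute() avoids re-planning and extra copies.
#   data = np.zeros( [3838, 3710], dtype='complex64' )
#   FFT2, IFFT2 = pyFFTWPlanner( data, n_threads=4 )
#   FFT2.update_arrays( data, FFT2.get_output_array() ); FFT2.execute()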
def findValidFFTWDim( inputDims ):
"""
Finds a valid dimension for which FFTW can optimize its calculations. The
return is a shape which is forced to be square, as this gives uniform pixel
size in x-y in Fourier space.
If you want a minimum padding size, call as findValidFFTWDim( image.shape + 128 )
or similar.
"""
dim = np.max( np.round( inputDims ) )
maxPow2 = np.int( np.ceil( math.log( dim, 2 ) ) )
maxPow3 = np.int( np.ceil( math.log( dim, 3 ) ) )
maxPow5 = np.int( np.ceil( math.log( dim, 5 ) ) )
maxPow7 = np.int( np.ceil( math.log( dim, 7 ) ) )
dimList = np.zeros( [(maxPow2+1)*(maxPow3+1)*(maxPow5+1)*(maxPow7+1)] )
count = 0
for I in np.arange(0,maxPow7+1):
for J in np.arange(0,maxPow5+1):
for K in np.arange(0,maxPow3+1):
for L in np.arange(0,maxPow2+1):
dimList[count] = 2**L * 3**K * 5**J * 7**I
count += 1
dimList = np.sort( np.unique( dimList ) )
dimList = dimList[ np.argwhere(dimList < 2*dim)].squeeze()
dimList = dimList.astype('int64')
# Throw out odd image shapes, this just causes more problems with many
# functions
dimList = dimList[ np.mod(dimList,2)==0 ]
# Find first dim that equals or exceeds dim
nextValidDim = dimList[np.argwhere( dimList >= dim)[0,0]]
return np.array( [nextValidDim, nextValidDim] ) | zorroautomator | /zorroautomator-0.7.4b0.tar.gz/zorroautomator-0.7.4b0/zorro/zorro_util.py | zorro_util.py |
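# Example (a minimal sketch): find a fast, even, square FFT shape with at least
# 128 pixels of padding around a 3838 x 3710 K2 frame; the result is built from
# factors of 2, 3, 5, and 7 only.
#   padShape = findValidFFTWDim( np.array( [3838, 3710] ) + 128 )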
from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np
if np.version.version.split('.')[1] == '7':
print( "WARNING: NUMPY VERSION 1.7 DETECTED, ZORRO IS DESIGNED FOR >1.10" )
print( "CHECK YOUR ENVIRONMENT VARIABLES TO SEE IF EMAN2 HAS HIJACKED YOUR PYTHON DISTRIBUTION" )
import numexprz as nz
# Now see which numexpr we have, by the dtype of float (whether it casts or not)
try:
tdata = np.complex64( 1.0 + 2.0j )
fftw_dtype = nz.evaluate( 'tdata + tdata' ).dtype
float_dtype = nz.evaluate( 'real(tdata+tdata)' ).dtype
except Exception:
fftw_dtype = 'complex128'
float_dtype = 'float64'
import scipy.optimize
import scipy.ndimage
import scipy.stats
import time
try:
import ConfigParser as configparser # Python 2
except ImportError:
import configparser # Python 3
# Here we have to play some games depending on where the file was called from
# with the use of absolute_import
# print( "__name__ of zorro: " + str(__name__) )
try:
import zorro_util as util
import zorro_plotting as plot
except ImportError:
from . import zorro_util as util
from . import zorro_plotting as plot
import mrcz
import os, os.path, tempfile, sys
import subprocess
# Should we disable Multiprocessing on Windows due to general bugginess in the module?
import multiprocessing as mp
try:
import pyfftw
except ImportError:
print( "Zorro did not find pyFFTW package: get it at https://pypi.python.org/pypi/pyFFTW" )
try:
import tables
except ImportError:
print( "Zorro did not find pyTables installation for HDF5 file support" )
import matplotlib.pyplot as plt
# Older numpy.pad mishandles mixed byte/unicode mode strings, so pick them by interpreter version
if sys.version_info >= (3,0):
symmetricPad = u'symmetric'
constantPad = u'constant'
else:
symmetricPad = b'symmetric'
constantPad = b'constant'
#### OBJECT-ORIENTED INTERFACE ####
class ImageRegistrator(object):
# Should be able to handle differences in translation, rotation, and scaling
# between images
def __init__( self ):
# Declare class members
self.verbose = 0
self.umask = 2
# Meta-information for processing, not saved in configuration files.
self.METApriority = 0.0
self.METAstatus = u'new'
self.METAmtime = 0.0
self.METAsize = 0
self.xcorrMode = 'zorro' # 'zorro', 'unblur v1.02', 'motioncorr v2.1'
# FFTW_PATIENT is bugged for powers of 2, so use FFTW_MEASURE as default
self.fftw_effort = u"FFTW_MEASURE"
# TODO: change this to drop into cachePath
self.n_threads = nz.nthreads # Number of cores to limit FFTW to, if None uses all cores
self.cachePath = tempfile.gettempdir()
# CALIBRATIONS
self.pixelsize = None # Typically we use nanometers, the same units as Digital Micrograph
self.voltage = 300.0 # Accelerating voltage, kV
self.C3 = 2.7 # Spherical aberration of objective, mm
self.gain = None
self.detectorPixelSize = None # Physical dimensions of detector pixel (5 um for K2)
# Timings
self.bench = {} # Dict holds various benchmark times for the code
self.saveC = False # Save the cross-correlation within +/- maxShift
# INFORMATION REDUCTION
# The SNR at high spatial frequencies tends to be lower due to how information transfer works, so
# removing/filtering those frequencies can improve stability of the registration. YMMV, IMHO, etc.
self.Brad = 512 # Gaussian low-pass applied to data before registration, units are radius in Fourier space, or equivalent point-spread function in real-space
self.Bmode = u'opti' # can be a real-space Gaussian convolution, 'conv' or Fourier filter, 'fourier', or 'opti' for automatic Brad
# For Bmode = 'fourier', a range of available filters can be used: gaussian, gauss_trunc, butterworth.order (order is an int), hann, hamming
self.BfiltType = u'gaussian'
self.fouCrop = [3072,3072] # Size of FFT in frequency-space to crop to (e.g. [2048,2048])
self.reloadData = True
# Data
self.images = None
self.imageSum = None
self.filtSum = None # Dose-filtered, Wiener-filtered, etc. representations go here
self.gainRef = None # For application of gain reference in Zorro rather than Digital Micrograph/TIA/etc.
self.gainInfo = {
"Horizontal": True, "Vertical": True, "Diagonal":False,
"GammaParams": [ 0.12035633, -1.04171635, -0.03363192, 1.03902726],
}
# One of None, 'dose', 'dose,background', 'dosenorm', 'gaussLP', 'gaussLP,background'
# also 'hot' can be in the comma-separated list for pre-filtering of hot pixels
self.filterMode = None
# Dose filt param = [dosePerFrame, critDoseA, critDoseB, critDoseC, cutoffOrder, missingStartFrame]
self.doseFiltParam = [None, 0.24499, -1.6649, 2.8141, 32, 0]
# for 'hot' in filterMode
self.hotpixInfo = { u"logisticK":6.0, u"relax":0.925, u"maxSigma":8.0, u"psf": u"K2",
u"guessHotpix":0, u"guessDeadpix":0, u"decorrOutliers":False,
u"cutoffLower":-4.0, u"cutoffUpper":3.25, u"neighborPix":0 }
self.FFTSum = None
# If you want to use one mask, it should have dims [1,N_Y,N_X]. This is
# to ensure Cythonized code can interact safely with Numpy
self.incohFouMag = None # Incoherent Fourier magnitude, for CTF determination, resolution checks
self.masks = None
self.maskSum = None
self.C = None
# Results
self.translations = None
self.transEven = None # For even-odd tiled FRC, the half-stack translations
self.transOdd = None # For even-odd tiled FRC, the half-stack translations
self.velocities = None # pixel velocity, in pix/frame, to find frames that suffer from excessive drift
self.rotations = None # rotations, for polar-transformed data
self.scales = None # scaling, for polar-transformed data
self.errorDictList = [] # A list of dictionaries of errors and such from different runs on the same data.
self.trackCorrStats = False
self.corrStats = None
self.doLazyFRC = True
self.doEvenOddFRC = False
self.FRC = None # A Fourier ring correlation
# Filtering
# TODO: add more fine control over filtering options
# CTF currently supports CTFFIND4.1 or GCTF
self.CTFProgram = None # None, "ctffind4.1", or "gctf", 'ctffind4.1,sum' works on (aligned) sum, same for 'gctf,sum'
self.CTFInfo = { u'DefocusU':None, u'DefocusV': None, u'DefocusAngle':None, u'CtfFigureOfMerit':None,
u'FinalResolution': None, u'AmplitudeContrast':0.07, u'AdditionalPhaseShift':None,
}
self.CTFDiag = None # Diagnostic image from CTFFIND4.1 or GCTF
# DEPRECATED ctf stuff
#self.doCTF = False
#self.CTF4Results = None # Micrograph number, DF1, DF2, Azimuth, Additional Phase shift, CC, and max spacing fit-to
#self.CTF4Diag = None
# Registration parameters
self.shapePadded = [4096,4096]
self.shapeOriginal = None
self.shapeBinned = None
self.subPixReg = 16 # fraction of a pixel to REGISTER image shift to
# Subpixel alignment method: None (shifts still registered subpixally), lanczos, or fourier
# lanczos is cheaper computationally and has fewer edge artifacts
self.shiftMethod = u'lanczos'
self.maxShift = 100 # Generally should be 1/2 distance to next lattice spacing
# Pre-shift every image by that of the previous frame, useful for high-resolution where one can jump a lattice
# i.e. should be used with small values for maxShift
self.preShift = False
# Solver weighting can be raw max correlation coeffs (None), normalized to [0,1] by the
# min and max correlations ('norm'), or 'logistic' function weighted which
# requires corrThres to be set.
self.peakLocMode = u'interpolated' # interpolated (oversampled), or a RMS-best fit like fitlaplacian
self.weightMode = u'autologistic' # autologistic, normalized, unweighted, logistic, or corr
self.peaksigThres = 6.0
self.logisticK = 5.0
self.logisticNu = 0.15
self.originMode = u'centroid' # 'centroid' or None
self.suppressOrigin = True # Delete the XC pixel at (0,0). Only necessary if gain reference is bad, but defaults to on.
# Triangle-matrix indexing parameters
self.triMode = u'diag' # Can be: tri, diag, auto, first
self.startFrame = 0
self.endFrame = 0
self.diagStart = 0 # XC to neighbour frame on 0, next-nearest neighbour on +1, etc.
self.diagWidth = 5
self.autoMax = 10
self.corrThres = None # Use with 'auto' mode to stop doing cross-correlations if the values drop below the threshold
self.velocityThres = None # Pixel velocity threshold (pix/frame), above which to throw-out frames with too much motion blur.
#### INPUT/OUTPUT ####
self.files = { u"config":None, u"stack":None, u"mask":None, u"sum":None,
u"align":None, u"figurePath":None, u"xc":None,
u"moveRawPath":None, u"original":None, u"gainRef":None,
u"stdout": None, u"automatch":None, u"rejected":None,
u"compressor": None, u"clevel": 1 }
#self.savePDF = False
self.savePNG = True
self.saveMovie = True
self.doCompression = False
self.compress_ext = ".bz2"
#### PLOTTING ####
self.plotDict = { u"imageSum":True, u"imageFirst":False, u"FFTSum":True, u"polarFFTSum":True,
u"filtSum":True, u'stats': False,
u"corrTriMat":False, u"peaksigTriMat": True,
u"translations":True, u"pixRegError":True,
u"CTFDiag":True, u"logisticWeights": True, u"FRC": True,
u'Transparent': True, u'plot_dpi':144, u'image_dpi':250,
u'image_cmap':u'gray', u'graph_cmap':u'gnuplot',
u'fontsize':12, u'fontstyle': u'serif', u'colorbar': True,
u'backend': u'Qt4Agg', u'multiprocess':True,
u'show':False }
pass
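# Example (a minimal, hypothetical usage sketch; the attribute and method names
# are taken from elsewhere in this class, and the stack filename is invented):
#   zreg = ImageRegistrator()
#   zreg.initDefaultFiles( "movie_stack.mrcs" )
#   zreg.images = mrcz.readMRC( "movie_stack.mrcs" )[0]
#   zreg.triMode = u'diag'; zreg.diagWidth = 5
#   zreg.xcorrnm2_tri()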
def initDefaultFiles( self, stackName ):
self.files[u'stack'] = stackName
self.files[u'config'] = stackName + u".zor"
stackPath, stackFront = os.path.split( stackName )
stackFront = os.path.splitext( stackFront )[0]
if 'compressor' not in self.files or not bool(self.files['compressor']):
mrcExt = ".mrc"
mrcsExt = ".mrcs"
else:
mrcExt = ".mrcz"
mrcsExt = ".mrcsz"
self.files[u'align'] = os.path.relpath(
os.path.join( u"./align", "%s_zorro_movie%s" %(stackFront, mrcsExt) ),
start=stackPath )
self.files[u'sum'] = os.path.relpath(
os.path.join( u"./sum", "%s_zorro%s" %(stackFront, mrcExt) ),
start=stackPath )
self.files[u'figurePath'] = os.path.relpath(
os.path.join(stackPath, u"./figs"), start=stackPath )
def xcorr2_mc2_1( self, gpu_id = 0, loadResult=True, clean=True ):
"""
This makes an external operating system call to the Cheng's lab GPU-based
B-factor multireference executable. It and CUDA libraries must be on the system
path and libary path respectively.
NOTE: Spyder looks loads PATH and LD_LIBRARY_PATH from .profile, not .bashrc
"""
dosef_cmd = util.which("dosefgpu_driftcorr")
if dosef_cmd is None:
print( "Error: dosefgpu_driftcorr not found in system path." )
return
#tempFileHash = str(uuid.uuid4() ) # Key let's us multiprocess safely
stackBase = os.path.basename( os.path.splitext( self.files['stack'] )[0] )
if self.cachePath is None:
self.cachePath = "."
InName = os.path.join( self.cachePath, stackBase + u"_mcIn.mrc" )
# Unfortunately these files may as well be in the working directory.
OutAvName = os.path.join( self.cachePath, stackBase + u"_mcOutAv.mrc" )
OutStackName = os.path.join( self.cachePath, stackBase + u"_mcOut.mrc" )
logName = os.path.join( self.cachePath, stackBase + u"_mc.zor" )
mrcz.writeMRC( self.images, InName )
# Force binning to 1, as performance with binning is poor
binning = 1
if self.Brad is not None:
# Li masking is in MkPosList() in cufunc.cu (line 413)
# Their r2 is normalized and mine isn't
# Li has mask = exp( -0.5 * bfactor * r_norm**2 )
# r_norm**2 = x*x/Nx*Nx + y*y/Ny*Ny = r**2 / (Nx**2 + Ny**2)
# For non-square arrays they have a non-square (but constant frequency) filter
# RAM has mask = exp( -(r/brad)**2 )
# We can only get Bfactor approximately then but it's close enough for 3710x3838
bfac = 2.0 * (self.images.shape[1]**2 + self.images.shape[2]**2) / (self.Brad**2)
print( "Using B-factor of " + str(bfac) + " for dosefgpu_driftcorr" )
else:
bfac = 1000 # dosef default 'safe' bfactor for mediocre gain reference
# Consider: Dosef suffers at the ends of the sequence, so make the middle frame zero drift?
# align_to = np.floor( self.images.shape[0]/2 )
# This seems to cause more problems then it's worth.
align_to = 0
if self.diagWidth is not None:
fod = self.diagWidth
else:
fod = 0
# Dosef can limit search to a certain box size
if self.maxShift is None:
maxshift = 96
else:
maxshift = self.maxShift * 2
if self.startFrame is None:
self.startFrame = 0
if self.endFrame is None:
self.endFrame = 0
motion_flags = ( " " + InName
+ " -gpu " + str(gpu_id)
+ " -nss " + str(self.startFrame)
+ " -nes " + str(self.endFrame)
+ " -fod " + str(fod)
+ " -bin " + str(binning)
+ " -bft " + str(bfac)
+ " -atm -" + str(align_to)
+ " -pbx " + str(maxshift)
+ " -ssc 1 -fct " + OutStackName
+ " -fcs " + OutAvName
+ " -flg " + logName )
sub = subprocess.Popen( dosef_cmd + motion_flags, shell=True )
sub.wait()
self.loadMCLog( logName )
time.sleep(0.5)
if bool(clean):
try: os.remove(InName)
except: pass
try: os.remove(OutStackName)
except: pass
try: os.remove(OutAvName)
except: pass
try: os.remove(logName)
except: pass
def loadMCLog( self, logName ):
"""
Load and part a MotionCorr log from disk using regular expressions.
"""
import re
# Parse to get the translations
fhMC = open( logName )
MClog = fhMC.readlines()
fhMC.close()
# Number of footer lines changes with the options you use.
# I would rather find Sum Frame #000
for linenumber, line in enumerate(MClog):
try:
test = re.findall( "Sum Frame #000", line)
if bool(test):
frameCount = np.int( re.findall( r"\d\d\d", line )[1] ) + 1
break
except: pass
MClog_crop = MClog[linenumber+1:linenumber+frameCount+1]
MCdrifts = np.zeros( [frameCount,2] )
for J in np.arange(0,frameCount):
MCdrifts[J,:] = re.findall( r"([+-]?\d+.\d+)", MClog_crop[J] )[1:]
# Zorro saves translations, motioncorr saves shifts.
self.translations = -np.fliplr( MCdrifts )
if self.originMode == u'centroid':
centroid = np.mean( self.translations, axis=0 )
self.translations -= centroid
def xcorr2_unblur1_02( self, dosePerFrame = None, minShift = 2.0, terminationThres = 0.1,
maxIteration=10, verbose=False, loadResult=True, clean=True ):
"""
Calls UnBlur by Grant and Rohou using the Zorro interface.
"""
self.bench['unblur0'] = time.time()
unblur_exename = "unblur_openmp_7_17_15.exe"
if util.which( unblur_exename ) is None:
print( "UnBlur not found in system path" )
return
print( "Calling UnBlur for " + self.files['stack'] )
print( " written by Timothy Grant and Alexis Rohou: http://grigoriefflab.janelia.org/unblur" )
print( " http://grigoriefflab.janelia.org/node/4900" )
import os
try: os.umask( self.umask ) # Why is Python not using default umask from OS?
except: pass
if self.cachePath is None:
self.cachePath = "."
# Force trailing slashes onto cachePatch
stackBase = os.path.basename( os.path.splitext( self.files[u'stack'] )[0] )
frcOutName = os.path.join( self.cachePath, stackBase + u"_unblur_frc.txt" )
shiftsOutName = os.path.join( self.cachePath, stackBase + u"_unblur_shifts.txt" )
outputAvName = os.path.join( self.cachePath, stackBase + u"_unblur.mrc" )
outputStackName = os.path.join( self.cachePath, stackBase + u"_unblur_movie.mrc" )
ps = self.pixelsize * 10.0
if self.filterMode is not None and 'dose' in self.filterMode:
doDoseFilter = True
if dosePerFrame is None:
# We have to guesstimate the dose per frame in e/A^2 if it's not provided
dosePerFrame = np.mean( self.images ) / (ps*ps)
preExposure = 0.0
if 'dosenorm' in self.filterMode:
restoreNoise=True
else:
restoreNoise=False
else:
doDoseFilter = False
if self.Brad is not None:
# Li masking is in MkPosList() in cufunc.cu (line 413)
# Their r2 is normalized and mine isn't
# Li has mask = exp( -0.5 * bfactor * r_norm**2 )
# r_norm**2 = x*x/Nx*Nx + y*y/Ny*Ny = r**2 / (Nx**2 + Ny**2)
# For non-square arrays they have a non-square (but constant frequency) filter
# RAM has mask = exp( -(r/brad)**2 )
# We can only get Bfactor approximately then but it's close enough for 3710x3838
bfac = 2.0 * (self.images.shape[1]**2 + self.images.shape[2]**2) / (self.Brad**2)
print( "Using B-factor of " + str(bfac) + " for UnBlur" )
else:
bfac = 1500 # dosef default 'safe' bfactor for mediocre gain reference
outerShift = self.maxShift * ps
# RAM: I see no reason to let people change the Fourier cross masking
vertFouMaskHW = 1
horzFouMaskHW = 1
try:
mrcName = os.path.join( self.cachePath, stackBase + "_unblurIN.mrc" )
mrcz.writeMRC( self.images, mrcName )
except:
print( "Error in exporting MRC file to UnBlur" )
return
# Are there flags for unblur? Check the source code.
flags = "" # Not using any flags
unblurexec = ( unblur_exename + " " + flags + " << STOP_PARSING \n" + mrcName )
unblurexec = (unblurexec + "\n" + str(self.images.shape[0]) + "\n" +
outputAvName + "\n" + shiftsOutName + "\n" + str(ps) + "\n" +
str(doDoseFilter) )
if bool(doDoseFilter):
unblurexec += "\n" + str(dosePerFrame) + "\n" + str(self.voltage) + "\n" + str(preExposure)
unblurexec += ("\n yes \n" + outputStackName + "\n yes \n" +
frcOutName + "\n" + str(minShift) + "\n" + str(outerShift) + "\n" +
str(bfac) + "\n" + str( np.int(vertFouMaskHW) ) + "\n" + str( np.int(horzFouMaskHW) ) + "\n" +
str(terminationThres) + "\n" + str(maxIteration) )
if bool(doDoseFilter):
unblurexec += "\n" + str(restoreNoise)
unblurexec += "\n" + str(verbose)
unblurexec = unblurexec + "\nSTOP_PARSING"
print( unblurexec )
sub = subprocess.Popen( unblurexec, shell=True )
sub.wait()
try:
# Their FRC is significantly different from mine.
self.FRC = np.loadtxt(frcOutName, comments='#', skiprows=0 )
self.translations = np.loadtxt( shiftsOutName, comments='#', skiprows=0 ).transpose()
# UnBlur uses Fortran ordering, so we need to swap y and x for Zorro C-ordering
self.translations = np.fliplr( self.translations )
# UnBlur returns drift in Angstroms
self.translations /= ps
# UnBlur registers to the middle frame; re-register the shifts to the first frame
self.translations -= self.translations[0,:]
if bool( loadResult ):
print( "Loading UnBlur aligned frames into ImageRegistrator.images" )
# Both the dose-filtered and unfiltered branches read the same file;
# TODO: how to get both filtered and unfiltered images from UnBlur?
self.imageSum = mrcz.readMRC( outputAvName )[0]
# TODO: We have a bit of an issue, this UnBlur movie is dose filtered...
self.images = mrcz.readMRC( outputStackName )[0]
except IOError:
print( "UnBlur likely core-dumped, try different input parameters?" )
finally:
time.sleep(0.5) # DEBUG: try and see if temporary files are deleteable now.
frcOutName = os.path.join( self.cachePath, stackBase + "_unblur_frc.txt" )
shiftsOutName = os.path.join( self.cachePath, stackBase + "_unblur_shifts.txt" )
outputAvName = os.path.join( self.cachePath, stackBase + "_unblur.mrc" )
outputStackName = os.path.join( self.cachePath, stackBase + "_unblur_movie.mrc" )
pass
if self.originMode == 'centroid':
centroid = np.mean( self.translations, axis=0 )
self.translations -= centroid
time.sleep(0.5)
if bool(clean):
try: os.remove( mrcName )
except: print( "Could not remove Unblur MRC input file" )
try: os.remove( frcOutName )
except: print( "Could not remove Unblur FRC file" )
try: os.remove( shiftsOutName )
except: print( "Could not remove Unblur Shifts file" )
try: os.remove( outputAvName )
except: print( "Could not remove Unblur MRC average" )
try: os.remove( outputStackName )
except: print( "Could not remove Unblur MRC stack" )
self.bench['unblur1'] = time.time()
def __init_xcorrnm2( self, triIndices=None ):
"""
"""
self.bench['xcorr0'] = time.time()
shapeImage = np.array( [self.images.shape[1], self.images.shape[2]] )
self.__N = np.asarray( self.images.shape )[0]
if self.preShift:
print( "Warning: Preshift will break if there are skipped frames in a triIndices row." )
# Test to see if triIndices is a np.array or use self.triMode
if hasattr( triIndices, "__array__" ): # np.array
# Ensure triIndices is a square array of the right size
if triIndices.shape[0] != self.__N or triIndices.shape[1] != self.__N:
raise IndexError("triIndices is wrong size, should be of length: " + str(self.__N) )
elif triIndices is None:
[xmesh, ymesh] = np.meshgrid( np.arange(0,self.__N), np.arange(0,self.__N) )
trimesh = xmesh - ymesh
# Build the triMat if it wasn't passed in as an array
if( self.triMode == 'first' ):
print( "Correlating in template mode to first image" )
triIndices = np.ones( [1,self.__N], dtype='bool' )
triIndices[0,0] = False # Don't autocorrelate the first frame.
elif( self.triMode == u'diag' ):
if (self.diagWidth is None) or (self.diagWidth < 0):
# For negative numbers, align the entire triangular matrix
self.diagWidth = self.__N
triIndices = (trimesh <= self.diagWidth + self.diagStart ) * (trimesh > self.diagStart )
print( "Correlating in diagonal mode with width " + str(self.diagWidth) )
elif( self.triMode == u'autocorr' ):
triIndices = (trimesh == 0)
elif( self.triMode == u'refine' ):
triIndices = trimesh == 0
else: # 'tri' or 'auto' ; default is an upper triangular matrix
triIndices = trimesh >= 1
pass
else:
raise TypeError( "Error: triIndices not recognized as valid: " + str(triIndices) )
if self.masks is None or self.masks == []:
print( "Warning: No mask not recommened with MNXC-style correlation" )
self.masks = np.ones( [1,shapeImage[0],shapeImage[1]], dtype = self.images.dtype )
if( self.masks.ndim == 2 ):
self.masks = np.reshape( self.masks.astype(self.images.dtype), [1,shapeImage[0],shapeImage[1]] )
# Pre-loop allocation
self.__shiftsTriMat = np.zeros( [self.__N,self.__N,2], dtype=float_dtype ) # Triangular matrix of shifts in [I,J,(y,x)]
self.__corrTriMat = np.zeros( [self.__N,self.__N], dtype=float_dtype ) # Triangular matrix of maximum correlation coefficient in [I,J]
self.__peaksigTriMat = np.zeros( [self.__N,self.__N], dtype=float_dtype ) # Triangular matrix of correlation peak contrast level
self.__originTriMat= np.zeros( [self.__N,self.__N], dtype=float_dtype ) # Triangular matrix of origin correlation coefficient in [I,J]
# Make pyFFTW objects
if not bool( np.any( self.fouCrop ) ):
self.__tempFullframe = np.empty( shapeImage, dtype=fftw_dtype )
self.__FFT2, self.__IFFT2 = util.pyFFTWPlanner( self.__tempFullframe, wisdomFile=os.path.join( self.cachePath, "fftw_wisdom.pkl" ), effort = self.fftw_effort, n_threads=self.n_threads )
self.__shapeCropped = shapeImage
self.__tempComplex = np.empty( self.__shapeCropped, dtype=fftw_dtype )
else:
self.__tempFullframe = np.empty( shapeImage, dtype=fftw_dtype )
self.__FFT2, _ = util.pyFFTWPlanner( self.__tempFullframe, wisdomFile=os.path.join( self.cachePath, "fftw_wisdom.pkl" ) , effort = self.fftw_effort, n_threads=self.n_threads, doReverse=False )
# Force fouCrop to multiple of 2
self.__shapeCropped = 2 * np.floor( np.array( self.fouCrop ) / 2.0 ).astype('int')
self.__tempComplex = np.empty( self.__shapeCropped, dtype=fftw_dtype )
_, self.__IFFT2 = util.pyFFTWPlanner( self.__tempComplex, wisdomFile=os.path.join( self.cachePath, "fftw_wisdom.pkl" ) , effort = self.fftw_effort, n_threads=self.n_threads, doForward=False )
self.__shapeCropped2 = (np.array( self.__shapeCropped) / 2.0).astype('int')
self.__templateImageFFT = np.empty( self.__shapeCropped, dtype=fftw_dtype )
self.__templateSquaredFFT = np.empty( self.__shapeCropped, dtype=fftw_dtype )
self.__templateMaskFFT = np.empty( self.__shapeCropped, dtype=fftw_dtype )
self.__tempComplex2 = np.empty( self.__shapeCropped, dtype=fftw_dtype )
# Subpixel initialization
# Ideally subPix should be a power of 2 (i.e. 2,4,8,16,32)
self.__subR = 8 # Sampling range around peak of +/- subR
if self.subPixReg is None: self.subPixReg = 1;
if self.subPixReg > 1.0:
# hannfilt = np.fft.fftshift( ram.apodization( name='hann', size=[subR*2,subR*2], radius=[subR,subR] ) ).astype( fftw_dtype )
# Need a forward transform that is [subR*2,subR*2]
self.__Csub = np.empty( [self.__subR*2,self.__subR*2], dtype=fftw_dtype )
self.__CsubFFT = np.empty( [self.__subR*2,self.__subR*2], dtype=fftw_dtype )
self.__subFFT2, _ = util.pyFFTWPlanner( self.__Csub, fouMage=self.__CsubFFT, wisdomFile=os.path.join( self.cachePath, "fftw_wisdom.pkl" ) , effort = self.fftw_effort, n_threads=self.n_threads, doReverse = False )
# and reverse transform that is [subR*2*subPix, subR*2*subPix]
self.__CpadFFT = np.empty( [self.__subR*2*self.subPixReg,self.__subR*2*self.subPixReg], dtype=fftw_dtype )
self.__Csub_over = np.empty( [self.__subR*2*self.subPixReg,self.__subR*2*self.subPixReg], dtype=fftw_dtype )
_, self.__subIFFT2 = util.pyFFTWPlanner( self.__CpadFFT, fouMage=self.__Csub_over, wisdomFile=os.path.join( self.cachePath, "fftw_wisdom.pkl" ) , effort = self.fftw_effort, n_threads=self.n_threads, doForward = False )
self.__maskProduct = np.zeros( self.__shapeCropped, dtype=float_dtype )
self.__normConst2 = np.float32( 1.0 / ( np.float64(self.__shapeCropped[0])*np.float64(self.__shapeCropped[1]))**2.0 )
self.bench['xcorr1'] = time.time()
return triIndices
def xcorrnm2_speckle( self, triIndices=None ):
"""
Robert A. McLeod
[email protected]
October 1, 2016
With data recorded automatically from SerialEM, we no longer have access to the gain reference
normalization step provided by Gatan. With the K2 detector, gain normalization is no
longer a simple multiplication. Therefore we see additional, multiplicative (or speckle)
noise in the images compared to those recorded by Gatan Microscopy Suite. Here we want
to use a different approach from the Padfield algorithm, which is aimed at suppressing
additive noise.
In general Poisson noise should behave as speckle noise, especially at the dose rates commonly
seen in cryo-EM.
"""
triIndices = self.__init_xcorrnm2( triIndices = triIndices)
# Pre-compute forward FFTs (template will just be copied conjugate Fourier spectra)
self.__imageFFT = np.empty( [self.__N, self.shapePadded[0], self.shapePadded[1]], dtype=fftw_dtype )
self.__autocorrHalfs = np.empty( [self.__N, self.__shapeCropped[0], self.__shapeCropped[1]], dtype=float_dtype )
currIndex = 0
self.__originC = []; self.C = []
print( "Pre-computing forward Fourier transforms and autocorrelations" )
# For even-odd and noise estimates, we often skip many rows
# precompIndices = np.unique( np.vstack( [np.argwhere( np.sum( triIndices, axis=1 ) > 0 ), np.argwhere( np.sum( triIndices, axis=0 ) > 0 ) ] ) )
precompIndices = np.unique( np.vstack( [np.argwhere( np.sum( triIndices, axis=1 ) >= 0 ),
np.argwhere( np.sum( triIndices, axis=0 ) >= 0 ) ] ) )
for I in precompIndices:
if self.verbose >= 2:
print( "Precomputing forward FFT frame: " + str(I) )
# Apply masks to images
if self.masks.shape[0] == 1:
masks_block = self.masks[0,:,:]
images_block = self.images[I,:,:]
else:
masks_block = self.masks[I,:,:]
images_block = self.images[I,:,:]
self.__tempComplex = nz.evaluate( "masks_block * images_block" ).astype( fftw_dtype )
self.__FFT2.update_arrays( self.__tempComplex, self.__imageFFT[I,:,:]); self.__FFT2.execute()
print( "TODO: FOURIER CROPPING" )
# Compute autocorrelation
imageFFT = self.__imageFFT[I,:,:]
# Not sure if numexpr is useful for such a simple operation?
self.__tempComplex = nz.evaluate( "imageFFT * conj(imageFFT)" )
self.__IFFT2.update_arrays( self.__tempComplex, self.__tempComplex2 )
tempComplex2 = self.__tempComplex2
nz.evaluate( "0.5*abs(tempComplex2)", out=self.__autocorrHalfs[I,:,:] )
self.bench['xcorr2'] = time.time()
########### COMPUTE PHASE CORRELATIONS #############
print( "Starting correlation calculations, mode: " + self.triMode )
if self.triMode == u'refine':
# Find FFT sum (it must be reduced by the current frame later)
# FIXME: Is there some reason this might not be linear after FFT?
# FIXME: is it the complex conjugate operation below???
self.__sumFFT = np.sum( self.__baseImageFFT, axis = 0 )
self.__sumSquaredFFT = np.sum( self.__baseSquaredFFT, axis = 0 )
print( "In refine" )
for I in np.arange(self.images.shape[0] - 1):
# In refine mode we have to build the template on the fly from imageSum - currentImage
self.__templateImageFFT = np.conj( self.__sumFFT - self.__baseImageFFT[I,:,:] ) / self.images.shape[0]
self.__templateSquaredFFT = np.conj( self.__sumSquaredFFT - self.__baseSquaredFFT[I,:,:] ) / self.images.shape[0]
tempComplex2 = None
self.mnxc2_speckle( I, I, self.__shapeCropped, refine=True )
#### Find maximum positions ####
self.locatePeak( I, I )
if self.verbose:
print( "Refine # " + str(I) + " shift: [%.2f"%self.__shiftsTriMat[I,I,0]
+ ", %.2f"%self.__shiftsTriMat[I,I,1]
+ "], cc: %.6f"%self.__corrTriMat[I,I]
+ ", peak sig: %.3f"%self.__peaksigTriMat[I,I] )
else:
# For even-odd and noise estimates, we often skip many rows
rowIndices = np.unique( np.argwhere( np.sum( triIndices, axis=1 ) > 0 ) )
#print( "rowIndices: " + str(rowIndices) )
for I in rowIndices:
# I is the index of the template image
tempComplex = self.__baseImageFFT[I,:,:]
self.__templateImageFFT = nz.evaluate( "conj(tempComplex)")
# Now we can start looping through base images
columnIndices = np.unique( np.argwhere( triIndices[I,:] ) )
#print( "columnIndices: " + str(columnIndices) )
for J in columnIndices:
####### MNXC2 revision with private variables to make the code more manageable.
self.mnxc2_speckle( I, J, self.__shapeCropped )
#### Find maximum positions ####
self.locatePeak( I, J )
if self.verbose:
print( "# " + str(I) + "->" + str(J) + " shift: [%.2f"%self.__shiftsTriMat[I,J,0]
+ ", %.2f"%self.__shiftsTriMat[I,J,1]
+ "], cc: %.6f"%self.__corrTriMat[I,J]
+ ", peak sig: %.3f"%self.__peaksigTriMat[I,J] )
# Correlation stats is for establishing correlation scores for fixed-pattern noise.
if bool( self.trackCorrStats ):
self.calcCorrStats( currIndex, triIndices )
# triMode 'auto' diagonal mode
if self.triMode == u'auto' and (self.__peaksigTriMat[I,J] <= self.peaksigThres or J-I >= self.autoMax):
if self.verbose: print( "triMode 'auto' stopping at frame: " + str(J) )
break
currIndex += 1
pass # C max position location
if bool( np.any( self.fouCrop ) ):
self.__shiftsTriMat[:,:,0] *= self.shapePadded[0] / self.__shapeCropped[0]
self.__shiftsTriMat[:,:,1] *= self.shapePadded[1] / self.__shapeCropped[1]
self.bench['xcorr3'] = time.time()
# Pointer reference house-keeping
del images_block, masks_block, imageFFT, tempComplex2
def xcorrnm2_tri( self, triIndices=None ):
"""
Robert A. McLeod
[email protected]
April 16, 2015
triIndices is the index locations to correlate to. If None, self.triMode
is used to build one. Normally you should use self.triMode for the first iteration,
and pass in a triIndice from the errorDict if you want to repeat.
returns : [shiftsTriMat, corrTriMat, peaksTriMat]
This is an evolution of the Padfield cross-correlation algorithm to take
advantage of the Cheng multi-reference approach for cross-correlation
alignment of movies.
Padfield, "Masked object registration in the Fourier domain," IEEE
Transactions on Image Processing 21(5) (2012): 3706-2718.
Li et al. Nature Methods, 10 (2013): 584-590.
It cross-correlates every frame to every other frame to build a triangular
matrix of shifts and then does a functional minimization over the set of
equations. This means the computational cost grows with a power law with
the number of frames but it is more noise resistant.
triIndices can be an arbitrary boolean N x N matrix of frames to correlate
Alternatively it can be a string which will generate an appropriate matrix:
'tri' (default) correlates all frames to eachother
'first' is correlate to the first frame as a template
'diag' correlates to the next frame (i.e. a diagonal )
'auto' is like diag but automatically determines when to stop based on self.peaksigThres
diagWidth is for 'diag' and the number of frames to correlate each frame to,
default is None, which does the entire triangular matrix
diagWidth = 1 correlates to each preceding frame
NOTE: only calculates FFTs up to Nyquist/2.
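A minimal direct-call sketch (hypothetical; normally alignImageStack() calls
this for you after padStack() has prepared the stack and the FFT plans):
    zreg.triMode = u'diag'; zreg.diagWidth = 5
    zreg.xcorrnm2_tri() # fills the private shift/correlation tri-matrices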
"""
triIndices = self.__init_xcorrnm2( triIndices = triIndices)
if self.masks.shape[0] == 1 :
# tempComplex = self.masks[0,:,:].astype( fftw_dtype )
self.__baseMaskFFT = np.empty( self.__shapeCropped, dtype=fftw_dtype )
self.__FFT2.update_arrays( self.masks[0,:,:].squeeze().astype( fftw_dtype ), self.__tempFullframe ); self.__FFT2.execute()
# FFTCrop
sC2 = self.__shapeCropped2
self.__baseMaskFFT[0:sC2[0],0:sC2[1]] = self.__tempFullframe[0:sC2[0],0:sC2[1]]
self.__baseMaskFFT[0:sC2[0],-sC2[1]:] = self.__tempFullframe[0:sC2[0],-sC2[1]:]
self.__baseMaskFFT[-sC2[0]:,0:sC2[1]] = self.__tempFullframe[-sC2[0]:,0:sC2[1]]
self.__baseMaskFFT[-sC2[0]:,-sC2[1]:] = self.__tempFullframe[-sC2[0]:,-sC2[1]:]
self.__templateMaskFFT = np.conj( self.__baseMaskFFT )
# maskProduct term is M1^* .* M2
templateMaskFFT = self.__templateMaskFFT;
baseMaskFFT = self.__baseMaskFFT # Pointer assignment
self.__tempComplex2 = nz.evaluate( "templateMaskFFT * baseMaskFFT" )
self.__IFFT2.update_arrays( self.__tempComplex2, self.__tempComplex ); self.__IFFT2.execute()
tempComplex = self.__tempComplex
normConst2 = self.__normConst2
self.__maskProduct = nz.evaluate( "normConst2*real(tempComplex)" )
else:
# Pre-allocate only
self.__baseMaskFFT = np.zeros( [self.__N, self.__shapeCropped[0], self.__shapeCropped[1]], dtype=fftw_dtype )
if bool( self.maxShift ) or self.Bmode == u'fourier':
if self.maxShift is None or self.preShift is True:
[xmesh,ymesh] = np.meshgrid( np.arange(-self.__shapeCropped2[0], self.__shapeCropped2[0]),
np.arange(-self.__shapeCropped2[1], self.__shapeCropped2[1]) )
else:
[xmesh,ymesh] = np.meshgrid( np.arange(-self.maxShift, self.maxShift), np.arange(-self.maxShift, self.maxShift) )
rmesh2 = nz.evaluate( "xmesh*xmesh + ymesh*ymesh" )
# rmesh2 = xmesh*xmesh + ymesh*ymesh
if bool( self.maxShift ):
self.__mask_maxShift = ( rmesh2 < self.maxShift**2.0 )
if self.Bmode == u'fourier':
self.__Bfilter = np.fft.fftshift( util.apodization( name=self.BfiltType,
size=self.__shapeCropped,
radius=[self.Brad,self.Brad] ) )
sC2 = self.__shapeCropped2 # Needed in the precompute loop below regardless of the mask branch above
self.bench['xcorr1'] = time.time()
# Pre-compute forward FFTs (template will just be copied conjugate Fourier spectra)
self.__imageFFT = np.empty( [self.__N, self.shapePadded[0], self.shapePadded[1]], dtype=fftw_dtype )
self.__baseImageFFT = np.empty( [self.__N, self.__shapeCropped[0], self.__shapeCropped[1]], dtype=fftw_dtype )
self.__baseSquaredFFT = np.empty( [self.__N, self.__shapeCropped[0], self.__shapeCropped[1]], dtype=fftw_dtype )
# Looping for triagonal matrix
# For auto this is wrong, so make these lists instead
currIndex = 0
self.__originC = []; self.C = []
print( "Pre-computing forward Fourier transforms" )
# For even-odd and noise estimates, we often skip many rows
# precompIndices = np.unique( np.vstack( [np.argwhere( np.sum( triIndices, axis=1 ) > 0 ), np.argwhere( np.sum( triIndices, axis=0 ) > 0 ) ] ) )
precompIndices = np.unique( np.vstack( [np.argwhere( np.sum( triIndices, axis=1 ) >= 0 ),
np.argwhere( np.sum( triIndices, axis=0 ) >= 0 ) ] ) )
for I in precompIndices:
if self.verbose >= 2:
print( "Precomputing forward FFT frame: " + str(I) )
# Apply masks to images
if self.masks.shape[0] == 1:
masks_block = self.masks[0,:,:]
images_block = self.images[I,:,:]
else:
masks_block = self.masks[I,:,:]
images_block = self.images[I,:,:]
tempReal = nz.evaluate( "masks_block * images_block" ).astype( fftw_dtype )
self.__FFT2.update_arrays( tempReal, self.__tempFullframe ); self.__FFT2.execute()
if self.shiftMethod == u"fourier":
self.__imageFFT[I,:,:] = self.__tempFullframe.copy(order='C')
# FFTCrop
self.__baseImageFFT[I,0:sC2[0],0:sC2[1]] = self.__imageFFT[I,0:sC2[0],0:sC2[1]]
self.__baseImageFFT[I,0:sC2[0],-sC2[1]:] = self.__imageFFT[I,0:sC2[0],-sC2[1]:]
self.__baseImageFFT[I,-sC2[0]:,0:sC2[1]] = self.__imageFFT[I,-sC2[0]:,0:sC2[1]]
self.__baseImageFFT[I,-sC2[0]:,-sC2[1]:] = self.__imageFFT[I,-sC2[0]:,-sC2[1]:]
print( "TODO: check memory consumption" )
else:
# FFTCrop
self.__baseImageFFT[I,0:sC2[0],0:sC2[1]] = self.__tempFullframe[0:sC2[0],0:sC2[1]]
self.__baseImageFFT[I,0:sC2[0],-sC2[1]:] = self.__tempFullframe[0:sC2[0],-sC2[1]:]
self.__baseImageFFT[I,-sC2[0]:,0:sC2[1]] = self.__tempFullframe[-sC2[0]:,0:sC2[1]]
self.__baseImageFFT[I,-sC2[0]:,-sC2[1]:] = self.__tempFullframe[-sC2[0]:,-sC2[1]:]
self.__FFT2.update_arrays( nz.evaluate( "tempReal*tempReal" ).astype( fftw_dtype ), self.__tempFullframe ); self.__FFT2.execute()
# FFTCrop
self.__baseSquaredFFT[I,0:sC2[0],0:sC2[1]] = self.__tempFullframe[0:sC2[0],0:sC2[1]]
self.__baseSquaredFFT[I,0:sC2[0],-sC2[1]:] = self.__tempFullframe[0:sC2[0],-sC2[1]:]
self.__baseSquaredFFT[I,-sC2[0]:,0:sC2[1]] = self.__tempFullframe[-sC2[0]:,0:sC2[1]]
self.__baseSquaredFFT[I,-sC2[0]:,-sC2[1]:] = self.__tempFullframe[-sC2[0]:,-sC2[1]:]
if not self.masks.shape[0] == 1:
self.__FFT2.update_arrays( self.masks[I,:,:].squeeze().astype( fftw_dtype), self.__tempFullframe ); self.__FFT2.execute()
# FFTCrop
self.__baseMaskFFT[I,0:sC2[0],0:sC2[1]] = self.__tempFullframe[0:sC2[0],0:sC2[1]]
self.__baseMaskFFT[I,0:sC2[0],-sC2[1]:] = self.__tempFullframe[0:sC2[0],-sC2[1]:]
self.__baseMaskFFT[I,-sC2[0]:,0:sC2[1]] = self.__tempFullframe[-sC2[0]:,0:sC2[1]]
self.__baseMaskFFT[I,-sC2[0]:,-sC2[1]:] = self.__tempFullframe[-sC2[0]:,-sC2[1]:]
pass
del masks_block, images_block
self.bench['xcorr2'] = time.time()
print( "Starting correlation calculations, mode: " + self.triMode )
if self.triMode == u'refine':
# Find FFT sum (it must be reduced by the current frame later)
# FIXME: Is there some reason this might not be linear after FFT?
# FIXME: is it the complex conjugate operation below???
self.__sumFFT = np.sum( self.__baseImageFFT, axis = 0 )
self.__sumSquaredFFT = np.sum( self.__baseSquaredFFT, axis = 0 )
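# (Each frame is thus correlated against a scaled leave-one-out sum of all
# the other frames, built by subtracting frame I's spectra from the stack-wide sums.)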
print( "In refine" )
for I in np.arange(self.images.shape[0] - 1):
# In refine mode we have to build the template on the fly from imageSum - currentImage
self.__templateImageFFT = np.conj( self.__sumFFT - self.__baseImageFFT[I,:,:] ) / self.images.shape[0]
self.__templateSquaredFFT = np.conj( self.__sumSquaredFFT - self.__baseSquaredFFT[I,:,:] ) / self.images.shape[0]
tempComplex2 = None
self.mnxc2( I, I, self.__shapeCropped, refine=True )
#### Find maximum positions ####
self.locatePeak( I, I )
if self.verbose:
print( "Refine # " + str(I) + " shift: [%.2f"%self.__shiftsTriMat[I,I,0]
+ ", %.2f"%self.__shiftsTriMat[I,I,1]
+ "], cc: %.6f"%self.__corrTriMat[I,I]
+ ", peak sig: %.3f"%self.__peaksigTriMat[I,I] )
else:
# For even-odd and noise estimates, we often skip many rows
rowIndices = np.unique( np.argwhere( np.sum( triIndices, axis=1 ) > 0 ) )
#print( "rowIndices: " + str(rowIndices) )
for I in rowIndices:
# I is the index of the template image
tempComplex = self.__baseImageFFT[I,:,:]
self.__templateImageFFT = nz.evaluate( "conj(tempComplex)")
tempComplex2 = self.__baseSquaredFFT[I,:,:]
self.__templateSquaredFFT = nz.evaluate( "conj(tempComplex2)")
if not self.masks.shape[0] == 1:
tempComplex = self.__baseMaskFFT[I,:,:]
self.__templateMaskFFT = nz.evaluate( "conj(tempComplex)")
# Now we can start looping through base images
columnIndices = np.unique( np.argwhere( triIndices[I,:] ) )
#print( "columnIndices: " + str(columnIndices) )
for J in columnIndices:
####### MNXC2 revised to use private variables to make the code more manageable.
self.mnxc2( I, J, self.__shapeCropped )
#### Find maximum positions ####
self.locatePeak( I, J )
if self.verbose:
print( "# " + str(I) + "->" + str(J) + " shift: [%.2f"%self.__shiftsTriMat[I,J,0]
+ ", %.2f"%self.__shiftsTriMat[I,J,1]
+ "], cc: %.6f"%self.__corrTriMat[I,J]
+ ", peak sig: %.3f"%self.__peaksigTriMat[I,J] )
# Correlation stats is for establishing correlation scores for fixed-pattern noise.
if bool( self.trackCorrStats ):
# Track the various statistics about the correlation map, mean, std, max, skewness
self.calcCorrStats( currIndex, triIndices )
# triMode 'auto' diagonal mode
if self.triMode == u'auto' and (self.__peaksigTriMat[I,J] <= self.peaksigThres or J-I >= self.autoMax):
if self.verbose: print( "triMode 'auto' stopping at frame: " + str(J) )
break
currIndex += 1
pass # C max position location
if bool( np.any( self.fouCrop ) ):
self.__shiftsTriMat[:,:,0] *= self.shapePadded[0] / self.__shapeCropped[0]
self.__shiftsTriMat[:,:,1] *= self.shapePadded[1] / self.__shapeCropped[1]
self.bench['xcorr3'] = time.time()
# Pointer reference house-keeping
try: del templateMaskFFT, tempComplex, tempComplex2 # Pointers; not all are bound in every branch
except: pass
return
def mnxc2( self, I, J, shapeCropped, refine=False ):
"""
2-D Masked, Intensity Normalized, Cross-correlation
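Sketch of the normalization, following Padfield (2012): with F1, F2 the FFTs
of the masked images and M1, M2 the FFTs of the masks, each term below is an
inverse FFT of a spectral product, e.g.
    overlap = IFFT( conj(M1) * M2 )
    numerator = IFFT( conj(F1) * F2 ) - IFFT( conj(F1) * M2 ) * IFFT( conj(M1) * F2 ) / overlap
with the denominator built from the analogous per-image variance terms, so C
is a proper correlation coefficient over the masked region. (Sketch only; see
the code for the exact normalization constants.)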
"""
tempComplex = self.__tempComplex # Pointer re-assignment
tempComplex2 = self.__tempComplex2 # Pointer re-assignment
maskProduct = self.__maskProduct
normConst2 = self.__normConst2
if not self.masks.shape[0] == 1:
# Compute maskProduct, term is M1^* .* M2
baseMask_block = self.__baseMaskFFT[J,:,:]; templateMaskFFT = self.__templateMaskFFT # Pointer re-assignment
tempComplex2 = nz.evaluate( "templateMaskFFT * baseMask_block" )
self.__IFFT2.update_arrays( tempComplex2, tempComplex ); self.__IFFT2.execute()
# maskProduct = np.clip( np.round( np.real( tempComplex ) ), eps, np.Inf )
self.__maskProduct = nz.evaluate( "real(tempComplex)*normConst2" )
# Compute mask correlation terms
templateImageFFT = self.__templateImageFFT # Pointer re-assignment (needed in both mask branches)
if self.masks.shape[0] == 1:
baseMask_block = self.__baseMaskFFT # Single mask: the 2-D cropped mask spectrum
self.__IFFT2.update_arrays( nz.evaluate( "baseMask_block * templateImageFFT"), tempComplex ); self.__IFFT2.execute()
Corr_templateMask = nz.evaluate( "real(tempComplex)*normConst2" ) # Normalization
baseImageFFT_block = self.__baseImageFFT[J,:,:]; templateMaskFFT = self.__templateMaskFFT
self.__IFFT2.update_arrays( nz.evaluate( "templateMaskFFT * baseImageFFT_block"), tempComplex ); self.__IFFT2.execute()
# These haven't been normalized, so let's do so. They are FFT squared, so N*N
# This reduces the strain on single-precision range.
Corr_baseMask = nz.evaluate( "real(tempComplex)*normConst2" ) # Normalization
# Compute the intensity normalization for the template
if self.masks.shape[0] == 1:
baseMaskFFT = self.__baseMaskFFT; templateSquaredFFT = self.__templateSquaredFFT
self.__IFFT2.update_arrays( nz.evaluate( "baseMaskFFT * templateSquaredFFT"), tempComplex ); self.__IFFT2.execute()
else:
self.__IFFT2.update_arrays( nz.evaluate( "baseMaskFFT_block * templateSquaredFFT"), tempComplex ); self.__IFFT2.execute()
# DenomTemplate = nz.evaluate( "real(tempComplex)*normConst2 - real( Corr_templateMask * (Corr_templateMask / maskProduct) )" )
# Compute the intensity normalization for the base image
baseSquared_block = self.__baseSquaredFFT[J,:,:]
self.__IFFT2.update_arrays( nz.evaluate( "templateMaskFFT * baseSquared_block"), tempComplex2 ); self.__IFFT2.execute()
# Compute Denominator intensity normalization
# DenomBase = nz.evaluate( "real(tempComplex2)*normConst2- real( Corr_baseMask * (Corr_baseMask / maskProduct) )" )
Denom = nz.evaluate( "sqrt( (real(tempComplex2)*normConst2- real( Corr_baseMask * (Corr_baseMask / maskProduct)))" +
"* (real(tempComplex)*normConst2 - real( Corr_templateMask * (Corr_templateMask / maskProduct)) ) )" )
# What happened to numexpr clip?
Denom = np.clip( Denom, 1, np.Inf )
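# (Clipping the denominator guards against divide-by-near-zero in regions of
# tiny mask overlap, which would otherwise amplify noise in C.)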
# print( "Number of small Denominator values: " + str(np.sum(DenomTemplate < 1.0)) )
# Compute Numerator (the phase correlation)
tempComplex2 = nz.evaluate( "baseImageFFT_block * templateImageFFT" )
self.__IFFT2.update_arrays( tempComplex2, tempComplex ); self.__IFFT2.execute()
# Numerator = nz.evaluate( "real(tempComplex)*normConst2 - real( Corr_templateMask * Corr_baseMask / maskProduct)" )
# Compute final correlation
self.__C = nz.evaluate( "(real(tempComplex)*normConst2 - real( Corr_templateMask * Corr_baseMask / maskProduct)) / Denom" )
# print( "%%%% mnxc2.Denom.dtype = " + str(Denom.dtype) )
self.__originTriMat[I,J] = self.__C[0,0]
if bool(self.suppressOrigin):
# If gain reference is quite old we can still get one bright pixel at the center.
# The hot pixel filter has mitigated this but it's still a minor source of bias.
self.__C[0,0] = 0.125 * ( self.__C[1,0] + self.__C[0,1] + self.__C[-1,0] + self.__C[0,-1] +
self.__C[1,1] + self.__C[-1,1] + self.__C[1,-1] + self.__C[-1,-1] )
# We have everything in normal FFT order until here; Some speed-up could be found by its removal.
# Pratically we don't have to do this fftshift, but it makes plotting easier to understand
self.__C = np.fft.ifftshift( self.__C )
# We can crop C if maxShift is not None and preShift is False
if self.maxShift is not None and self.preShift is False:
shapeCropped2 = (np.array(shapeCropped)/2.0).astype('int')
self.__C = self.__C[shapeCropped2[0]-self.maxShift:shapeCropped2[0]+self.maxShift, shapeCropped2[1]-self.maxShift:shapeCropped2[1]+self.maxShift]
try: # Names from the single- and multi-mask branches above; not all are bound in every path
del normConst2, baseMask_block, templateMaskFFT, templateImageFFT, Corr_templateMask, baseImageFFT_block
del Corr_baseMask, baseSquared_block, baseMaskFFT, templateSquaredFFT, maskProduct
del tempComplex, tempComplex2
except: pass
def locatePeak( self, I, J ):
"""
Subpixel peak location by Fourier interpolation.
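The neighbourhood around the integer peak (2*subR pixels per side) is Fourier
transformed, zero-padded by a factor of subPixReg, and inverse transformed;
this sinc-interpolates the correlation surface so the peak can be located to
roughly 1/subPixReg of a pixel.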
"""
tempComplex = self.__tempComplex; tempComplex2 = self.__tempComplex2
# Apply B-factor low-pass filter to correlation function
if self.Bmode == 'opti':
self.bench['opti0'] = time.time()
# Want to define this locally so it inherits scope variables.
def inversePeakContrast( Bsigma ):
self.__C_filt = scipy.ndimage.gaussian_filter( self.__C, Bsigma )
return np.std(self.__C_filt ) / (np.max(self.__C_filt ) - np.mean(self.__C_filt ) )
# B_opti= scipy.optimize.fminbound( inversePeakContrast, 0.0, 10.0, xtol=1E-3 )
sigmaOptiMax = 7.0
sigmaOptiMin = 0.0
maxIter = 15 # Let's apply some more constraints to speed this up
tolerance = 0.01
result = scipy.optimize.minimize_scalar( inversePeakContrast,
bounds=[sigmaOptiMin,sigmaOptiMax], method="bounded",
options={'maxiter':maxIter, 'xatol':tolerance } )
self.__C_filt = scipy.ndimage.gaussian_filter( self.__C, result.x )
self.bench['opti1'] = time.time()
if self.verbose >= 2:
print( "Found optimum B-sigma: %.3f"%result.x + ", with peak sig: %.3f"%(1.0/result.fun)+" in %.1f"%(1E3*(self.bench['opti1']-self.bench['opti0']))+" ms" )
elif bool(self.Brad) and self.Bmode =='fourier':
tempComplex = self.__C.astype(fftw_dtype)
self.__FFT2.update_arrays( tempComplex, tempComplex2 ); self.__FFT2.execute()
Bfilter = self.__Bfilter
self.__IFFT2.update_arrays( nz.evaluate( "tempComplex2*Bfilter" ), tempComplex ); self.__IFFT2.execute()
# Conservation of counts with Fourier filtering is not
# very straight-forward.
normConst = 1.0 / self.__C.size # Assumed normalization: the unscaled FFT-IFFT round trip multiplies by C.size
self.__C_filt = nz.evaluate( "real( tempComplex )*normConst" )
elif bool(self.Brad) and (self.Bmode == u'conv' or self.Bmode == u'convolution'):
# Convert self.Brad as an MTF to an equivalent sigma for a PSF
# TODO: Check that Bsigma is correct with Fourier cropping
Bsigma = self.shapePadded / (np.sqrt(2) * np.pi * self.Brad)
# Scipy's gaussian filter conserves total counts
self.__C_filt = scipy.ndimage.gaussian_filter( self.__C, Bsigma )
else: # No filtering
self.__C_filt = self.__C
# Apply maximum shift max mask, if present
if bool( self.maxShift ):
# for previous frame alignment compensation, we need to shift the mask around...
C_filt = self.__C_filt
if bool( self.preShift ):
# print( "In pre-shift" )
# This isn't working with 'refine'
if self.triMode != u'refine':
rolledMask = np.roll( np.roll( self.__mask_maxShift,
np.round(self.__shiftsTriMat[I,J-1,0]).astype('int'), axis=0 ),
np.round(self.__shiftsTriMat[I,J-1,1]).astype('int'), axis=1 )
elif self.triMode == u'refine':
# With refine the matrix is populated like an autocorrelation function.
rolledMask = np.roll( np.roll( self.__mask_maxShift,
np.round(self.__shiftsTriMat[I-1,I-1,0]).astype('int'), axis=0 ),
np.round(self.__shiftsTriMat[I-1,I-1,1]).astype('int'), axis=1 )
pass
C_masked = nz.evaluate("C_filt*rolledMask")
cmaxpos = np.unravel_index( np.argmax( C_masked ), C_masked.shape )
self.__peaksigTriMat[I,J] = (C_masked[cmaxpos] - np.mean(C_filt[rolledMask]))/ np.std(C_filt[rolledMask])
else:
mask_maxShift = self.__mask_maxShift
C_masked = nz.evaluate("C_filt*mask_maxShift")
cmaxpos = np.unravel_index( np.argmax( C_masked ), C_filt.shape )
self.__peaksigTriMat[I,J] = (C_masked[cmaxpos] - np.mean(C_filt[self.__mask_maxShift]))/ np.std(C_filt[self.__mask_maxShift])
else: # No maxShift
C_filt = self.__C_filt # Not aliased in this branch above, so do it here
cmaxpos = np.unravel_index( np.argmax(C_filt), C_filt.shape )
# Use the peak value itself; corrTriMat[I,J] has not been assigned yet at this point
self.__peaksigTriMat[I,J] = (C_filt[cmaxpos] - np.mean(C_filt))/ np.std(C_filt)
if self.saveC:
# Maybe save in a pyTable if it's really needed.
if self.preShift:
self.C.append(self.__C_filt*rolledMask)
else:
self.C.append(self.__C_filt)
if self.subPixReg > 1.0: # Subpixel peak estimation by Fourier interpolation
Csub = C_filt[cmaxpos[0]-self.__subR:cmaxpos[0]+self.__subR, cmaxpos[1]-self.__subR:cmaxpos[1]+self.__subR ]
# Csub is shape [2*subR, 2*subR]
if Csub.shape[0] == 2*self.__subR and Csub.shape[1] == 2*self.__subR:
self.__subFFT2.update_arrays( Csub.astype( fftw_dtype ), self.__CsubFFT ); self.__subFFT2.execute()
# padding has to be done from the middle
# TODO: I think pad has issues with complex numbers?
#CpadFFT = np.pad( np.fft.fftshift(self.__CsubFFT), ((self.subPixReg-1)*self.__subR,), mode=b'constant', constant_values=(0.0,) )
self.__CpadFFT = np.zeros( [self.subPixReg*self.__subR*2,self.subPixReg*self.__subR*2], dtype=fftw_dtype )
# NUMPY BUG: mode has to be a byte string
self.__CpadFFT.real = np.pad( np.fft.fftshift(self.__CsubFFT.real), ((self.subPixReg-1)*self.__subR,), mode=constantPad, constant_values=(0.0,) )
self.__CpadFFT.imag = np.pad( np.fft.fftshift(self.__CsubFFT.imag), ((self.subPixReg-1)*self.__subR,), mode=constantPad, constant_values=(0.0,) )
self.__CpadFFT = np.fft.ifftshift( self.__CpadFFT )
self.__subIFFT2.update_arrays( self.__CpadFFT, self.__Csub_over ); self.__subIFFT2.execute()
# Csub_overAbs = nz.evaluate( "abs( Csub_over )") # This is still complex
Csub_overAbs = np.abs( self.__Csub_over )
Csub_maxpos = np.unravel_index( np.argmax( Csub_overAbs ), Csub_overAbs.shape )
round_pos = cmaxpos - np.array(self.__C.shape)/2.0
# Csub_max is being shifted 1 sub-pixel in the negative direction compared to the integer shift
# because of array centering, hence the np.sign(round_pos)
remainder_pos = Csub_maxpos - np.array(self.__Csub_over.shape)/2.0 + np.sign( round_pos )
remainder_pos /= self.subPixReg
# shiftsTriMat[I,J-1,:] = cmaxpos + np.array( Csub_maxpos, dtype='float' )/ np.float(self.subPixReg) - np.array( [subR, subR] ).astype('float')
self.__shiftsTriMat[I,J,:] = round_pos + remainder_pos
# Switching from FFTpack to pyFFTW has messed up the scaling of the correlation coefficients, so
# scale by (subR*2.0)**2.0
self.__corrTriMat[I,J] = Csub_overAbs[ Csub_maxpos[0], Csub_maxpos[1] ] / (self.__subR*2.0)**2.0
else:
print( "Correlation sub-area too close to maxShift! Subpixel location broken. Consider increasing maxShift." )
self.__shiftsTriMat[I,J,:] = cmaxpos - np.array(self.__C.shape)/2.0
self.__corrTriMat[I,J] = self.__C[ cmaxpos[0], cmaxpos[1] ]
else: # Do integer pixel registration
self.__shiftsTriMat[I,J,:] = cmaxpos - np.array(self.__C.shape)/2.0
self.__corrTriMat[I,J] = self.__C[ cmaxpos[0], cmaxpos[1] ]
del tempComplex, tempComplex2
try:
del mask_maxShift, Bfilter
except: pass
pass
def calcCorrStats( self, currIndex, triIndices ):
# Track the various statistics about the correlation map, mean, std, max, skewness
if currIndex == 0 or self.corrStats is None:
# Mean, std, max, maxposx, maxposy, (val at 0,0), imageI mean, imageI std, imageJ mean, imageJ std = 10 columns
K = np.sum(triIndices)
self.corrStats = {}
self.corrStats[u'K'] = K
self.corrStats[u'meanC'] = np.zeros([K])
self.corrStats[u'varC'] = np.zeros([K])
self.corrStats[u'maxC'] = np.zeros([K])
self.corrStats[u'maxPos'] = np.zeros([K,2])
self.corrStats[u'originC'] = np.zeros([K])
print( "Computing stack mean" )
self.corrStats[u'stackMean'] = np.mean( self.images )
print( "Computing stack variance" )
self.corrStats[u'stackVar'] = np.var( self.images )
self.corrStats[u'meanC'][currIndex] = np.mean(self.__C_filt)
self.corrStats[u'varC'][currIndex] = np.var(self.__C_filt)
self.corrStats[u'maxC'][currIndex] = np.max(self.__C_filt)
self.corrStats[u'maxPos'][currIndex,:] = np.unravel_index( np.argmax(self.__C_filt), \
self.__shapeCropped ) - \
np.array([self.__C_filt.shape[0]/2, self.__C_filt.shape[1]/2])
self.corrStats[u'originC'][currIndex] = self.__C_filt[self.__C.shape[0]//2, self.__C.shape[1]//2]
def shiftsSolver( self, shiftsTriMat_in, corrTriMat_in, peaksigTriMat_in,
acceptedEqns=None, mode='basin', Niter=100 ):
"""
Functional minimization optimization of the triangular correlation matrix
Minimizes the RMS error for the individual frame position equations, and
outputs an error dictionary.
acceptedEqns is 'good' equations as determined by a previous run.
Should always be None for the first iteration.
mode can be 'basin' for the global optimizer or 'local' for the local optimizer.
In general the performance penalty for the global optimizer is trivial.
Niter is the number of iterations for the 'basin' (basin-hopping) global optimizer.
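Sketch of the equation system (illustrative): each measured shift between
frames I and J is modelled as the sum of the unknown per-frame drifts between
them, A x = b. For 4 frames (unknowns x1..x3) with every pair measured, the
x-axis equations are:
    x1 = b01, x2 = b12, x3 = b23
    x1 + x2 = b02, x2 + x3 = b13, x1 + x2 + x3 = b03
and the optimizer minimizes the weighted norm of A x - b.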
"""
# Change to allow the autocorrelations to be present, but we never want them in the solver
shiftsTriMat = shiftsTriMat_in[:-1,1:,:]
corrTriMat = corrTriMat_in[:-1,1:]
peaksigTriMat = peaksigTriMat_in[:-1,1:]
triIndices = corrTriMat.astype( 'bool' )
# Build a dictionary of all the feedback parameters
errorDict = {}
# Append the dictionary to the list of dicts and return it as well
self.errorDictList.append( errorDict )
errorDict['corrTriMat'] = corrTriMat_in
errorDict['peaksigTriMat'] = peaksigTriMat_in
shapeImage = np.array( [self.images.shape[1], self.images.shape[2]] )
N = np.asarray( self.images.shape )[0] - 1
last_col = np.zeros( N, dtype='int' )
#### BUILD VECTORIZED SHIFTS b_x, b_y AND EQUATION COEFFICIENT MATRIX Acoeff
M = 0
for I in np.arange(0,N, dtype='int'):
# Find the last non-zero element in the tri-matrix for each row
# This determines the sub-sampled view for each equation set.
if triIndices[I,:].any():
last_col[I] = np.argwhere(triIndices[I,:])[-1] + 1
M += last_col[I] - I
# For some reason this becomes -1 if we make last_col not float.
M = np.int(M)
Acoeff = np.zeros( [M,N] )
Arow_pos = 0
for I in np.arange(0,N, dtype='int'):
rotview = np.rot90( triIndices[I:last_col[I],I:last_col[I]], k=2 )
Acoeff[ Arow_pos:Arow_pos+rotview.shape[0], I:I+rotview.shape[1] ] = rotview
Arow_pos += rotview.shape[0]
# triIndices = corrTriMat.astype( 'bool' )
# Now we can ravel triIndices and get the indices from that
vectorIndices = np.arange(0,triIndices.size)[np.ravel( triIndices )]
# And this is to go backwards from a vector to an upper-triangular matrix
unravelIndices = np.unravel_index( vectorIndices, [N,N] )
b_x = np.ravel( shiftsTriMat[triIndices,1] )
b_y = np.ravel( shiftsTriMat[triIndices,0] )
#### REMOVE UNACCEPTED EQUATIONS FROM THE SOLVER ####
# This was a cornerstone of MotionCorr but it often leads to problems, so let's avoid it completely
# in favour of deweighting bad equations.
if acceptedEqns is None:
Maccepted = M
acceptedEqns = np.ones_like( b_x, dtype='bool' )
else:
Maccepted = np.sum( acceptedEqns )
print( "Optimization of shifts over M = " + str(Maccepted) + " equations." )
#### WEIGHTS FOR OPTIMIZATION ####
# There's only 2.5 % difference between the weighted and un-weighted versions for the STEM test cases.
# CryoEM would be expected to be higher as the CC's are about 0.001 compared to 0.3
if self.weightMode is None or self.weightMode == u'corr': # use raw correlation scores or peaksig
weights = np.ravel( peaksigTriMat[triIndices] )
elif self.weightMode == u'unweighted': # don't weight peaks
weights = np.ones_like( np.ravel( peaksigTriMat[triIndices] ) )
elif self.weightMode == u'norm' or self.weightMode == u'normalized':
### Scale the weights so that lower correlations count for next-to-nothing
weights = util.normalize( np.ravel( peaksigTriMat[triIndices] ) )
elif self.weightMode == u'autologistic':
# Calculate a logistic from the CDF of the peaksig values
self.cdfLogisticCurve() # Sets peaksigThres, logisticK, and logisticNu
peakSig = np.ravel( peaksigTriMat[triIndices] ).astype( 'float64' )
weights = 1.0 - 1.0 / (1.0 + np.exp( -self.logisticK*(-peakSig + self.peaksigThres) ) )**self.logisticNu
elif self.weightMode == u'logistic':
# Use a fixed
peakSig = np.ravel( peaksigTriMat[triIndices] ).astype( 'float64' )
weights = 1.0 - 1.0 / (1.0 + np.exp( -self.logisticK*(-peakSig + self.peaksigThres) ) )**self.logisticNu
else:
print( "UNKNOWN WEIGHTING METHOD, REVERTING TO CORRELATION SCORES" )
weights = np.ravel( peaksigTriMat[triIndices] )
# logisticCutoff = 0.01 # Value of logistic weight at the cutoff Correlation threshold. Should never, ever be below 0.5
# C_cutoff = (1/self.weightK)* np.log( 1.0 / logisticCutoff - 1 )
# if self.corrThres is None:
# raise AssertionError("Zorro.shiftsSolver requires a correlation threshold for logistical weighting")
# weights = 1.0 / ( 1.0 + self.weightK * np.exp(np.ravel( peaksigTriMat[triIndices] ) - self.corrThres - C_cutoff) )
#### SETUP THE FUNCTIONAL OPTIMIZER ####
pix_tol = 1.0E-5 # The fraction of a pixel we try to solve to (one 100,000th of a pixel)
relativeEst = np.zeros( [N, 2] )
drift_guess = np.zeros( N )
bounds = np.ones( [N,2] )
bounds[:,0] = -1.0
# Bounds scales by self.maxShift * number of frames
if self.maxShift is None:
bounds *= np.min( [shapeImage[0]/2.0, shapeImage[1]/2.0] )
else:
bounds *= np.min( [shapeImage[0]/2.0, shapeImage[1]/2.0, N*self.maxShift] )
if mode == u'local':
#### LOCAL MINIMIZATION X, Y SOLUTION ####
# Is there any value for a simultaneous X-Y solution? No, because the A-coefficient
# matrix would be:
# Acoeff2 = np.hstack( (np.vstack( (Acoeff, zeroA) ), np.vstack( (zeroA, Acoeff) )) )
# So the two sets of equations are completely independent
try:
outX = scipy.optimize.minimize( util.weightedErrorNorm, drift_guess, method="L-BFGS-B",
args=(Acoeff, b_x, weights*acceptedEqns),
bounds=bounds, tol=pix_tol )
# outX = scipy.optimize.minimize( weightedErrorNorm, drift_guess, method="L-BFGS-B",
# args=(Acoeff[acceptedEqns,:], b_x[acceptedEqns], weights[acceptedEqns]),
# bounds=bounds, tol=pix_tol )
relativeEst[:,1] = outX.x
except:
raise RuntimeError( "Error: caught exception on X-minimizer" )
try:
outY = scipy.optimize.minimize( util.weightedErrorNorm, drift_guess, method="L-BFGS-B",
args=(Acoeff, b_y, weights*acceptedEqns),
bounds=bounds, tol=pix_tol )
relativeEst[:,0] = outY.x
except:
raise RuntimeError( "Error: caught exception on Y-minimizer" )
elif mode == u'basin':
#### GLOBAL MINIMIZATION X, Y SOLUTION ####
basinArgs = {}
basinArgs[u"bounds"] = bounds
basinArgs[u"tol"] = pix_tol
basinArgs[u"method"] = u"L-BFGS-B"
basinArgs[u"args"] = (Acoeff, b_x, weights*acceptedEqns)
try:
outX = scipy.optimize.basinhopping( util.weightedErrorNorm, drift_guess, niter=Niter, minimizer_kwargs=basinArgs )
relativeEst[:,1] = outX.x
except:
raise RuntimeError( "Error: caught exception on X-minimizer" )
# basinArgs["args"] = (Acoeff[acceptedEqns], b_y[acceptedEqns], weights[acceptedEqns])
basinArgs[u"args"] = (Acoeff, b_y, weights*acceptedEqns)
try:
outY = scipy.optimize.basinhopping( util.weightedErrorNorm, drift_guess, niter=Niter, minimizer_kwargs=basinArgs )
relativeEst[:,0] = outY.x
except:
raise RuntimeError( "Error: caught exception on Y-minimizer" )
else:
print( "Error: mode not understood by shiftsMinimizer: " + mode )
return
#### ERROR ANALYSIS (for precision of estimated shifts) ####
acceptedEqnsUnraveled = np.zeros( [N,N] )
acceptedEqnsUnraveled[unravelIndices[0], unravelIndices[1]] = acceptedEqns
acceptedEqnsUnraveled = np.pad( acceptedEqnsUnraveled, ((0,1),(1,0)), mode=constantPad )
# Ok so how big is relativeEst? Can we add in zeros?
# Or maybe I should just give weights as weights*acceptedEqnsUnr
errorXY = np.zeros( [M,2] )
############# Unweighted error ################
"""
errorXY[:,1] = np.dot( Acoeff, relativeEst[:,1] ) - b_x
errorXY[:,0] = np.dot( Acoeff, relativeEst[:,0] ) - b_y
errorNorm = np.sqrt( errorXY[:,0]*errorXY[:,0] + errorXY[:,1]*errorXY[:,1] )
mean_errorNorm = np.mean( errorNorm[acceptedEqns] )
std_errorNorm = np.std( errorNorm[acceptedEqns] )
# Error unraveled (i.e. back to the upper triangular matrix form)
errorUnraveled = np.zeros( [N,N] )
errorXun = np.zeros( [N,N] )
errorYun = np.zeros( [N,N] )
errorUnraveled[unravelIndices[0], unravelIndices[1]] = errorNorm
errorXun[unravelIndices[0], unravelIndices[1]] = np.abs( errorXY[:,1] )
errorYun[unravelIndices[0], unravelIndices[1]] = np.abs( errorXY[:,0] )
errorXun = np.pad( errorXun, ((0,1),(1,0)), mode=constantPad )
errorYun = np.pad( errorYun, ((0,1),(1,0)), mode=constantPad )
triPadded = np.pad( triIndices, ((0,1),(1,0)), mode=constantPad )
# Mask out un-used equations from error numbers
errorYun = errorYun * acceptedEqnsUnraveled
errorXun = errorXun * acceptedEqnsUnraveled
triPadded = triPadded * acceptedEqnsUnraveled
# errorX and Y are per-frame error estimates
errorX = np.zeros( N+1 )
errorY = np.zeros( N+1 )
# Sum horizontally and vertically, keeping in mind diagonal is actually at x-1
for J in np.arange(0,N+1):
# Here I often get run-time warnings, which suggests a divide-by-zero or similar.
try:
errorX[J] = ( np.sum( errorXun[J,:]) + np.sum(errorXun[:,J-1]) ) / ( np.sum( triPadded[J,:]) + np.sum(triPadded[:,J-1]) )
except:
pass
try:
errorY[J] = ( np.sum( errorYun[J,:]) + np.sum(errorYun[:,J-1]) ) / ( np.sum( triPadded[J,:]) + np.sum(triPadded[:,J-1]) )
except:
pass
"""
################## Weighted error ######################
# Make any zero weight just very small
weights = np.clip( weights, 1E-6, np.Inf )
errorXY[:,1] = np.dot( Acoeff, relativeEst[:,1] ) - b_x
errorXY[:,0] = np.dot( Acoeff, relativeEst[:,0] ) - b_y
errorNorm = np.sqrt( errorXY[:,0]*errorXY[:,0] + errorXY[:,1]*errorXY[:,1] )
acceptedErrorNorm = errorNorm[acceptedEqns]
mean_errorNorm = np.sum( weights * acceptedErrorNorm ) / np.sum(weights)
mean_unweighted = np.mean( errorNorm[acceptedEqns] )
# print( "RMS: " + str(np.sum( weights * acceptedErrorNorm**2 )) )
# print( "Normed RMS: " + str(np.sum( weights * acceptedErrorNorm**2 ) / np.sum(weights)))
# print( "mean_errorNorm**2 + " + str( mean_errorNorm**2 ))
std_errorNorm = np.sqrt( np.sum( weights * acceptedErrorNorm**2 )
/ np.sum(weights) - mean_errorNorm**2 )
# np.sqrt( np.sum( unalignedHist * unalignedCounts**2 )/ sumFromHist - meanFromHist*meanFromHist )
std_unweighted = np.std( acceptedErrorNorm )
# print( "sum(acceptedErrorNorm): %f" % np.sum(acceptedErrorNorm) )
print( "MEAN ERROR (weighted: %f | unweighted: %f )" % (mean_errorNorm, mean_unweighted) )
print( "STD ERROR (weighted: %f | unweighted: %f )" % (std_errorNorm, std_unweighted) )
# Error unraveled (i.e. back to the upper triangular matrix form)
errorUnraveled = np.zeros( [N,N] )
errorXun = np.zeros( [N,N] )
errorYun = np.zeros( [N,N] )
weightsUn = np.zeros( [N,N] )
errorUnraveled[unravelIndices[0], unravelIndices[1]] = errorNorm
weightsUn[unravelIndices[0], unravelIndices[1]] = weights
errorXun[unravelIndices[0], unravelIndices[1]] = np.abs( errorXY[:,1] )
errorYun[unravelIndices[0], unravelIndices[1]] = np.abs( errorXY[:,0] )
errorXun = np.pad( errorXun, ((0,1),(1,0)), mode=constantPad )
errorYun = np.pad( errorYun, ((0,1),(1,0)), mode=constantPad )
triPadded = np.pad( triIndices, ((0,1),(1,0)), mode=constantPad )
weightsUn = np.pad( weightsUn, ((0,1),(1,0)), mode=constantPad )
# DEBUG: weighted error trimats
# plot.ims( (errorXun, weightsUn, errorYun, acceptedEqnsUnraveled), titles=( "errorXun","weightsUn","errorYun", "AcceptedUnraveled") )
# Mask out un-used equations from error numbers
errorYun = errorYun * acceptedEqnsUnraveled
errorXun = errorXun * acceptedEqnsUnraveled
triPadded = triPadded * acceptedEqnsUnraveled
# errorX and Y are per-frame error estimates
errorX = np.zeros( N+1 )
errorY = np.zeros( N+1 )
# Sum horizontally and vertically, keeping in mind diagonal is actually at x-1
for J in np.arange(0,N+1):
try:
errorX[J] = ( ( np.sum( errorXun[J,:]*weightsUn[J,:]) + np.sum(errorXun[:,J-1]*weightsUn[:,J-1]) ) /
( np.sum( weightsUn[J,:]) + np.sum(weightsUn[:,J-1]) ) )
except:
print( "Warning: per-frame error estimation failed, possibly due to zero-weight in solution solver" )
try:
errorY[J] = ( ( np.sum( errorYun[J,:]*weightsUn[J,:]) + np.sum(errorYun[:,J-1]*weightsUn[:,J-1]) ) /
( np.sum( weightsUn[J,:]) + np.sum(weightsUn[:,J-1]) ) )
except:
print( "Warning: per-frame error estimation failed, possibly due to zero-weight in solution solver" )
#### END WEIGHTED ERROR ############
# translations (to apply) are the negative of the found shifts
errorDict[u'translations'] = -np.vstack( (np.zeros([1,2]), np.cumsum( relativeEst, axis=0 ) ) )
errorDict[u'relativeEst'] = relativeEst
errorDict[u'acceptedEqns'] = acceptedEqns
# Not necessary to save triIndices, it's the non-zero elements of corrTriMat
# errorDict['triIndices'] = triIndices
errorDict[u'weights'] = weights
errorDict[u'errorXY'] = errorXY
errorDict[u'shiftsTriMat'] = shiftsTriMat_in
errorDict[u'errorX'] = errorX
errorDict[u'errorY'] = errorY
errorDict[u'errorUnraveled'] = errorUnraveled
errorDict[u'mean_errorNorm'] = mean_errorNorm
errorDict[u'std_errorNorm'] = std_errorNorm
errorDict[u'M'] = M
errorDict[u'Maccepted'] = Maccepted
return errorDict
def alignImageStack( self ):
"""
alignImageStack does a masked cross-correlation on a set of images.
masks can be a single mask, in which case it is re-used for each image, or
individual for each corresponding image.
Subpixel shifting is usually done with a large, shifted Lanczos resampling kernel.
This was found to be faster than with a phase gradient in Fourier space.
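A minimal usage sketch (movieStack is a hypothetical [m, Ny, Nx] numpy array):
    zreg = ImageRegistrator()
    zreg.images = movieStack
    zreg.triMode = u'diag'; zreg.diagWidth = 5
    zreg.alignImageStack()
    alignedSum = zreg.imageSum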
"""
# Setup threading, pyFFTW is set elsewhere in planning
if self.n_threads is None:
self.n_threads = nz.detect_number_of_cores()
else:
nz.set_num_threads( self.n_threads )
print( "Numexprz using %d threads and float dtype: %s" % (nz.nthreads, float_dtype) )
# Baseline un-aligned stack, useful for seeing gain reference problems
# self.unalignedSum = np.sum( self.images, axis=0 )
if np.any( self.shapeBinned ):
self.binStack()
# It's generally more robust to do the hot pixel filtering after binning
# from SuperRes.
if self.filterMode is not None and 'hot' in self.filterMode.lower():
self.hotpixFilter()
# Do CTF measurement first, so we save processing if it can't fit the CTF
# Alternatively if CTFProgram == 'ctffind,sum' this is performed after alignment.
if bool(self.CTFProgram):
splitCTF = self.CTFProgram.lower().replace(' ','').split(',')
if len(splitCTF) == 1 and ( splitCTF[0] == u'ctffind' or splitCTF[0] == u'ctffind4.1'):
self.execCTFFind41( movieMode=True )
elif len(splitCTF) == 1 and ( splitCTF[0] == u'ctffind4' ):
self.execCTFFind4( movieMode=True )
elif len(splitCTF) == 1 and (splitCTF[0] == u'gctf'): # Requires CUDA and GPU
self.execGCTF( movieMode=True )
"""
Registration, first run: Call xcorrnm2_tri to do the heavy lifting
"""
if self.xcorrMode.lower() == 'zorro':
"""
Application of padding.
"""
if np.any(self.shapePadded):
self.padStack()
self.xcorrnm2_tri()
"""
Functional minimization over system of equations
"""
self.bench['solve0'] = time.time()
if self.triMode == u'first':
self.translations = -self.__shiftsTriMat[0,:]
self.errorDictList.append({})
self.errorDictList[-1][u'shiftsTriMat'] = self.__shiftsTriMat
self.errorDictList[-1][u'corrTriMat'] = self.__corrTriMat
self.errorDictList[-1][u'originTriMat'] = self.__originTriMat
self.errorDictList[-1][u'peaksigTriMat'] = self.__peaksigTriMat
self.errorDictList[-1][u'translations'] = self.translations.copy()
elif self.triMode == u'refine':
self.errorDictList.append({})
self.errorDictList[-1][u'shiftsTriMat'] = self.__shiftsTriMat
self.errorDictList[-1][u'corrTriMat'] = self.__corrTriMat
self.errorDictList[-1][u'originTriMat'] = self.__originTriMat
self.errorDictList[-1][u'peaksigTriMat'] = self.__peaksigTriMat
m = self.images.shape[0]
self.translations = np.zeros( [m,2], dtype='float32' )
for K in np.arange(m):
self.translations[K,:] = -self.__shiftsTriMat[K,K,:]
self.errorDictList[-1][u'translations'] = self.translations.copy()
else:
# Every round of shiftsSolver makes an error dictionary
self.shiftsSolver( self.__shiftsTriMat, self.__corrTriMat, self.__peaksigTriMat )
self.errorDictList[-1][u'originTriMat'] = self.__originTriMat
self.translations = self.errorDictList[-1][u'translations'].copy( order='C' )
self.bench['solve1'] = time.time()
"""
Alignment and projection through Z-axis (averaging)
"""
if np.any(self.shapePadded): # CROP back to original size
self.cropStack()
self.applyShifts()
elif self.xcorrMode.lower() == 'unblur v1.02':
self.xcorr2_unblur1_02()
elif self.xcorrMode.lower() == 'motioncorr v2.1':
self.xcorr2_mc2_1()
elif self.xcorrMode.lower() == 'move only':
pass
else:
raise ValueError( "Zorro.alignImageStack: Unknown alignment tool %s" % self.xcorrMode )
# Calculate CTF on aligned sum if requested
if bool(self.CTFProgram) and len(splitCTF) >= 2 and splitCTF[1]== u'sum':
if splitCTF[0] == u'ctffind' or splitCTF[0] == u'ctffind4.1':
self.execCTFFind41( movieMode=False )
elif splitCTF[0] == u'ctffind4':
self.execCTFFind4( movieMode=False )
elif splitCTF[0] == u'gctf': # Requires CUDA
self.execGCTF( movieMode=False )
if bool(self.doEvenOddFRC):
self.evenOddFouRingCorr()
elif bool(self.doLazyFRC): # Even-odd FRC has priority
self.lazyFouRingCorr()
# Apply filters as a comma-separated list. Whitespace is ignored.
if bool( self.filterMode ):
splitFilter = self.filterMode.lower().replace(' ','').split(',')
if len(splitFilter) > 0:
self.bench['dose0'] = time.time()
for filt in splitFilter:
if filt == u"dose" and not "unblur" in self.xcorrMode.lower():
print( "Generating dose-filtered sum" )
# Dose filter will ALWAYS overwrite self.filtSum because it has to work with individual frames
self.doseFilter( normalize=False )
elif filt == u"dosenorm" and not "unblur" in self.xcorrMode.lower():
print( "Generating Fourier-magnitude normalized dose-filtered sum" )
# Dose filter will ALWAYS overwrite self.filtSum because it has to work with individual frames
self.doseFilter( normalize=True )
elif filt == u"background":
print( "Removing 2D Gaussian background from micrograph" )
if not np.any(self.filtSum):
self.filtSum = self.imageSum.copy()
self.filtSum -= util.backgroundEstimate( self.filtSum )
elif filt == u"gausslp":
print( "TODO: implement parameters for gauss filter" )
if not np.any(self.filtSum):
self.filtSum = self.imageSum.copy()
self.filtSum = scipy.ndimage.gaussian_filter( self.filtSum, 3.0 )
self.bench['dose1'] = time.time()
self.cleanPrivateVariables()
pass # End of alignImageStack
def cleanPrivateVariables(self):
"""
Remove all private ("__") variables so the memory they occupy is released.
"""
# TODO: go through the code and see if there's anything large leftover.
try: del self.__FFT2, self.__IFFT2
except: pass
try: del self.__subFFT2, self.__subIFFT2
except: pass
try: del self.__imageFFT
except: pass
try: del self.__Bfilter
except: pass
try: del self.__baseImageFFT, self.__baseMaskFFT, self.__baseSquaredFFT, self.__C
except: pass
def applyShifts( self ):
self.bench['shifts0'] = time.time()
# Apply centroid origin, or origin at frame #0 position?
if self.originMode == u'centroid':
centroid = np.mean( self.translations, axis=0 )
self.translations -= centroid
# if self.originMode == None do nothing
shifts_round = np.round( self.translations ).astype('int')
#shifts_remainder = self.translations - shifts_round
# Use RAMutil.imageShiftAndCrop to do a non-circular shift of the images to
# integer pixel shifts, then subpixel with Lanczos
m = self.images.shape[0] # image count
if self.subPixReg > 1.0 and self.shiftMethod == u'fourier':
# Fourier gradient subpixel shift
# Setup FFTs for shifting.
FFTImage = np.empty( self.shapePadded, dtype=fftw_dtype )
RealImage = np.empty( self.shapePadded, dtype=fftw_dtype )
normConst = 1.0 / (self.shapePadded[0]*self.shapePadded[1])
# Make pyFFTW objects
_, IFFT2 = util.pyFFTWPlanner( FFTImage, wisdomFile=os.path.join( self.cachePath, "fftw_wisdom.pkl" ), effort = self.fftw_effort, n_threads=self.n_threads, doForward=False )
[xmesh, ymesh] = np.meshgrid( np.arange(-RealImage.shape[1]/2,RealImage.shape[1]/2) / np.float(RealImage.shape[1] ),
np.arange(-RealImage.shape[0]/2,RealImage.shape[0]/2)/np.float(RealImage.shape[0]) )
twoj_pi = np.complex64( -2.0j * np.pi )
for J in np.arange(0,m):
tX = self.translations[J,1]; tY = self.translations[J,0] # Scalar shifts for this frame
FFTImage = self.__imageFFT[J,:,:] * np.fft.fftshift( nz.evaluate( "exp(twoj_pi * (xmesh*tX + ymesh*tY))") )
IFFT2.update_arrays( FFTImage, RealImage ); IFFT2.execute()
# Normalize and reduce to float32
if self.images.shape[1] < RealImage.shape[0] or self.images.shape[2] < RealImage.shape[1]:
self.images[J,:,:] = np.real( nz.evaluate( "normConst * real(RealImage)" ) ).astype(self.images.dtype)[:self.images.shape[1],:self.images.shape[2]]
else:
self.images[J,:,:] = np.real( nz.evaluate( "normConst * real(RealImage)" ) ).astype(self.images.dtype)
if self.verbose: print( "Correction (fourier) "+ str(np.around(self.translations[J,:],decimals=4))+" applied to image: " + str(J) )
elif self.subPixReg > 1.0 and self.shiftMethod == u'lanczos':
# Lanczos realspace shifting
util.lanczosSubPixShiftStack( self.images, self.translations, n_threads=self.n_threads )
# Original unparallelized version
# shifts_remainder = self.translations - shifts_round
# for J in np.arange(0,m):
# # self.images[J,:,:] = util.imageShiftAndCrop( self.images[J,:,:], shifts_round[J,:] )
# #Roll the image instead to preserve information in the stack, in case someone deletes the original
# self.images[J,:,:] = np.roll( np.roll( self.images[J,:,:], shifts_round[J,0], axis=0 ), shifts_round[J,1], axis=1 )
#
# self.images[J,:,:] = util.lanczosSubPixShift( self.images[J,:,:], subPixShift=shifts_remainder[J,:], kernelShape=5, lobes=3 )
#
# if self.verbose: print( "Correction (lanczos) "+ str(np.around(self.translations[J,:],decimals=4))+" applied to image: " + str(J) )
else:
for J in np.arange(0,m):
# self.images[J,:,:] = util.imageShiftAndCrop( self.images[J,:,:], shifts_round[J,:] )
#Roll the image instead to preserve information in the stack, in case someone deletes the original
self.images[J,:,:] = np.roll( np.roll( self.images[J,:,:], shifts_round[J,0], axis=0 ), shifts_round[J,1], axis=1 )
if self.verbose: print( "Correction (integer) "+ str(shifts_round[J,:])+" applied to image: " + str(J) )
# Also do masks (single-pixel precision only) if seperate for each image
if self.masks is not None and self.masks.shape[0] > 1:
for J in np.arange(0,m):
self.masks[J,:,:] = util.imageShiftAndCrop( self.masks[J,:,:], shifts_round[J,:] )
# Build sum
self.imageSum = np.sum( self.images, axis=0 )
# Clean up numexpr pointers
try: del normConst, tX, tY, twoj_pi
except: pass
self.bench['shifts1'] = time.time()
def __lanczosSubPixShiftStack( self ):
tPool = mp.ThreadPool( self.n_threads )
slices = self.images.shape[0]
# Build parameters list for the threaded processeses, consisting of index
tArgs = [None] * slices
for J in np.arange(slices):
tArgs[J] = (J, self.images, self.translations)
# All operations are done 'in-place'
tPool.map( util.lanczosIndexedShift, tArgs )
tPool.close()
tPool.join()
pass
def binStack( self, binKernel = 'fourier' ):
"""
binKernel can be 'lanczos2' or 'fourier', which does a Lanczos resampling or Fourier cropping,
respectively. Lanczos kernel can only resample by powers of 2 at present.
The Lanczos kernel has some aliasing problems at present so its use isn't advised yet.
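E.g. Fourier-crop binning a [3838, 3710] stack to shapeBinned = [1919, 1855]
gives binScale = 2 on each axis, discards spatial frequencies above the new
Nyquist, and doubles self.pixelsize. (Illustrative numbers only.)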
"""
self.bench['bin0'] = time.time()
bShape2 = (np.array( self.shapeBinned ) / 2).astype('int')
binScale = np.array( [self.images.shape[1], self.images.shape[2]] ) / np.array( self.shapeBinned )
self.pixelsize *= np.mean( binScale )
print( "Binning stack from %s to %s" % (str(self.images.shape[1:]),str(self.shapeBinned)))
if binKernel == u'lanczos2':
import math
binFact = [ 2.0 ** np.floor( math.log( binScale[0], 2 ) ), 2.0 ** np.floor( math.log( binScale[1], 2 ) ) ] # Nearest power-of-2 binning factor per axis
# Add some error checking if binShape isn't precisely the right size.
print( "binFact = " + str(binFact) )
# 2nd order Lanczos kernel
lOrder = 2
xWin = np.arange( -lOrder, lOrder + 1.0/binFact[1], 1.0/binFact[1] )
yWin = np.arange( -lOrder, lOrder + 1.0/binFact[0], 1.0/binFact[0] )
xWinMesh, yWinMesh = np.meshgrid( xWin, yWin )
rmesh = np.sqrt( xWinMesh*xWinMesh + yWinMesh*yWinMesh )
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
windowKernel = (lOrder/(np.pi*np.pi*rmesh*rmesh)) * np.sin( np.pi / lOrder * rmesh ) * np.sin( np.pi * rmesh )
windowKernel[ yWin==0, xWin==0 ] = 1.0
print( windowKernel.shape )
binArray = np.zeros( [self.images.shape[0], self.shapeBinned[0], self.shapeBinned[1]], dtype='float32' )
for J in np.arange( self.images.shape[0] ):
# TODO: switch from squarekernel to an interpolator so we can use non-powers of 2
binArray[J,:,:] = util.squarekernel( scipy.ndimage.convolve( self.images[J,:,:], windowKernel ),
k= binFact[0] )
elif binKernel == u'fourier':
binArray = np.zeros( [self.images.shape[0], self.shapeBinned[0], self.shapeBinned[1]], dtype='float32' )
FFTImage = np.zeros( [ self.images.shape[1], self.images.shape[2] ], dtype=fftw_dtype)
FFTBinned = np.zeros( self.shapeBinned, dtype=fftw_dtype )
FFT2, _ = util.pyFFTWPlanner( FFTImage, FFTImage,
wisdomFile=os.path.join( self.cachePath, "fftw_wisdom.pkl" ),
effort = self.fftw_effort, n_threads=self.n_threads, doReverse=False )
_, IFFT2bin = util.pyFFTWPlanner( FFTBinned, FFTBinned,
wisdomFile=os.path.join( self.cachePath, "fftw_wisdom.pkl" ),
effort = self.fftw_effort, n_threads=self.n_threads, doForward=False )
ImageBinned = np.zeros( self.shapeBinned, dtype=fftw_dtype )
normConst = 1.0 / (self.shapeBinned[0]*self.shapeBinned[1])
for J in np.arange( self.images.shape[0] ):
FFT2.update_arrays( self.images[J,:,:].astype( fftw_dtype ), FFTImage ); FFT2.execute()
# Crop
FFTBinned[:bShape2[0],:bShape2[1]] = FFTImage[:bShape2[0],:bShape2[1]]
FFTBinned[:bShape2[0], -bShape2[1]:] = FFTImage[:bShape2[0], -bShape2[1]:]
FFTBinned[-bShape2[0]:,:bShape2[1]] = FFTImage[-bShape2[0]:,:bShape2[1]]
FFTBinned[-bShape2[0]:,-bShape2[1]:] = FFTImage[-bShape2[0]:,-bShape2[1]:]
# Normalize
FFTBinned *= normConst
# Invert
IFFT2bin.update_arrays( FFTBinned, ImageBinned ); IFFT2bin.execute()
# De-complexify
binArray[J,:,:] = np.real( ImageBinned )
pass
pass
del self.images
self.images = binArray
self.bench['bin1'] = time.time()
def padStack( self, padSize=None, interiorPad=0 ):
"""
This function is used to zero-pad both the images and masks. This breaks
the circular shift issues.
Defaults to self.shapePadded
It can also improve performance as FFTW is fastest for dimensions that are powers of 2,
and still fast for powers of 2, 3, and 5. Weird dimensions should then be padded
to an optimized size, which the helper function FindValidFFTDim can provide good
guesses for.
In general try to have 20 % of your total number of pixels within the mask to reduce
floating-point round-off error in the masked cross-correlation.
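E.g. a 3838 x 3710 frame could be padded to [4096, 3750], since 4096 = 2**12
and 3750 = 2 * 3 * 5**4 are both FFTW-friendly sizes. (Illustrative numbers
only.)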
"""
# Take the stack and zero-pad it
# Unfortunately this step is memory intensive as we need to make a new array
# to copy the values of the old one into.
self.bench['pad0'] = time.time()
if padSize is None:
padSize = self.shapePadded
if not np.any(padSize):
print( "Cannot pad to: " + str(padSize) )
return
m = self.images.shape[0]
self.shapeOriginal = [ self.images.shape[1], self.images.shape[2] ]
self.shapePadded = padSize # This needs to be recorded for undoing the padding operation
print( "Padding images and masks to shape: " + str(padSize) )
paddedImages = np.zeros( [m, padSize[0], padSize[1]], dtype=self.images.dtype )
paddedImages[:,:self.shapeOriginal[0],:self.shapeOriginal[1]] = self.images
self.images = paddedImages
# Then make or pad the mask appropriately.
if self.masks is None:
self.masks = np.zeros( [1,padSize[0],padSize[1]], dtype='bool', order='C' )
if interiorPad > 0:
self.masks[0, interiorPad:self.shapeOriginal[0]-interiorPad,
interiorPad:self.shapeOriginal[1]-interiorPad] = 1.0
else:
self.masks[0,:self.shapeOriginal[0], :self.shapeOriginal[1] ] = 1.0
else:
if self.masks.shape[1] != self.shapePadded[0] or self.masks.shape[2] != self.shapePadded[1]:
mmask = self.masks.shape[0]
paddedMasks = np.zeros( [mmask, padSize[0], padSize[1]], dtype=self.masks.dtype )
paddedMasks[:,:self.shapeOriginal[0],:self.shapeOriginal[1]] = self.masks
self.masks = paddedMasks
pass # else do nothing
pass
self.bench['pad1'] = time.time()
def cropStack( self, cropSize=None ):
"""
Undoes the operation from ImageRegistrator.padStack()
Defaults to self.shapeOriginal.
"""
if cropSize is None:
cropSize = self.shapeOriginal
if not bool(cropSize):
print( "Cannot crop to: " + str(cropSize) )
return
print( "Cropping auto-applied mask pixels back to shape: " + str(self.shapeOriginal) )
self.images = self.images[ :, :cropSize[0], :cropSize[1] ]
# Crop masks too
self.masks = self.masks[ :, :cropSize[0], :cropSize[1] ]
# And sum if present
if self.imageSum is not None:
self.imageSum = self.imageSum[ :cropSize[0], :cropSize[1] ]
def cdfLogisticCurve( self, errIndex = -1, bins = None ):
"""
Calculate the cumulative distribution function of the peak significance scores, and fit a logistic
curve to them, for deriving a weighting function.
"""
# The error dict list doesn't normally exist here.
peaksigTriMat = self.errorDictList[errIndex]['peaksigTriMat']
peaksigs = peaksigTriMat[ peaksigTriMat > 0.0 ]
if bins is None:
bins = np.int( peaksigs.size/7.0 )
[pdfPeaks, hSigma ] = np.histogram( peaksigs, bins=bins )
hSigma = hSigma[:-1]
pdfPeaks = pdfPeaks.astype( 'float32' )
cdfPeaks = np.cumsum( pdfPeaks )
cdfPeaks /= cdfPeaks[-1]
# BASIN-HOPPING
basinArgs = {}
bounds = ( (np.min(peaksigs), np.max(peaksigs)), (0.1, 20.0), (0.05, 5.0) )
basinArgs[u"bounds"] = bounds
basinArgs[u"tol"] = 1E-6
basinArgs[u"method"] = u"L-BFGS-B"
basinArgs[u"args"] = ( hSigma, cdfPeaks )
# x is [sigmaThres, K, Nu]
x0 = [np.mean(peaksigs), 5.0, 1.0]
outBasin = scipy.optimize.basinhopping( util.minLogistic, x0, niter=50, minimizer_kwargs=basinArgs )
# Set the logistics curve appropriately.
self.peaksigThres = outBasin.x[0]
self.logisticK = outBasin.x[1]
self.logisticNu = outBasin.x[2]
# Diagnostics (for plotting)
self.errorDictList[errIndex][u'pdfPeaks'] = pdfPeaks
self.errorDictList[errIndex][u'cdfPeaks'] = cdfPeaks
self.errorDictList[errIndex][u'hSigma'] = hSigma
self.errorDictList[errIndex][u'logisticNu'] = self.logisticNu
self.errorDictList[errIndex][u'logisticK'] = self.logisticK
self.errorDictList[errIndex][u'peaksigThres'] = self.peaksigThres
pass
def velocityCull( self, velocityThres=None ):
"""
Computes the pixel velocities from the translations, averaging the forward and
backward frame-to-frame speed estimates at each position (a central-difference
scheme that inherently has some mild low-pass filtering built-in).
TODO: this would be better off using a spline interpolation (see smoothTrajectory()) to
estimate the local velocity than numerical differentiation.
if velocityThres == None, self.velocityThres is used.
if velocityThres < 0.0, no thresholding is applied (i.e. good for just
computing the velocity to produce plots)
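I.e. with d_m = || translations[m+1] - translations[m] ||, the estimate is
    v_0 = d_0, v_m = (d_{m-1} + d_m) / 2, v_{M-1} = d_{M-2}
for a stack of M frames.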
"""
velo_diff2 = np.diff( self.translations, axis=0 )
speed_diff2 = np.sqrt( np.sum( velo_diff2**2.0, axis=1 ))
self.velocities = np.zeros( [self.translations.shape[0]] )
self.velocities[0] = speed_diff2[0]
self.velocities[1:-1] = 0.5*(speed_diff2[:-1] + speed_diff2[1:])
self.velocities[-1] = speed_diff2[-1]
# Establish what velocities we should crop?
plt.figure()
plt.plot( np.arange(0,self.velocities.shape[0]), self.velocities, 'o-k' )
plt.xlabel( 'Frame number, m' )
plt.ylabel( 'Pixel velocity, v (pix/frame)' )
# TODO: this is fairly useless due to noise; proper minimum-acceleration spline fits would work
# much better I suspect
print( "Velocity culling still under development, useful only for diagnostics at present." )
pass
def smoothTrajectory( self, dampen = 0.5 ):
"""
Fit a dampened spline to the translations. This seems to be most useful for refinement as it has been
shown in UnBlur to help break correlated noise systems. It reminds me a bit of simulated annealing
but the jumps aren't random.
dampen should be std of position estimates, so about 0.25 - 1.0 pixels. If generating smoothing for
velocity estimation use a higher dampening factor.
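Usage sketch: the smoothed track is returned rather than applied, so the
caller decides whether to adopt it, e.g.
    zreg.translations = zreg.smoothTrajectory( dampen=0.5 )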
"""
if self.translations is None or not np.any( self.translations ):
print( "smoothTrajectory requires an estimate for translations" )
return
import scipy.interpolate
frames = np.arange( self.translations.shape[0] )
ySplineObj = scipy.interpolate.UnivariateSpline( frames, self.translations[:,0], k=5, s=dampen )
xSplineObj = scipy.interpolate.UnivariateSpline( frames, self.translations[:,1], k=5, s=dampen )
smoothedTrans = np.zeros( self.translations.shape )
smoothedTrans[:,0] = ySplineObj(frames); smoothedTrans[:,1] = xSplineObj(frames)
return smoothedTrans
def calcIncoherentFourierMag( self ):
"""
Compute the Fourier transform of each frame in the movie and average
the Fourier-space magnitudes. This gives a baseline to compare how
well the alignment did versus the spatial information content of the
individual images.
This is the square root of the power spectrum.
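I.e. incohFouMag = (1/m) * sum_J |FFT2( images[J,:,:] )| (fftshift-ed), to be
compared against the coherent magnitude |FFT2( imageSum )| of the aligned sum.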
"""
frameFFT = np.empty( self.images.shape[1:], dtype=fftw_dtype )
self.incohFouMag = np.zeros( self.images.shape[1:], dtype=float_dtype )
FFT2, _ = util.pyFFTWPlanner( frameFFT, wisdomFile=os.path.join( self.cachePath, "fftw_wisdom.pkl" ), n_threads = self.n_threads, doReverse=False )
for J in np.arange(0,self.images.shape[0]):
FFT2.update_arrays( np.squeeze( self.images[J,:,:]).astype(fftw_dtype), frameFFT ); FFT2.execute()
self.incohFouMag += np.abs( frameFFT )
pass
self.incohFouMag = np.fft.fftshift( self.incohFouMag / self.images.shape[0] )
def evenOddFouRingCorr( self, xcorr = 'tri', box=[512,512], overlap=0.5, debug=False ):
"""
Separates the frames into even and odd frames and tries to calculate a
Fourier Ring Correlation (FRC) from the two sets. Oscillations in the
FRC are normal for this application because of the objective contrast
transfer function. Note: this function is not well-optimized. It reloads
the data from disk several times to conserve memory.
THIS FUNCTION DESTROYS THE DATA IN THE OBJECT.
xcorr = 'tri' uses the zorro approach.
xcorr = 'mc' tries to use dosefgpu_driftcorr (Motioncorr)
xcorr = 'unblur' uses UnBlur
box is the shape of the moving window, and limits the maximum
resolution the FRC is calculated to.
If you plan to run both, use 'mc' first.
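Usage sketch (destructive, so operate on a copy of the data if it matters):
    evenReg, oddReg = zreg.evenOddFouRingCorr( xcorr='tri', box=[512,512] )
    frc1D = zreg.FRC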
"""
self.bench['frc0'] = time.time()
m = self.images.shape[0]
evenIndices = np.arange(0, m, 2)
oddIndices = np.arange(1, m, 2)
original_configName = self.files[u'config']
import uuid
tempLogName = str(uuid.uuid4() ) + u".zor"
self.saveConfig( tempLogName )
self.files[u'config'] = original_configName # Restore original configuration file.
evenReg = ImageRegistrator()
evenReg.loadConfig( tempLogName )
evenReg.images = self.images[evenIndices,:,:].copy(order='C')
oddReg = ImageRegistrator()
oddReg.loadConfig( tempLogName )
oddReg.images = self.images[oddIndices,:,:].copy(order='C')
if xcorr == u'tri' or xcorr is None:
if self.masks is None:
evenReg.masks = util.edge_mask( maskShape=[ self.images.shape[1], self.images.shape[2] ] )
oddReg.masks = evenReg.masks
elif self.masks.shape[0] > 1:
evenReg.masks = self.masks[evenIndices,:,:]
oddReg.masks = self.masks[oddIndices,:,:]
elif self.masks.shape[0] == 1:
evenReg.masks = self.masks
oddReg.masks = self.masks
print( "##### Zorro even frames alignment #####" )
evenReg.alignImageStack()
self.transEven = evenReg.translations.copy( order='C' )
print( "##### Zorro odd frames alignment #####" )
oddReg.alignImageStack()
self.transOdd = oddReg.translations.copy( order='C' )
elif xcorr == 'mc':
print( "##### Motioncorr even frames alignment #####" )
evenReg.xcorr2_mc( loadResult = False )
evenReg.applyShifts()
self.transEven = evenReg.translations.copy( order='C' )
print( "##### Motioncorr odd frames alignment #####" )
oddReg.xcorr2_mc( loadResult = False )
oddReg.applyShifts()
self.transOdd = oddReg.translations.copy( order='C' )
elif xcorr == 'unblur':
print( "##### UnBlur even frames alignment #####" )
evenReg.xcorr2_unblur( loadResult=False )
evenReg.applyShifts()
self.transEven = evenReg.translations.copy( order='C' )
print( "##### UnBlur odd frames alignment #####" )
oddReg.xcorr2_unblur( loadResult=False )
oddReg.applyShifts()
self.transOdd = oddReg.translations.copy( order='C' )
else:
print( "Unknown xcorr method for even-odd FRC: " + str(xcorr) )
print( "##### Computing even-odd Fourier ring correlation #####" )
eoReg = ImageRegistrator()
eoReg.loadConfig( tempLogName )
eoReg.images = np.empty( [2, evenReg.imageSum.shape[0], evenReg.imageSum.shape[1] ], dtype=float_dtype)
eoReg.images[0,:,:] = evenReg.imageSum; eoReg.images[1,:,:] = oddReg.imageSum
eoReg.triMode = u'first'
try: os.remove( tempLogName )
except OSError: print( "Could not remove temporary log file: " + tempLogName )
# This actually aligns the two phase images
# We use Zorro for this for all methods because we have more trust in the masked, normalized
# cross correlation
eoReg.alignImageStack()
# Save the aligned eoReg images for subZorro use
stackFront = os.path.splitext( self.files[u'sum'] )[0]
if not 'compressor' in self.files or not bool(self.files['compressor']):
mrcExt = ".mrc"
else:
mrcExt = ".mrcz"
mrcz.writeMRC( evenReg.imageSum, u"%s_even%s" % (stackFront, mrcExt ),
compressor=self.files[u'compressor'], clevel=self.files[u'clevel'], n_threads=self.n_threads)
mrcz.writeMRC( oddReg.imageSum, u"%s_odd%s" % (stackFront, mrcExt ),
compressor=self.files[u'compressor'], clevel=self.files[u'clevel'], n_threads=self.n_threads)
eoReg.tiledFRC( eoReg.images[0,:,:], eoReg.images[1,:,:],
trans=np.hstack( [self.transEven, self.transOdd] ), box=box, overlap=overlap )
self.FRC2D = eoReg.FRC2D
self.FRC = eoReg.FRC
if self.saveC:
self.evenC = evenReg.C
self.oddC = oddReg.C
self.bench['frc1'] = time.time()
return evenReg, oddReg
def lazyFouRingCorr( self, box=[512,512], overlap=0.5, debug=False ):
"""
Computes the FRC from the full stack, taking even and odd frames for the half-sums
These are not independent half-sets! ... but it still gives us a decent impression
of alignment success or failure, and it's very fast.
"""
self.bench['frc0'] = time.time()
m = self.images.shape[0]
evenIndices = np.arange(0, m, 2)
oddIndices = np.arange(1, m, 2)
evenSum = np.sum( self.images[evenIndices,:,:], axis=0 )
oddSum = np.sum( self.images[oddIndices,:,:], axis=0 )
self.tiledFRC( evenSum, oddSum, box=box, overlap=overlap )
# Force the length to be box/2 because the corners are poorly sampled
self.FRC = self.FRC[:int(box[0]/2)]
self.bench['frc1'] = time.time()
def tiledFRC( self, Image1, Image2, trans=None, box=[512,512], overlap=0.5 ):
"""
Pass in two images, which are ideally averages from two independently processed half-sets.
Compute the FRC in many tiles of shape 'box', and average the FRC over all tiles.
Overlap controls how much the tiles overlap, with 0.5 being half-tiles and 0.0 being no overlap,
i.e. they are directly adjacent. Negative overlaps may be used for sparser sampling.
Produces both a 2D FRC, which is generally of better quality than a power-spectrum, and
a rotationally-averaged 1D FRC.
"""
FFT2, _ = util.pyFFTWPlanner( np.zeros(box, dtype=fftw_dtype),
wisdomFile=os.path.join( self.cachePath, "fftw_wisdom.pkl" ) , n_threads = self.n_threads,
effort=self.fftw_effort, doReverse=False )
if overlap > 0.8:
print("tiledFRC takes huge amounts of time as overlap->1.0" )
overlap = 0.8
if trans is None:
trans = self.translations
minCrop = 5
if not np.any(trans):
cropLim = np.array( [minCrop,minCrop,minCrop,minCrop] ) # Keep away from any edge artifacts
else:
yendcrop = -np.minimum( np.floor( trans[:,0].min() ), minCrop )
xendcrop = -np.minimum( np.floor( trans[:,1].min() ), minCrop )
ystartcrop = np.maximum( np.ceil( trans[:,0].max() ), minCrop )
xstartcrop = np.maximum( np.ceil( trans[:,1].max() ), minCrop )
cropLim = np.array( [ystartcrop, xstartcrop, yendcrop, xendcrop] )
hann = util.apodization( name=u'hann', shape=box ).astype(float_dtype)
tilesX = np.floor( float( Image1.shape[1] - cropLim[1] - cropLim[3] - box[1])/ box[1] / (1.0-overlap) ).astype('int')
tilesY = np.floor( float( Image1.shape[0] - cropLim[0] - cropLim[2] - box[0])/ box[0] / (1.0-overlap) ).astype('int')
if self.verbose >= 2:
print( "Tiles for FRC: " + str( tilesX) + ":" + str(tilesY))
FFTEven = np.zeros( box, dtype=fftw_dtype )
FFTOdd = np.zeros( box, dtype=fftw_dtype )
normConstBox = np.float32( 1.0 / FFTEven.size**2 )
FRC2D = np.zeros( box, dtype=float_dtype )
for I in np.arange(0,tilesY):
for J in np.arange(0,tilesX):
offset = np.array( [ I*box[0]*(1.0-overlap)+cropLim[0], J*box[1]*(1.0-overlap)+cropLim[1] ]).astype('int')
tileEven = (hann*Image1[offset[0]:offset[0]+box[0], offset[1]:offset[1]+box[1] ]).astype(fftw_dtype)
FFT2.update_arrays( tileEven, FFTEven ); FFT2.execute()
tileOdd = (hann*Image2[offset[0]:offset[0]+box[0], offset[1]:offset[1]+box[1] ]).astype(fftw_dtype)
FFT2.update_arrays( tileOdd, FFTOdd ); FFT2.execute()
FFTOdd *= normConstBox
FFTEven *= normConstBox
# Calculate the normalized FRC in 2-dimensions
# FRC2D += nz.evaluate( "real(FFTEven*conj(FFTOdd)) / sqrt(real(abs(FFTOdd)**2) * real(abs(FFTEven)**2) )" )
# Some issues with normalization?
FRC2D += nz.evaluate( "real(FFTEven*conj(FFTOdd)) / sqrt(real(FFTOdd*conj(FFTOdd)) * real(FFTEven*conj(FFTEven)) )" )
# Normalize
FRC2D /= FRC2D[0,0]
FRC2D = np.fft.fftshift( FRC2D )
rotFRC, _ = util.rotmean( FRC2D )
self.FRC = rotFRC
self.FRC2D = FRC2D
def localFRC( self, box=[256,256], overlap=0.5 ):
# Only work on the even and odd frames?
m = self.images.shape[0]
box2 = (np.array(box)/2).astype('int')
evenIndices = np.arange(0, m, 2)
oddIndices = np.arange(1, m, 2)
center = 2048
evenBox = np.sum( self.images[evenIndices, center-box2[0]:center+box2[0], center-box2[1]:center+box2[1] ], axis=0 )
oddBox = np.sum( self.images[oddIndices, center-box2[0]:center+box2[0], center-box2[1]:center+box2[1] ], axis=0 )
FFTEven = np.zeros( box, dtype=fftw_dtype )
FFTOdd = np.zeros( box, dtype=fftw_dtype )
normConstBox = np.float32( 1.0 / FFTEven.size**2 )
FFT2, _ = util.pyFFTWPlanner( np.zeros(box, dtype=fftw_dtype),
wisdomFile=os.path.join( self.cachePath, u"fftw_wisdom.pkl" ) , n_threads = self.n_threads,
effort=self.fftw_effort, doReverse=False )
FFT2.update_arrays( evenBox.astype(fftw_dtype), FFTEven ); FFT2.execute()
FFT2.update_arrays( oddBox.astype(fftw_dtype), FFTOdd ); FFT2.execute()
FFTOdd *= normConstBox
FFTEven *= normConstBox
FRC2D = nz.evaluate( "real(FFTEven*conj(FFTOdd)) / sqrt(real(FFTOdd*conj(FFTOdd)) * real(FFTEven*conj(FFTEven)) )" )
FRC2D /= FRC2D[0,0]
FRC2D = np.fft.fftshift( FRC2D )
rotFRC, _ = util.rotmean( FRC2D )
plt.figure()
plt.plot( rotFRC )
plt.title( "Local FRC over box = " + str(box) )
def doseFilter( self, normalize=False ):
"""
This is a port from Grant's electron_dose.f90 from UnBlur. It uses fixed critical dose factors
to apply filters to each image based on their accumulated dose. We can potentially use
high-dose detector like the Medipix to determine these dose factors in advance, on a per-protein
basis. However, in that case the assumption is that radiation damage measured from diffraction of crystals
accurately reflects the loss of image contrast, which is perhaps not true for single particle work.
dosePerFrame by default is estimated from the data. If zorroReg.gain = None, we assume
the input numbers are in electrons.
missingStartFrames is for data that has the starting x frames removed. It will guess (based on the gain if
present) the missing total dose.
Parameters are set as follows:
zorroReg.doseFiltParam = [dosePerFrame, critDoseA, critDoseB, critDoseC, cutoffOrder, missingStartFrame]
When using a tight objective aperture and a GIF and thicker ice it's best to record the dose
rate in a hole and set self.doseFiltParam[0] appropriately, in terms of electrons per pixel per frame
Also fits a 2D gaussian to the image background and subtracts it. This improves performance of particle
picking tools such as Gautomatch, and keeps all the intensities uniform for Relion's group scale correction.
This can be used with Zorro's particle extraction routines.
"""
# print( "DEBUG 1: doseFilter: imageSum # nans %d" % np.sum(np.isnan(self.imageSum) ) )
critDoseA = np.float32( self.doseFiltParam[1] )
critDoseB = np.float32( self.doseFiltParam[2] )
critDoseC = np.float32( self.doseFiltParam[3] )
cutoffOrder = np.float32( self.doseFiltParam[4] )
if not bool( self.voltage ):
self.METAstatus = u"error"
self.saveConfig()
raise ValueError( "Accelerating voltage not set in advance for dose filtering" )
voltageScaling = np.float32( np.sqrt( self.voltage / 300.0 ) ) # increase in radiolysis at lower values.
# It looks like they build some mesh that is sqrt(qxmesh + qymesh) / pixelsize
# I think this is probably just qmesh in inverse Angstroms (keeping in mind Zorro's internal
# pixelsize is nm)
m = self.images.shape[0]
N = self.shapePadded[0]
M = self.shapePadded[1]
invPSx = np.float32( 1.0 / (M*(self.pixelsize*10)) )
invPSy = np.float32( 1.0 / (N*(self.pixelsize*10)) )
xmesh, ymesh = np.meshgrid( np.arange(-M/2,M/2), np.arange(-N/2,N/2))
xmesh = xmesh.astype(float_dtype); ymesh = ymesh.astype(float_dtype)
#print( "xmesh.dtype: %s" % xmesh.dtype )
qmesh = nz.evaluate( "sqrt(xmesh*xmesh*(invPSx**2) + ymesh*ymesh*(invPSy**2))" )
#print( "qmesh.dtype: %s" % qmesh.dtype )
qmesh = np.fft.fftshift( qmesh )
#print( "qmesh.dtype: %s" % qmesh.dtype )
# Since there's a lot of hand waving, let's assume dosePerFrame is constant
# What about on a GIF where the observed dose is lower due to the filter? That can be incorporated
# with a gain estimator.
if self.doseFiltParam[0] is None:
totalDose = np.mean( self.imageSum )
dosePerFrame = totalDose / m
else:
dosePerFrame = self.doseFiltParam[0]
# Computed for both branches so the accumulated-dose offset below is always defined
missingDose = dosePerFrame * np.float32( self.doseFiltParam[5] )
accumDose = np.zeros( m + 1, dtype=float_dtype )
accumDose[1:] = np.cumsum( np.ones(m) * dosePerFrame )
accumDose += missingDose
# optimalDose = 2.51284 * critDose
critDoseMesh = nz.evaluate( "voltageScaling*(critDoseA * qmesh**critDoseB + critDoseC)" )
#critDoseMesh[N/2,M/2] = 0.001 * np.finfo( 'float32' ).max
critDoseMesh[ int(N/2), int(M/2)] = critDoseMesh[ int(N/2), int(M/2)-1]**2
#print( "critDoseMesh.dtype: %s" % critDoseMesh.dtype )
# We probably don't need an entire mesh here...
qvect = (np.arange(0,self.shapePadded[0]/2) * np.sqrt( invPSx*invPSy ) ).astype( float_dtype )
optiDoseVect = np.zeros( int(self.shapePadded[0]/2), dtype=float_dtype )
optiDoseVect[1:] = np.float32(2.51284)*voltageScaling*(critDoseA * qvect[1:]**critDoseB + critDoseC)
optiDoseVect[0] = optiDoseVect[1]**2
#print( "optiDoseVect.dtype: %s" % optiDoseVect.dtype )
padWidth = np.array(self.shapePadded) - np.array(self.imageSum.shape)
doseFilteredSum = np.zeros( self.shapePadded, dtype=fftw_dtype )
filterMag = np.zeros( self.shapePadded, dtype=float_dtype )
FFTimage = np.empty( self.shapePadded, dtype=fftw_dtype )
# zorroReg.filtSum = np.zeros_like( zorroReg.imageSum )
FFT2, IFFT2 = util.pyFFTWPlanner( doseFilteredSum, wisdomFile=os.path.join( self.cachePath, "fftw_wisdom.pkl" ) ,
effort = self.fftw_effort, n_threads=self.n_threads )
for J in np.arange(0,m):
print( "Filtering for dose: %.2f e/A^2"% (accumDose[J+1]/(self.pixelsize*10)**2) )
doseFinish = accumDose[J+1] # Dose at end of frame period
doseStart = accumDose[J] # Dose at start of frame period
# qmesh is in reciprocal angstroms, so maybe I can ignore how they build the mesh and
# use a matrix meshgrid
minusHalfDose = np.float32( -0.5*doseFinish )
filt = nz.evaluate( "exp( minusHalfDose/critDoseMesh)")
#print( "filt.dtype: %s" % filt.dtype )
thresQ = qvect[ np.argwhere( np.abs(doseFinish - optiDoseVect) < np.abs(doseStart - optiDoseVect) )[-1] ]
# thres = nz.evaluate( "abs(doseFinish - optiDoseMesh) < abs(doseStart - optiDoseMesh)" )
# This filter step is slow, try to do this analytically? Can we find the radius from the above equation?
# thres = scipy.ndimage.gaussian_filter( thres.astype(zorro.float_dtype), cutoffSigma )
thres = nz.evaluate( "exp( -(qmesh/thresQ)**cutoffOrder )" )
#print( "thres.dtype: %s" % thres.dtype )
#print( "qmesh.dtype: %s" % qmesh.dtype )
#print( "thresQ.dtype: %s" % thresQ.dtype )
#print( "cutoffOrder.dtype: %s" % cutoffOrder.dtype )
# Numpy's pad is also quite slow
paddedImage = np.pad( self.images[J,:,:].astype(fftw_dtype),
((0,padWidth[0]),(0,padWidth[1])), mode=symmetricPad )
FFT2.update_arrays( paddedImage, FFTimage ); FFT2.execute()
# print( "FFTimage.dtype: %s" % FFTimage.dtype )
# Adding Fourier complex magntiude works fine
if bool(normalize):
currentFilter = nz.evaluate( "thres*filt" )
filterMag += currentFilter
doseFilteredSum += nz.evaluate( "FFTimage * currentFilter" )
else:
doseFilteredSum += nz.evaluate( "FFTimage * thres * filt" )
pass
# print( "doseFilteredSum.dtype: %s" % doseFilteredSum.dtype )
if bool( normalize ):
alpha = np.float32(1.0) # Prevent divide by zero errors by adding a fixed factor of unity before normalizing.
filterMag = np.float32(1.0) / ( filterMag + alpha )
# Using FFTimage as a temporary array
IFFT2.update_arrays( doseFilteredSum*filterMag, FFTimage ); IFFT2.execute()
else:
# Using FFTimage as a temporary array
IFFT2.update_arrays( doseFilteredSum, FFTimage ); IFFT2.execute()
self.filtSum = np.abs( FFTimage[:self.imageSum.shape[0],:self.imageSum.shape[1]] )
# print( "filtSum.dtype: %s" % self.filtSum.dtype )
del invPSx, invPSy, qmesh, optiDoseVect, doseFinish, doseStart, critDoseA, critDoseB, critDoseC,
del voltageScaling, filt, thres, thresQ, cutoffOrder, minusHalfDose
def hotpixFilter( self, cutoffLower=None, cutoffUpper=None, neighbourThres = 0.01 ):
"""
Identifies and removes hot pixels using a stochastic weighted approach, in which
outlier pixels are replaced with a PSF-weighted local estimate. Hot pixels do not affect Zorro too much
due to the intensity-normalized cross-correlation but the tracks of the
hot pixels do upset other software packages.
PSF is used to provide a camera-specific PSF to filter hot pixels. If
you have an MTF curve for a detector we can provide a psf tailored to that
particular device, otherwise use None for a uniform filter.
"""
self.bench['hot0'] = time.time()
# 3 x 3 kernels
if self.hotpixInfo[u"psf"] == u"K2":
psf = np.array( [0.0, 0.173235968], dtype=float_dtype )
else: # default to uniform filter
psf = np.array( [0.0, 1.0], dtype=float_dtype )
psfKernel = np.array( [ [psf[1]*psf[1], psf[1], psf[1]*psf[1] ],
[psf[1], 0.0, psf[1] ],
[psf[1]*psf[1], psf[1], psf[1]*psf[1] ]], dtype=float_dtype )
psfKernel /= np.sum( psfKernel )
if self.images.ndim == 2:
# Mostly used when processing flatfields for gain reference normalization
self.images = np.reshape( self.images, [1, self.images.shape[0], self.images.shape[1]])
MADE_3D = True
else:
MADE_3D = False
unalignedSum = np.sum( self.images, axis=0 )
sumMean = np.mean( unalignedSum )
poissonStd = np.sqrt( sumMean )
histBins = np.arange( np.floor( sumMean - self.hotpixInfo[u"maxSigma"]*poissonStd)-0.5, np.ceil(sumMean+self.hotpixInfo[u"maxSigma"]*poissonStd)+0.5, 1 )
unalignedHist, unalignedCounts = np.histogram( unalignedSum, histBins )
unalignedHist = unalignedHist.astype(float_dtype);
# Make unalignedCounts bin centers rather than edges
unalignedCounts = unalignedCounts[:-1].astype(float_dtype)
unalignedCounts += 0.5* (unalignedCounts[1]-unalignedCounts[0])
# Here we get sigma values from the CDF, which is smoother than the PDF due
# to the integration applied.
cdfHist = np.cumsum( unalignedHist )
cdfHist /= cdfHist[-1]
###################################
# Optimization of mean and standard deviation
# TODO: add these stats to the object
def errorNormCDF( params ):
return np.sum( np.abs( cdfHist -
scipy.stats.norm.cdf( unalignedCounts, loc=params[0], scale=params[1] ) ) )
bestNorm = scipy.optimize.minimize( errorNormCDF, (sumMean,poissonStd),
method="L-BFGS-B",
bounds=((sumMean-0.5*poissonStd, sumMean+0.5*poissonStd),
(0.7*poissonStd, 1.3*poissonStd) ) )
#####################################
sigmaFromCDF = np.sqrt(2) * scipy.special.erfinv( 2.0 * cdfHist - 1 )
normalSigma = (unalignedCounts - bestNorm.x[0]) / bestNorm.x[1]
errorNormToCDF = normalSigma - sigmaFromCDF
keepIndices = ~np.isinf( errorNormToCDF )
errorNormToCDF = errorNormToCDF[keepIndices]
normalSigmaKeep = normalSigma[keepIndices]
# Try for linear fits, resort to defaults if it fails
if not bool(cutoffLower):
try:
lowerIndex = np.where( errorNormToCDF > -0.5 )[0][0]
lowerA = np.array( [normalSigmaKeep[:lowerIndex], np.ones(lowerIndex )] )
lowerFit = np.linalg.lstsq( lowerA.T, errorNormToCDF[:lowerIndex] )[0]
cutoffLower = np.float32( -lowerFit[1]/lowerFit[0] )
self.hotpixInfo[u'cutoffLower'] = float( cutoffLower )
except:
print( "zorro.hotpixFilter failed to estimate bound for dead pixels, defaulting to -4.0" )
cutoffLower = np.float32( self.hotpixInfo['cutoffLower'] )
if not bool(cutoffUpper):
try:
upperIndex = np.where( errorNormToCDF < 0.5 )[0][-1]
upperA = np.array( [normalSigmaKeep[upperIndex:], np.ones( len(normalSigmaKeep) - upperIndex )] )
upperFit = np.linalg.lstsq( upperA.T, errorNormToCDF[upperIndex:] )[0]
cutoffUpper = np.float32( -upperFit[1]/upperFit[0] )
self.hotpixInfo[u'cutoffUpper'] = float( cutoffUpper )
except:
print( "zorro.hotpixFilter failed to estimate bound for hot pixels, defaulting to +3.25" )
cutoffUpper = np.float32( self.hotpixInfo['cutoffUpper'] )
unalignedSigma = (unalignedSum - bestNorm.x[0]) / bestNorm.x[1]
# JSON isn't serializing numpy types anymore, so we have to explicitly cast them
self.hotpixInfo[u"guessDeadpix"] = int( np.sum( unalignedSigma < cutoffLower ) )
self.hotpixInfo[u"guessHotpix"] = int( np.sum( unalignedSigma > cutoffUpper ) )
self.hotpixInfo[u"frameMean"] = float( bestNorm.x[0]/self.images.shape[0] )
self.hotpixInfo[u"frameStd"] = float( bestNorm.x[1]/np.sqrt(self.images.shape[0]) )
print( "Applying outlier pixel filter with sigma limits (%.2f,%.2f), n=(dead:%d,hot:%d)" \
% (cutoffLower, cutoffUpper, self.hotpixInfo[u"guessDeadpix"],self.hotpixInfo[u"guessHotpix"] ) )
# Some casting problems here with Python float up-casting to np.float64...
UnityFloat32 = np.float32( 1.0 )
logK = np.float32( self.hotpixInfo[u'logisticK'] )
relax = np.float32( self.hotpixInfo[u'relax'] )
logisticMask = nz.evaluate( "1.0 - 1.0 / ( (1.0 + exp(logK*(unalignedSigma-cutoffLower*relax)) ) )" )
logisticMask = nz.evaluate( "logisticMask / ( (1.0 + exp(logK*(unalignedSigma-cutoffUpper*relax)) ) )" ).astype(float_dtype)
convLogisticMask = nz.evaluate( "UnityFloat32 - logisticMask" )
# So we need 2 masks, one for pixels that have no outlier-neighbours, and
# another for joined/neighbourly outlier pixels.
# I can probably make the PSF kernel smaller... to speed things up.
neighbourlyOutlierMask = (UnityFloat32 - logisticMask) * scipy.ndimage.convolve( np.float32(1.0) - logisticMask, psfKernel )
"""
Singleton outliers have no neighbours that are also outliers, so we substitute their values
with the expected value based on the point-spread function of the detector.
"""
singletonOutlierMask = nz.evaluate( "convLogisticMask * (neighbourlyOutlierMask <= neighbourThres)" )
m = self.images.shape[0]
unalignedMean = nz.evaluate( "unalignedSum/m" )
psfFiltMean = scipy.ndimage.convolve( unalignedMean, psfKernel ).astype(float_dtype)
"""
The neighbourFilt deals with outliers that have near neighbours that are also
outliers. This isn't uncommon due to defects in the camera.
"""
neighbourlyOutlierMask = nz.evaluate( "neighbourlyOutlierMask > neighbourThres" )
neighbourlyIndices = np.where( nz.evaluate( "neighbourlyOutlierMask > neighbourThres" ) )
bestMean = bestNorm.x[0] / m
print( "Number of neighborly outlier pixels: %d" % len(neighbourlyIndices[0]) )
self.hotpixInfo[u'neighborPix'] = len(neighbourlyIndices[0])
neighbourFilt = np.zeros_like( psfFiltMean )
for (nY, nX) in zip( neighbourlyIndices[0], neighbourlyIndices[1] ):
# We'll use the 3x3 neighbourhood here, substituting the bestMean if it's all garbage
neighbourhood = neighbourlyOutlierMask[nY-1:nY+2,nX-1:nX+2]
nRatio = np.sum( neighbourhood ) / neighbourhood.size
if nRatio > 0.66 or nRatio <= 0.001 or np.isnan(nRatio):
neighbourFilt[nY,nX] = bestMean
else:
neighbourFilt[nY,nX] = convLogisticMask[nY,nX]*np.mean(unalignedMean[nY-1:nY+2,nX-1:nX+2][~neighbourhood])
stack = self.images
self.images = nz.evaluate( "logisticMask*stack + singletonOutlierMask*psfFiltMean + neighbourFilt" )
if u"decorrOutliers" in self.hotpixInfo and self.hotpixInfo[ u"decorrOutliers" ]:
"""
This adds a bit of random noise to pixels that have been heavily filtered
to a uniform value, so they aren't correlated noise. This should only
affect Zorro and Relion movie processing.
"""
decorrStd = np.sqrt( bestNorm.x[1]**2 / m ) / 2.0
N_images = self.images.shape[0]
filtPosY, filtPosX = np.where( logisticMask < 0.5 )
# I don't see a nice way to vectorize this loop. With a ufunc?
for J in np.arange( filtPosY.size ):
self.images[ :, filtPosY[J], filtPosX[J] ] += np.random.normal( \
scale=decorrStd*convLogisticMask[filtPosY[J],filtPosX[J]], size=N_images )
if MADE_3D:
self.images = np.squeeze( self.images )
self.bench['hot1'] = time.time()
del logK, relax, logisticMask, psfFiltMean, stack, UnityFloat32, singletonOutlierMask
pass
def hotpixFilter_SINGLETON( self, cutoffLower=None, cutoffUpper=None ):
"""
Identifies and removes hot pixels using a stochastic weighted approach, in which
outlier pixels are replaced with a PSF-weighted local estimate. Hot pixels do not affect Zorro too much
due to the intensity-normalized cross-correlation but the tracks of the
hot pixels do upset other software packages.
PSF is used to provide a camera-specific PSF to filter hot pixels. If
you have an MTF curve for a detector we can provide a psf tailored to that
particular device, otherwise use None for a uniform filter.
"""
self.bench['hot0'] = time.time()
if self.hotpixInfo[u"psf"] == u"K2":
psf = np.array( [0.0, 0.173235968, 0.016518], dtype='float32' )
else: # default to uniform filter
psf = np.array( [0.0, 1.0, 1.0], dtype='float32' )
psfKernel = np.array( [ [psf[2]*psf[2], psf[2]*psf[1], psf[2], psf[2]*psf[1], psf[2]*psf[2] ],
[psf[2]*psf[1], psf[1]*psf[1], psf[1], psf[1]*psf[1], psf[1]*psf[2] ],
[psf[2], psf[1], 0.0, psf[1], psf[2] ],
[psf[2]*psf[1], psf[1]*psf[1], psf[1], psf[1]*psf[1], psf[1]*psf[2] ],
[ psf[2]*psf[2], psf[2]*psf[1], psf[2], psf[2]*psf[1], psf[2]*psf[2] ] ], dtype='float32' )
psfKernel /= np.sum( psfKernel )
if self.images.ndim == 2:
# Mostly used when processing flatfields for gain reference normalization
self.images = np.reshape( self.images, [1, self.images.shape[0], self.images.shape[1]])
MADE_3D = True
else:
MADE_3D = False
unalignedSum = np.sum( self.images, axis=0 )
sumMean = np.mean( unalignedSum )
poissonStd = np.sqrt( sumMean )
histBins = np.arange( np.floor( sumMean - self.hotpixInfo[u"maxSigma"]*poissonStd)-0.5, np.ceil(sumMean+self.hotpixInfo[u"maxSigma"]*poissonStd)+0.5, 1 )
unalignedHist, unalignedCounts = np.histogram( unalignedSum, histBins )
unalignedHist = unalignedHist.astype('float32');
# Make unalignedCounts bin centers rather than edges
unalignedCounts = unalignedCounts[:-1].astype('float32')
unalignedCounts += 0.5* (unalignedCounts[1]-unalignedCounts[0])
# Here we get sigma values from the CDF, which is smoother than the PDF due
# to the integration applied.
cdfHist = np.cumsum( unalignedHist )
cdfHist /= cdfHist[-1]
###################################
# Optimization of mean and standard deviation
# TODO: add these stats to the object
def errorNormCDF( params ):
return np.sum( np.abs( cdfHist -
scipy.stats.norm.cdf( unalignedCounts, loc=params[0], scale=params[1] ) ) )
bestNorm = scipy.optimize.minimize( errorNormCDF, (sumMean,poissonStd),
method="L-BFGS-B",
bounds=((sumMean-0.5*poissonStd, sumMean+0.5*poissonStd),
(0.7*poissonStd, 1.3*poissonStd) ) )
# normCDF = scipy.stats.norm.cdf( unalignedCounts, loc=bestNorm.x[0], scale=bestNorm.x[1] )
#####################################
sigmaFromCDF = np.sqrt(2) * scipy.special.erfinv( 2.0 * cdfHist - 1 )
#sumFromHist = np.sum( unalignedHist )
#meanFromHist = np.float32( np.sum( unalignedHist * unalignedCounts ) / sumFromHist )
#stdFromHist = np.float32( np.sqrt( np.sum( unalignedHist * unalignedCounts**2 )/ sumFromHist - meanFromHist*meanFromHist ) )
#invStdFromHist = np.float32(1.0 / stdFromHist )
normalSigma = (unalignedCounts - bestNorm.x[0]) / bestNorm.x[1]
# TODO: try to keep these infs from being generated in the first place
errorNormToCDF = normalSigma - sigmaFromCDF
keepIndices = ~np.isinf( errorNormToCDF )
errorNormToCDF = errorNormToCDF[keepIndices]
# unalignedCountsKeep = unalignedCounts[keepIndices]
normalSigmaKeep = normalSigma[keepIndices]
# TODO: add try-except, using a fixed error difference if the fitting fails
if not bool(cutoffLower):
try:
lowerIndex = np.where( errorNormToCDF > -0.5 )[0][0]
lowerA = np.array( [normalSigmaKeep[:lowerIndex], np.ones(lowerIndex )] )
lowerFit = np.linalg.lstsq( lowerA.T, errorNormToCDF[:lowerIndex] )[0]
cutoffLower = np.float32( -lowerFit[1]/lowerFit[0] )
except:
print( "zorro.hotpixFilter failed to estimate bound for dead pixels, defaulting to -4.0" )
cutoffLower = np.float32( -4.0 )
if not bool(cutoffUpper):
try:
upperIndex = np.where( errorNormToCDF < 0.5 )[0][-1]
upperA = np.array( [normalSigmaKeep[upperIndex:], np.ones( len(normalSigmaKeep) - upperIndex )] )
upperFit = np.linalg.lstsq( upperA.T, errorNormToCDF[upperIndex:] )[0]
cutoffUpper = np.float32( -upperFit[1]/upperFit[0] )
except:
print( "zorro.hotpixFilter failed to estimate bound for hot pixels, defaulting to +3.25" )
cutoffUpper = np.float32( 3.25 )
unalignedSigma = (unalignedSum - bestNorm.x[0]) / bestNorm.x[1]
print( "Applying progressive outlier pixel filter with sigma limits (%.2f,%.2f)" % (cutoffLower, cutoffUpper) )
# JSON isn't serializing numpy types anymore, so we have to explicitly cast them
self.hotpixInfo[u'cutoffLower'] = float( cutoffLower )
self.hotpixInfo[u'cutoffUpper'] = float( cutoffUpper )
self.hotpixInfo[u"guessDeadpix"] = int( np.sum( unalignedSigma < cutoffLower ) )
self.hotpixInfo[u"guessHotpix"] = int( np.sum( unalignedSigma > cutoffUpper ) )
self.hotpixInfo[u"frameMean"] = float( bestNorm.x[0]/self.images.shape[0] )
self.hotpixInfo[u"frameStd"] = float( bestNorm.x[1]/np.sqrt(self.images.shape[0]) )
logK = np.float32( self.hotpixInfo[u'logisticK'] )
relax = np.float32( self.hotpixInfo[u'relax'] )
logisticMask = nz.evaluate( "1.0 - 1.0 / ( (1.0 + exp(logK*(unalignedSigma-cutoffLower*relax)) ) )" )
logisticMask = nz.evaluate( "logisticMask / ( (1.0 + exp(logK*(unalignedSigma-cutoffUpper*relax)) ) )" ).astype('float32')
# So we need 2 masks, one for pixels that have no outlier-neighbours, and
# another for joined/neighbourly outlier pixels.
singletonOutlierMask = scipy.ndimage.convolve( logisticMask, np.ones_like(psfKernel) )
# Some casting problems here with Python float up-casting to np.float64...
UnityFloat32 = np.float32( 1.0 )
psfFiltMean = scipy.ndimage.convolve( unalignedSum/self.images.shape[0], psfKernel ).astype('float32')
stack = self.images
nz.evaluate( "(UnityFloat32-logisticMask) *stack + logisticMask*psfFiltMean" )
if u"decorrOutliers" in self.hotpixInfo and self.hotpixInfo[ u"decorrOutliers" ]:
"""
This adds a bit of random noise to pixels that have been heavily filtered
to a uniform value, so they aren't correlated noise. This should only
affect Zorro and Relion movie processing.
"""
decorrStd = np.std( self.images[0,:,:] )
N_images = self.images.shape[0]
filtPosY, filtPosX = np.where( logisticMask < 0.98 )
# I don't see a nice way to vectorize this loop. With a ufunc?
for J in np.arange( filtPosY.size ):
self.images[ :, filtPosY[J], filtPosX[J] ] += np.random.normal( scale=decorrStd, size=N_images )
if MADE_3D:
self.images = np.squeeze( self.images )
self.bench['hot1'] = time.time()
del logK, relax, logisticMask, psfFiltMean, stack, UnityFloat32, singletonOutlierMask
def setBfiltCutoff( self, cutoffSpacing ):
"""
stackReg.setBfiltCutoff( cutoffSpacing )
Expects stackReg.pixelsize to be set, and stackReg.images to be loaded.
Units of pixelsize from DM4 is nm, so the cutoff spacing should also be
nm. E.g. cutoffspacing = 0.3 [nm] is 3.0 Angstroms.
For a Gaussian B-filter, the cutoff is where the filter amplitude drops
to 1/exp(1)
"""
shapeImage = np.array( self.images.shape[1:] )
psInv = 1.0 / (self.pixelsize*shapeImage)
cutoffInv = 1.0 / cutoffSpacing
self.Brad = cutoffInv / psInv
print( "Setting Brad to: " + str(self.Brad) )
pass
def getCropLimits( self, trans = None ):
if trans is None:
trans = self.translations
yendcrop = np.minimum( np.floor( trans[:,0].min() ), 0 ).astype('int')
if yendcrop == 0:
yendcrop = None
xendcrop = np.minimum( np.floor( trans[:,1].min() ), 0 ).astype('int')
if xendcrop == 0:
xendcrop = None
ystartcrop = np.maximum( np.ceil( trans[:,0].max() ), 0 ).astype('int')
xstartcrop = np.maximum( np.ceil( trans[:,1].max() ), 0 ).astype('int')
return np.array( [ystartcrop, xstartcrop, yendcrop, xendcrop] )
def getSumCropToLimits( self ):
"""
Gets imageSum cropped so that no pixels with partial dose are kept.
"""
cropLim = self.getCropLimits()
return self.imageSum[cropLim[0]:cropLim[2], cropLim[1]:cropLim[3]]
def getFiltSumCropToLimits( self ):
"""
Gets filtSum cropped so that no pixels with partial dose are kept.
"""
cropLim = self.getCropLimits()
return self.filtSum[cropLim[0]:cropLim[2], cropLim[1]:cropLim[3]]
def getImagesCropToLimits( self ):
"""
Gets images stack cropped so that no pixels with partial dose are kept.
"""
cropLim = self.getCropLimits()
return self.images[:,cropLim[0]:cropLim[2], cropLim[1]:cropLim[3]]
def getMaskCropLimited( self ):
"""
Get a mask that crops the portion of the image that moved, for refinement.
"""
cropLim = self.getCropLimits()
if cropLim[2] is None: cropLim[2] = 0
if cropLim[3] is None: cropLim[3] = 0
if np.any( self.shapeOriginal ):
newMask = np.zeros( [1,self.shapeOriginal[0],self.shapeOriginal[1]], dtype=float_dtype )
newMask[:,cropLim[0]:self.shapeOriginal[0]+cropLim[2], cropLim[1]:self.shapeOriginal[1]+cropLim[3]] = 1.0
else:
newMask = np.zeros( [1,self.images.shape[1],self.images.shape[2]], dtype=float_dtype )
newMask[:,cropLim[0]:self.images.shape[1]+cropLim[2], cropLim[1]:self.images.shape[2]+cropLim[3]] = 1.0
return newMask
def execGCTF( self, movieMode=False, movieFrameToAverage=8, movieFit=0, movieType=1 ):
"""
Calls GCTF.
I.e. movieMode=True
# Movie options to calculate defocuses of each frame:
# --mdef_aveN 8 Average number of moive frames for movie or particle stack CTF refinement
# --mdef_fit 0 0: no fitting; 1: linear fitting defocus changes in Z-direction
# --mdef_ave_type 0 0: coherent average, average FFT with phase information(suggested for movies); 1:incoherent average, only average amplitude(suggested for particle stack);
"""
self.bench['ctf0'] = time.time()
print( " Kai Zhang, 'Gctf: real-time CTF determination and correction', J. Struct. Biol., 193(1): 1-12, (2016)" )
print( " http://www.sciencedirect.com/science/article/pii/S1047847715301003" )
if self.cachePath is None:
self.cachePath = "."
try: os.umask( self.umask ) # Why is Python not using default umask from OS?
except: pass
stackBase = os.path.splitext( os.path.basename( self.files[u'stack'] ) )[0]
mrcName = os.path.join( self.cachePath, stackBase + u"_gctf.mrc" )
mrcFront = os.path.splitext( mrcName )[0]
diagOutName = mrcFront + u".ctf"
logName = mrcFront + u"_ctffind3.log"
epaName = mrcFront + u"_EPA.log"
if bool( movieMode ):
# Write an MRCS
mrcz.writeMRC( self.images, mrcName )
# Call GCTF
gctf_exec = "gctf %s --apix %f --kV %f --cs %f --do_EPA 1 --mdef_ave_type 1 --logsuffix _ctffind3.log " % (mrcName, self.pixelsize*10, self.voltage, self.C3 )
gctf_exec += " --mdef_aveN %d --mdef_fit %d --mdef_ave_type %d" %( movieFrameToAverage, movieFit, movieType )
else: # No movieMode
if not np.any( self.imageSum ):
raise AttributeError( "Error in execGCTF: No image sum found" )
mrcz.writeMRC( self.imageSum, mrcName )
# Call GCTF
gctf_exec = "gctf %s --apix %f --kV %f --cs %f --do_EPA 1 --logsuffix _ctffind3.log " % (mrcName, self.pixelsize*10, self.voltage, self.C3 )
# Need to redirect GCTF output to null because it's formatted with UTF-16 and this causes Python 2.7 problems.
devnull = open(os.devnull, 'w' )
subprocess.call( gctf_exec, shell=True, stdout=devnull, stderr=devnull )
# sub = subprocess.Popen( gctf_exec, shell=True )
#sub.wait()
# Diagnostic image ends in .ctf
self.CTFDiag = mrcz.readMRC( diagOutName )[0]
# Parse the output _ctffind3.log for the results
with open( logName, 'r' ) as fh:
logCTF = fh.readlines()
ctf = logCTF[-5].split()
self.CTFInfo[u'DefocusU'] = float( ctf[0] )
self.CTFInfo[u'DefocusV'] = float( ctf[1] )
self.CTFInfo[u'DefocusAngle'] = float( ctf[2] )
self.CTFInfo[u'CtfFigureOfMerit'] = float( ctf[3] )
self.CTFInfo[u'FinalResolution'] = float( logCTF[-3].split()[-1] )
self.CTFInfo[u'Bfactor'] = float( logCTF[-2].split()[-1] )
# Output compact _ctffind3.log
self.saveRelionCTF3( )
# Remove temporary files and log file
try: os.remove( diagOutName )
except: pass
try: os.remove( logName ) # Technically we could keep this.
except: pass
try: os.remove( mrcName )
except: pass
try: os.remove( epaName )
except: pass
self.bench['ctf1'] = time.time()
def execCTFFind41( self, movieMode=False, box_size = 1024, contrast=0.067,
min_res=50.0, max_res=4.0,
min_C1=5000.0, max_C1=45000.0, C1_step = 500.0,
A1_tol = 500.0 ):
"""
Calls CTFFIND 4.1, which must be on the system path.
movieMode = True does not require an aligned image (works on Fourier magnitudes)
box_size = CTFFind parameter, box size to FFT
contrast = estimated phase contrast in images
min_res = minimum resolution to fit, in Angstroms
max_res = maximum resolution to fit, in Angstroms. Water ice is around 3.4 Angstroms
min_C1 = minimum defocus search range, in Angstroms
max_C1 = maximum defocus search range, in Angstroms
C1_step = defocus search step size, in Angstroms
A1_tol = 2-fold astigmatism tolerance, in Angstroms
"""
self.bench['ctf0'] = time.time()
if util.which( 'ctffind' ) is None:
print( "Error: CTFFIND not found!" )
return
if self.pixelsize is None:
print( "Set pixelsize (in nm) before calling execCTFFind4" )
return
elif self.voltage is None:
print( "Set voltage (in kV) before calling execCTFFind4" )
return
elif self.C3 is None:
print( "Set C3 (in mm) before calling execCTFFind4" )
return
print( "Calling CTFFIND4.1 for %s" % self.files['stack'] )
print( " written by Alexis Rohou: http://grigoriefflab.janelia.org/ctffind4" )
print( " http://biorxiv.org/content/early/2015/06/16/020917" )
ps = self.pixelsize * 10.0
min_res = np.min( [min_res, 50.0] )
try: os.umask( self.umask ) # Why is Python not using default umask from OS?
except: pass
if self.cachePath is None:
self.cachePath = "."
# Force trailing slashes onto cachePath
stackBase = os.path.splitext( os.path.basename( self.files[u'stack'] ) )[0]
diagOutName = os.path.join( self.cachePath, stackBase + u".ctf" )
try:
mrcName = os.path.join( self.cachePath, stackBase + u"_ctf4.mrc" )
if bool(movieMode):
mrcz.writeMRC( self.images, mrcName )
number_of_frames_to_average = 1
else:
mrcz.writeMRC( self.imageSum, mrcName )
except:
print( "Error in exporting MRC file to CTFFind4.1" )
return
# flags = "--amplitude-spectrum-input --filtered-amplitude-spectrum-input"
flags = "" # Not using any flags
find_additional_phase_shift = "no"
knownAstig = "no"
largeAstig = "no"
restrainAstig = "yes"
expertOptions = "no"
ctfexec = ( "ctffind " + flags + " << STOP_PARSING \n" + mrcName + "\n" )
if bool(movieMode):
ctfexec = ctfexec + "yes\n" + str(number_of_frames_to_average + "\n" )
ctfexec = (ctfexec + diagOutName + "\n" + str(ps) + "\n" + str(self.voltage) + "\n" +
str(self.C3) + "\n" + str(contrast) + "\n" + str(box_size) + "\n" +
str(min_res) + "\n" + str(max_res) + "\n" + str(min_C1) + "\n" +
str(max_C1) + "\n" + str(C1_step) + "\n" + str(knownAstig) + "\n" +
str(largeAstig) + "\n" + str(restrainAstig) + "\n" +
str(A1_tol) + "\n" + find_additional_phase_shift + "\n" +
str(expertOptions) )
ctfexec = ctfexec + "\nSTOP_PARSING"
subprocess.call( ctfexec, shell=True )
try:
logName = os.path.join( self.cachePath, stackBase + ".txt" )
print( "Trying to load from: " + logName )
# Log has 5 comment lines, then 1 header, and
# Micrograph number, DF1, DF2, Azimuth, Additional Phase shift, CC, and max spacing fit-to
CTF4Results = np.loadtxt(logName, comments='#', skiprows=1 )
self.CTFInfo[u'DefocusU'] = float( CTF4Results[1] )
self.CTFInfo[u'DefocusV'] = float( CTF4Results[2] )
self.CTFInfo[u'DefocusAngle'] = float( CTF4Results[3] )
self.CTFInfo[u'AdditionalPhaseShift'] = float( CTF4Results[4] )
self.CTFInfo[u'CtfFigureOfMerit'] = float( CTF4Results[5] )
self.CTFInfo[u'FinalResolution'] = float( CTF4Results[6] )
self.CTFDiag = mrcz.readMRC( diagOutName )[0]
except:
print( "CTFFIND4 likely core-dumped, try different input parameters?" )
pass
# Write a RELION-style _ctffind3.log file, with 5 um pixel size...
self.saveRelionCTF3()
# TODO: having trouble with files not being deletable, here. Is CTFFIND4 holding them open? Should
# I just pause for a short time?
time.sleep(0.5) # DEBUG: try and see if temporary files are deletable now.
try: os.remove( mrcName )
except OSError as e:
print( "Could not remove temporary file: " + str(e) )
try: os.remove( diagOutName )
except OSError as e:
print( "Could not remove temporary file: " + str(e) )
# Delete CTF4 logs
try: os.remove( os.path.join( self.cachePath, stackBase + "_avrot.txt") )
except: pass
try: os.remove( logName )
except: pass
try: os.remove( os.path.join( self.cachePath, stackBase + ".ctf" ) )
except: pass
self.bench['ctf1'] = time.time()
def execCTFFind4( self, movieMode=False, box_size = 512, contrast=0.067,
min_res=50.0, max_res=4.0,
min_C1=5000.0, max_C1=45000.0, C1_step = 500.0,
A1_tol = 100.0, displayDiag=False ):
"""
Calls CTFFind4, must be on the system path.
movieMode = True does not require an aligned image (works on Fourier magnitudes)
box_size = CTFFind parameter, box size to FFT
contrast = estimated phase contrast in images
min_res = minimum resolution to fit, in Angstroms
max_res = maximum resolution to fit, in Angstroms. Water ice is around 3.4 Angstroms
min_C1 = minimum defocus search range, in Angstroms
max_C1 = maximum defocus search range, in Angstroms
C1_step = defocus search step size, in Angstroms
A1_tol = 2-fold astigmatism tolerance, in Angstroms
displayDiag = True plots the diagnostic output image
"""
self.bench['ctf0'] = time.time()
if util.which( 'ctffind' ) is None:
print( "Error: CTFFIND not found!" )
return
if self.pixelsize is None:
print( "Set pixelsize (in nm) before calling execCTFFind4" )
return
elif self.voltage is None:
print( "Set voltage (in kV) before calling execCTFFind4" )
return
elif self.C3 is None:
print( "Set C3 (in mm) before calling execCTFFind4" )
return
print( "Calling CTFFIND4 for " + self.files['stack'] )
print( " written by Alexis Rohou: http://grigoriefflab.janelia.org/ctffind4" )
print( " http://biorxiv.org/content/early/2015/06/16/020917" )
ps = self.pixelsize * 10.0
min_res = np.min( [min_res, 50.0] )
try: os.umask( self.umask ) # Why is Python not using default umask from OS?
except: pass
if self.cachePath is None:
self.cachePath = "."
# Force trailing slashes onto cachePath
stackBase = os.path.splitext( os.path.basename( self.files[u'stack'] ) )[0]
diagOutName = os.path.join( self.cachePath, stackBase + u".ctf" )
try:
mrcName = os.path.join( self.cachePath, stackBase + u"_ctf4.mrc" )
if movieMode:
input_is_a_movie = 'true'
mrcz.writeMRC( self.images, mrcName )
number_of_frames_to_average = 1
else:
input_is_a_movie = 'false'
mrcz.writeMRC( self.imageSum, mrcName )
except:
print( "Error in exporting MRC file to CTFFind4" )
return
# flags = "--amplitude-spectrum-input --filtered-amplitude-spectrum-input"
flags = "" # Not using any flags
find_additional_phase_shift = 'false'
ctfexec = ( "ctffind " + flags + " << STOP_PARSING \n" + mrcName )
if input_is_a_movie == 'true' or input_is_a_movie == 'yes':
ctfexec = ctfexec + "\n" + input_is_a_movie + "\n" + str(number_of_frames_to_average)
ctfexec = (ctfexec + "\n" + diagOutName + "\n" + str(ps) + "\n" + str(self.voltage) + "\n" +
str(self.C3) + "\n" + str(contrast) + "\n" + str(box_size) + "\n" +
str(min_res) + "\n" + str(max_res) + "\n" + str(min_C1) + "\n" +
str(max_C1) + "\n" + str(C1_step) + "\n" + str(A1_tol) + "\n" +
find_additional_phase_shift )
ctfexec = ctfexec + "\nSTOP_PARSING"
print( ctfexec )
sub = subprocess.Popen( ctfexec, shell=True )
sub.wait()
# os.system( ctfexec )
#print( "CTFFIND4 execution time (s): " + str(t1-t0))
try:
logName = os.path.join( self.cachePath, stackBase + ".txt" )
print( "Trying to load from: " + logName )
# Log has 5 comment lines, then 1 header, and
# Micrograph number, DF1, DF2, Azimuth, Additional Phase shift, CC, and max spacing fit-to
CTF4Results = np.loadtxt(logName, comments='#', skiprows=1 )
self.CTFInfo[u'DefocusU'] = float( CTF4Results[1] )
self.CTFInfo[u'DefocusV'] = float( CTF4Results[2] )
self.CTFInfo[u'DefocusAngle'] = float( CTF4Results[3] )
self.CTFInfo[u'AdditionalPhaseShift'] = float( CTF4Results[4] )
self.CTFInfo[u'CtfFigureOfMerit'] = float( CTF4Results[5] )
self.CTFInfo[u'FinalResolution'] = float( CTF4Results[6] )
self.CTFDiag = mrcz.readMRC( diagOutName )[0]
except IOError:
print( "CTFFIND4 likely core-dumped, try different input parameters?" )
pass
# Write a RELION-style _ctffind3.log file, with 5 um pixel size...
self.saveRelionCTF3()
# TODO: having trouble with files not being deletable, here. Is CTFFIND4 holding them open? Should
# I just pause for a short time?
time.sleep(0.5) # DEBUG: try and see if temporary files are deletable now.
try: os.remove( mrcName )
except OSError as e:
print( "Could not remove temporary file: " + str(e) )
try: os.remove( diagOutName )
except: pass
# Delete CTF4 logs
try: os.remove( os.path.join( self.cachePath, stackBase + "_avrot.txt") )
except: pass
try: os.remove( logName )
except: pass
try: os.remove( os.path.join( self.cachePath, stackBase + ".ctf" ) )
except: pass
self.bench['ctf1'] = time.time()
def saveRelionCTF3( self ):
# Saves the results from CTF4 in a pseudo-CTF3 log that RELION 1.3/1.4 can handle
# Relevant code is in ctffind_runner.cpp, in the function getCtffindResults() (line 248)
# Relion searchs for:
# "CS[mm], HT[kV], AmpCnst, XMAG, DStep[um]"
# and
# DFMID1 DFMID2 ANGAST CC
#
# 15876.71 16396.97 52.86 0.10179 Final Values
# Mag goes from micrometers of detector pixel size, to specimen pixel size (in nm)
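# Worked example of the XMAG line written below (illustrative values): with
# detectorPixelSize = 5.0 um and pixelsize = 0.1 nm,
# mag = (5.0E-6) / (0.1E-9) = 50000.0, i.e. RELION's detector-to-specimen
# magnification convention.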
amp_contrast = self.CTFInfo[u'AmplitudeContrast']
if bool(self.detectorPixelSize):
dstep = self.detectorPixelSize # Assumed to be in microns
else:
dstep = 5.0 # default value of 5.0 microns, Relion-2 doesn't use it anyway...
mag = (dstep*1E-6) / (self.pixelsize*1E-9)
if self.files[u'sum'] is not None:
sumFront = os.path.splitext( self.files[u'sum'] )[0]
else:
sumFront = os.path.splitext( self.files[u'stack'] )[0]
# Check to see if the sum directory exists already or not
sumDir = os.path.split( sumFront )[0]
if bool(sumDir) and not os.path.isdir( sumDir ):
os.mkdir( sumDir )
self.files[u'ctflog'] = sumFront + u"_ctffind3.log"
logh = open( self.files[u'ctflog'], "w" )
logh.write( u"CS[mm], HT[kV], AmpCnst, XMAG, DStep[um]\n" )
logh.write( u"%.2f"%self.C3 + u" %.1f"%self.voltage + u" " +
str(amp_contrast) + u" %.1f" %mag + u" %.2f"%dstep + u"\n" )
try:
logh.write( u"%.1f"%self.CTFInfo['DefocusU']+ u" %.1f"%self.CTFInfo['DefocusV']
+ u" %.4f"%self.CTFInfo['DefocusAngle']+ u" %.4f"%self.CTFInfo['CtfFigureOfMerit']
+ u" Final Values\n ")
except:
print( "Warning: Could not write CTFInfo to ctf3-style log, probably CTF estimation failed" )
logh.close()
pass
def loadData( self, stackNameIn = None, target=u"stack", leading_zeros=0, useMemmap=False ):
"""
Import either a sequence of DM3 files, a TIFF stack, an MRC/MRCS stack, a DM4 stack, or an HDF5 file.
Target is a string representation of the member name, i.e. 'images', 'imageSum', 'C0'
Files can be compressed with 'lbzip2' (preferred) or 'pigz' with extension '.bz2' or '.gz'
On Windows machines you must have 7-zip in the path to manage compression, and
only .bz2 is supported
filename can be an absolute path name or relative pathname. Automatically
assumes file format based on extension.
"""
self.bench['loaddata0'] = time.time()
# import os
from os.path import splitext
if stackNameIn is not None:
self.files[target] = stackNameIn
#### DECOMPRESS FILE ####
# This will move the file to the cachePath, so potentially could result in some confusion
self.files[target] = util.decompressFile( self.files[target], outputDir = self.cachePath )
[file_front, file_ext] = splitext( self.files[target] )
#### IMAGE FILES ####
if file_ext == u".dm3" :
print( "Loading DM3 files in sequence" )
try:
import DM3lib as dm3
from glob import glob
except:
raise ImportError( "Error: DM3lib not found, download at: http://imagejdocu.tudor.lu/doku.php?id=plugin:utilities:python_dm3_reader:start" )
return
file_seq = file_front.rstrip( '1234567890' )
filelist = glob( file_seq + "*" + file_ext )
file_nums = []
for I in range(0, len(filelist) ):
# Get all the file_nums
[file_front, fit_ext] = splitext( filelist[I] )
file_strip = file_front.rstrip( '1234567890' ) # Strip off numbers
file_nums.append( file_front[len(file_strip):] )
file_nums = np.sort( np.array(file_nums,dtype='int' ) )
filecount = len(filelist)
# TO DO: handle things that aren't sequential lists of DM3 files
# Note, ideally we append to images rather than overwriting
dm3struct = dm3.DM3( self.files[target] )
tempData = np.empty( [ filecount, dm3struct.imagedata.shape[0], dm3struct.imagedata.shape[1]] )
tempData[0,:,:] = dm3struct.imagedata
for I in np.arange( 1, filecount ):
filenameDM3 = file_strip + str(file_nums[I]).zfill(leading_zeros) + file_ext
print( "Importing: " + filenameDM3 )
dm3struct = dm3.DM3( filenameDM3 )
tempData[I,:,:] = dm3struct.imagedata
elif file_ext == u'.tif' or file_ext == u'.tiff':
# For compressed TIFFs we should use PIL, as it's the fastest. Freeimage
# is actually the fastest but it only imports the first frame in a stack...
try:
import skimage.io
except:
print( "Error: scikit-image or glob not found!" )
return
print( "Importing: " + self.files[target] )
try:
tempData = skimage.io.imread( self.files[target], plugin='pil' ).astype( 'float32' )
except:
print( "Error: PILlow image library not found, reverting to (slow) TIFFFile" )
tempData = skimage.io.imread( self.files[target], plugin='tifffile' ).astype( 'float32' )
"""
# Sequence mode
print( "Loading TIFF files in sequence" )
try:
import skimage.io
from glob import glob
except:
print( "Error: scikit-image or glob not found!" )
return
file_seq = file_front.rstrip( '1234567890' )
filelist = glob( file_seq + "*" + self.file_ext )
file_nums = []
for I in range(0, len(filelist) ):
# Get all the file_nums
[file_front, fit_ext] = splitext( filelist[I] )
file_strip = file_front.rstrip( '1234567890' ) # Strip off numbers
file_nums.append( file_front[len(file_strip):] )
file_nums = np.sort( np.array(file_nums,dtype='int' ) )
filecount = len(filelist)
# see if freeimage is available
try:
skimage.io.use_plugin( 'freeimage' )
except:
print( "FreeImage library not found, it is recommended for TIFF input." )
skimage.io.use_plugin( 'tifffile' )
mage1 = skimage.io.imread( self.files[target] )
tempData = np.empty( [ filecount, mage1.shape[0], mage1.shape[1]] )
tempData[0,:,:] = mage1
for I in np.arange( 1, filecount ):
filenameTIFF = file_strip + str(file_nums[I]).zfill(leading_zeros) + self.file_ext
print( "Importing: " + filenameTIFF )
tempData[I,:,:] = skimage.io.imread( filenameTIFF )
"""
elif file_ext == u".dm4":
# Expects a DM4 image stack
print( "Open as DM4: " + self.files[target] )
dm4obj = mrcz.readDM4( self.files[target], verbose=False, useMemmap = useMemmap )
tempData = np.copy( dm4obj.im[1].imageData.astype( float_dtype ), order='C' )
# Load pixelsize from file
try:
if bool( dm4obj.im[1].imageInfo['DimXScale'] ):
if dm4obj.im[1].imageInfo[u'DimXUnits'] == u'\x14\x00': # This is what we get with no value set.
print( "DM4 pixels have no units, keeping previously set pixelsize" )
# Keep any previously set self.pixelsize; the file has no usable value
else:
self.pixelsize = dm4obj.im[1].imageInfo['DimXScale'] # DM uses units of nm, we assume we don't have rectangular pixels because that's evil
except KeyError: pass
try:
if bool(dm4obj.im[1].imageInfo['Voltage'] ):
self.voltage = dm4obj.im[1].imageInfo['Voltage'] / 1000.0 # in kV
except KeyError: pass
try:
if bool(dm4obj.im[1].imageInfo['C3']):
self.C3 = dm4obj.im[1].imageInfo['C3'] # in mm
except KeyError: pass
try:
if bool(dm4obj.im[1].imageInfo['DetectorPixelSize']):
self.detectorPixelSize = dm4obj.im[1].imageInfo['DetectorPixelSize'][0] # in um
except KeyError: pass
del dm4obj
elif file_ext == u".mrc" or file_ext == u'.mrcs' or file_ext == u".mrcz" or file_ext == u".mrczs":
# Expects a MRC image stack
tempData, header = mrcz.readMRC( self.files[target], pixelunits=u'nm' )
# Force data to 32-bit float if it uint8 or uint16
if tempData.dtype.itemsize < 4:
tempData = tempData.astype('float32')
# As old MotionCorr data has no pixelsize in the header, only accept if the MRC file has non-zero
# This allows a pre-set of ImageRegistrator.pixelsize
if not np.isclose( header[u'pixelsize'][0] , 0.0 ):
# Convert from Angstroms to nm performed internally
self.pixelsize = np.float32( header[u'pixelsize'][0] )
# Should try writing C3 and voltage somewhere
elif file_ext == u".hdf5" or file_ext == u".h5":
try:
h5file = tables.open_file( self.files[target], mode='r' )
except Exception:
print( "Could not open HDF5 file: " + self.files[target] )
return
try: tempData = np.copy( h5file.get_node( '/', "images" ), order='C' ).astype('float32')
except: print( "HDF5 file import did not find /images" )
# TODO: load other nodes
try: self.pixelsize = np.copy( h5file.get_node( '/', "pixelsize" ), order='C' )
except: print( "HDF5 file import did not find /pixelsize" )
try: self.voltage = np.copy( h5file.get_node( '/', "voltage" ), order='C' )
except: print( "HDF5 file import did not find /voltage" )
try: self.detectorPixelSize = np.copy( h5file.get_node( '/', "detectorPixelSize" ), order='C' )
except: print( "HDF5 file import did not find /detectorPixelSize" )
try: self.C3 = np.copy( h5file.get_node( '/', "C3" ), order='C' )
except: print( "HDF5 file import did not find /C3" )
try:
h5file.close()
except:
pass
pass
else:
print( "Unknown file extesion: " + stackNameIn )
return
#### GAIN REFERENCE MANAGEMENT ####
if target != u'gainRef' and u'gainRef' in self.files and bool(self.files[u'gainRef']):
# The Gatan gain reference is always a multiplication operation. What of FEI and DE detectors?
if not np.any( self.gainRef ):
self.loadData( self.files[u'gainRef'], target=u'gainRef' )
gainRef = self.gainRef
# Apply gain reference to each tempData, this should broadcast with numexpr?
print( "Applying gain reference: %s" % self.files[u'gainRef'] )
tempData = nz.evaluate( "gainRef * tempData" )
pass
# Finally, assign to target
# TODO: set self.files[] dict values?
if target == u"stack" or target == u'align' or target == u'images':
if tempData.ndim != 3: # Probably the user saved a 2D image by mistake
self.METAstatus = u"error"
self.saveConfig()
raise ValueError( "zorro.loadData: stacks must be 3D data" )
if bool(self.gain) and not np.isclose( self.gain, 1.0 ):
self.images = tempData / self.gain
else:
self.images = tempData
elif target == u"sum" or target == u'imageSum':
self.imageSum = tempData
elif target == u"gainRef":
# Apply flips and rotations
if 'Diagonal' in self.gainInfo and self.gainInfo['Diagonal']:
print( "Rotating gain reference by 90 degrees" )
tempData = np.rot90( tempData, k = 1 )
if 'Horizontal' in self.gainInfo and self.gainInfo['Horizontal'] and \
'Vertical' in self.gainInfo and self.gainInfo['Vertical']:
# This is an image mirror, usually.
print( "Rotating gain reference by 180 degrees (mirror)" )
tempData = np.rot90( tempData, k =2 )
elif 'Horizontal' in self.gainInfo and self.gainInfo['Horizontal']:
print( "Flipping gain reference horizontally (mirror)" )
tempData = np.fliplr( tempData )
elif 'Vertical' in self.gainInfo and self.gainInfo['Vertical']:
print( "Flipping gain reference vertically (mirror)" )
tempData = np.flipud( tempData )
# TODO: see if any other labs have some weird configuration of flips and rotations.
# The Gatan gain reference has a lot of hot pixel artifacts, that we'll clip away for the moment
# Perhaps we should explicitely use the same algorithm as the hot pixel mask.
#gainCutoff = 1E-4
#gainLim = util.histClim( tempData, cutoff=gainCutoff )
#hotpix = ( tempData <= gainLim[0] ) | ( tempData >= gainLim[1] )
# Possibly we could skip the uniform filter and just force hot pixels to
# 1.0? I might get in trouble from a non-Gatan detector?
# self.gainRef = ~hotpix*tempData + hotpix*scipy.ndimage.uniform_filter( tempData, size=5 )
#self.gainRef = ~hotpix*tempData + hotpix
self.gainRef = tempData
elif target == u"filt" or target == u'filtSum':
self.filtSum = tempData
elif target == u"xc":
self.C = tempData
print( "TODO: set filename for C in loadData" )
elif target == u"mask":
self.masks = tempData
self.bench['loaddata1'] = time.time()
def saveData( self ):
"""
Save files to disk.
Do compression of stack if requested, e.g. self.compress_ext = '.bz2', which
uses lbzip2 or 7-zip. '.gz' is also supported but not recommended.
TODO: add dtype options, including a sloppy float for uint16 and uint8
"""
self.bench['savedata0'] = time.time()
import os, shutil
try: os.umask( self.umask ) # Why is Python not using default umask from OS?
except: pass
# If self.files['config'] exists we save relative to it. Otherwise we default to the place of
# self.files['stack']
# if bool( self.files['config'] ):
# baseDir = os.path.dirname( self.files['config'] )
# else:
# baseDir = os.path.dirname( self.files['stack'] )
stackFront, stackExt = os.path.splitext( os.path.basename( self.files[u'stack'] ) )
if not 'compressor' in self.files or not bool(self.files['compressor']):
mrcExt = ".mrc"
mrcsExt = ".mrcs"
self.files['compressor'] = None
self.files['clevel'] = 0
else:
mrcExt = ".mrcz"
mrcsExt = ".mrcsz"
# Change the current directory to make relative pathing sensible
# try:
# os.chdir( baseDir )
# except:
# baseDir = "."# Usually baseDir is "" which is "."
if stackExt == ".bz2" or stackExt == ".gz" or stackExt == ".7z":
# compressExt = stackExt
stackFront, stackExt = os.path.splitext( stackFront )
if self.files[u'sum'] is None: # Default sum name
self.files[u'sum'] = os.path.join( u"sum", u"%s_zorro%s" %(stackFront, mrcExt) )
# Does the directory exist? Often this will be a relative path to file.config
sumPath, sumFile = os.path.split( self.files[u'sum'] )
if not os.path.isabs( sumPath ):
sumPath = os.path.realpath( sumPath ) # sumPath is always real
if bool(sumPath) and not os.path.isdir( sumPath ):
os.mkdir( sumPath )
relativeSumPath = os.path.relpath( sumPath )
#### SAVE ALIGNED SUM ####
if self.verbose >= 1:
print( "Saving: " + os.path.join(sumPath,sumFile) )
mrcz.writeMRC( self.imageSum, os.path.join(sumPath,sumFile),
pixelsize=self.pixelsize, pixelunits=u'nm',
voltage = self.voltage, C3 = self.C3, gain = self.gain,
compressor=self.files[u'compressor'],
clevel=self.files[u'clevel'],
n_threads=self.n_threads)
# Compress sum
if bool(self.doCompression):
util.compressFile( os.path.join(sumPath,sumFile), self.compress_ext, n_threads=self.n_threads )
#### SAVE ALIGNED STACK ####
if bool(self.saveMovie):
if self.files[u'align'] is None: # Default filename for aligned movie
self.files[u'align'] = os.path.join( u"align", u"%s_zorro_movie%s" % (stackFront, mrcsExt) )
# Does the directory exist?
alignPath, alignFile = os.path.split( self.files[u'align'] )
if not os.path.isabs( alignPath ):
alignPath = os.path.realpath( alignPath )
if bool(alignPath) and not os.path.isdir( alignPath ):
os.mkdir( alignPath )
if self.verbose >= 1:
print( "Saving: " + os.path.join(alignPath,alignFile) )
mrcz.writeMRC( self.images, os.path.join(alignPath,alignFile),
pixelsize=self.pixelsize, pixelunits=u'nm',
voltage = self.voltage, C3 = self.C3, gain = self.gain,
compressor=self.files[u'compressor'],
clevel=self.files[u'clevel'],
n_threads=self.n_threads)
# Compress stack
if bool(self.doCompression):
util.compressFile( os.path.join(alignPath,alignFile), self.compress_ext, n_threads=self.n_threads )
if bool(self.filterMode) and np.any(self.filtSum): # This will be in the same place as sum
if not u'filt' in self.files or self.files[u'filt'] is None: # Default filename for filtered sum
self.files[u'filt'] = os.path.join( relativeSumPath, u"%s_filt%s" %(os.path.splitext(sumFile)[0], mrcExt) )
filtPath, filtFile = os.path.split( self.files[u'filt'] )
if not os.path.isabs( filtPath ):
filtPath = os.path.realpath( filtPath )
if self.verbose >= 1:
print( "Saving: " + os.path.join(filtPath, filtFile) )
mrcz.writeMRC( self.filtSum, os.path.join(filtPath, filtFile),
pixelsize=self.pixelsize, pixelunits=u'nm',
voltage = self.voltage, C3 = self.C3, gain = self.gain,
compressor=self.files[u'compressor'],
clevel=self.files[u'clevel'],
n_threads=self.n_threads)
#### SAVE CROSS-CORRELATIONS FOR FUTURE PROCESSING OR DISPLAY ####
if self.saveC and self.C is not None:
self.files[u'xc'] = os.path.join( sumPath, u"%s_xc%s" % (os.path.splitext(sumFile)[0],mrcsExt) )
if self.verbose >= 1:
print( "Saving: " + self.files[u'xc'] )
mrcz.writeMRC( np.asarray( self.C, dtype='float32'), self.files[u'xc'],
pixelsize=self.pixelsize, pixelunits=u'nm',
voltage = self.voltage, C3 = self.C3, gain = self.gain,
compressor=self.files[u'compressor'],
clevel=self.files[u'clevel'],
n_threads=self.n_threads)
if bool(self.doCompression):
util.compressFile( self.files[u'xc'], self.compress_ext, n_threads=self.n_threads )
#### SAVE OTHER INFORMATION IN A LOG FILE ####
        # Log file is saved separately... Calling it here could lead to confusing behaviour.
if u'moveRawPath' in self.files and bool( self.files[u'moveRawPath'] ) and not os.path.isdir( self.files[u'moveRawPath'] ):
os.mkdir( self.files[u'moveRawPath'] )
if bool( self.doCompression ): # does compression and move in one op
self.files[u'stack'] = util.compressFile( self.files[u'stack'], outputDir=self.files[u'moveRawPath'],
n_threads=self.n_threads, compress_ext=self.compress_ext )
elif u'moveRawPath' in self.files and bool( self.files[u'moveRawPath'] ):
newStackName = os.path.join( self.files[u'moveRawPath'], os.path.split( self.files[u'stack'])[1] )
print( "Moving " +self.files[u'stack'] + " to " + newStackName )
try:
os.rename( self.files[u'stack'], newStackName )
            except OSError:
                # Often we can't rename between file systems, so copy and delete instead
                shutil.copyfile( self.files[u'stack'], newStackName )
# if os.path.isfile( newStackName) and filecmp.cmp( self.files['stack'], newStackName ):
# filecmp is very, very slow... we need a better trick, maybe just compare sizes
if os.path.isfile( newStackName):
os.remove( self.files[u'stack'] )
else:
print( "Error in copying raw stack, original will not be deleted from input directory" )
self.files[u'stack'] = newStackName
pass
self.bench['savedata1'] = time.time()
def loadConfig( self, configNameIn = None, loadData=False ):
"""
Initialize the ImageRegistrator class from a config file
loadData = True will load data from the given filenames.
"""
import json
        if not bool(configNameIn):
            if not bool( self.files['config'] ):
                print( "Cannot find configuration file: " + str(self.files[u'config']) )
            # Otherwise fall through and load the existing self.files[u'config']
else:
self.files[u'config'] = configNameIn
print( "Loading config file: " + self.files[u'config'] )
config = configparser.RawConfigParser(allow_no_value = True)
try:
config.optionxform = unicode # Python 2
except:
config.optionxform = str # Python 3
##### Paths #####
# I'd prefer to pop an error here if configName doesn't exist
if not os.path.isfile( self.files[u'config'] ):
raise IOError( "zorro.loadConfig: Could not load config file %s" % self.files[u'config'] )
config.read( self.files[u'config'] )
# Initialization
try: self.verbose = config.getint( u'initialization', u'verbose' )
except: pass
try: self.umask = config.getint( u'initialization', u'umask' )
except: pass
try: self.fftw_effort = config.get( u'initialization', u'fftw_effort' ).upper()
except: pass
try: self.n_threads = config.getint( u'initialization', u'n_threads' )
except: pass
try: self.saveC = config.getboolean( u'initialization', u'saveC' )
except: pass
try: self.METAstatus = config.get( u'initialization', u'METAstatus' )
except: pass
try: self.cachePath = config.get( u'initialization', u'cachePath' )
except: pass
# Calibrations
try: self.pixelsize = config.getfloat(u'calibration',u'pixelsize')
except: pass
try: self.voltage = config.getfloat(u'calibration',u'voltage')
except: pass
try: self.C3 = config.getfloat(u'calibration',u'C3')
except: pass
try: self.gain = config.getfloat(u'calibration',u'gain')
except: pass
try: self.detectorPixelSize = config.getfloat(u'calibration',u'detectorPixelSize')
except: pass
try: self.gainInfo = json.loads( config.get( u'calibration', u'gainInfo' ))
except: pass
# Data
try: self.trackCorrStats = config.getboolean( u'data', u'trackCorrStats' )
except: pass
try: self.corrStats = json.loads( config.get(u'data', u'corrStats') )
except: pass
try: self.bench = json.loads( config.get(u'data', u'bench') )
except: pass
try: self.hotpixInfo = json.loads( config.get(u'data', u'hotpixInfo') )
except: pass
# Results
# Load arrays with json
try: self.translations = np.array( json.loads( config.get( u'results', u'translations' ) ) )
except: pass
try: self.transEven = np.array( json.loads( config.get( u'results', u'transEven' ) ) )
except: pass
try: self.transOdd = np.array( json.loads( config.get( u'results', u'transOdd' ) ) )
except: pass
try: self.velocities = np.array( json.loads( config.get( u'results', u'velocities' ) ) )
except: pass
try: self.rotations = np.array( json.loads( config.get( u'results', u'rotations' ) ) )
except: pass
try: self.scales = np.array( json.loads( config.get( u'results', u'scales' ) ) )
except: pass
try: self.FRC = np.array( json.loads( config.get( u'results', u'FRC' ) ) )
except: pass
try: self.CTFProgram = config.get( u'ctf', u'CTFProgram' )
except: pass
# CTF dict
try: self.ctfInfo = json.loads( config.get( u'ctf', u'CTFInfo' ) )
except: pass
errorDictsExist=True
errCnt = 0
while errorDictsExist:
try:
newErrorDict = {}
dictName = u'errorDict%d' % errCnt
# Load the list of keys and then load them element-by-element
# newErrorDict = json.loads( config.get( 'data', dictName ) )
keyList = json.loads( config.get( dictName, u'keyList' ) )
for key in keyList:
newErrorDict[key] = np.array( json.loads( config.get( dictName, key ) ) )
# convert singular values from arrays
if newErrorDict[key].size == 1:
newErrorDict[key] = newErrorDict[key].item(0)
self.errorDictList.append(newErrorDict)
except: # This stops loading dicts on more or less any error at present
errorDictsExist=False
break
errCnt += 1
# Registration parameters
try: self.xcorrMode = config.get( u'registration', u'xcorrMode' )
except: pass
try: self.triMode = config.get( u'registration', u'triMode' )
except: pass
try: self.startFrame = config.getint( u'registration', u'startFrame' )
except: pass
try: self.endFrame = config.getint( u'registration', u'endFrame' )
except: pass
try: self.shapePadded = np.array( json.loads( config.get( u'registration', u'shapePadded' ) ) )
except: pass
try: self.shapeOriginal = np.array( json.loads( config.get( u'registration', u'shapeOriginal' ) ) )
except: pass
try: self.shapeBinned = np.array( json.loads( config.get( u'registration', u'shapeBinned' ) ) )
except: pass
try: self.fouCrop = np.array( json.loads( config.get( u'registration', u'fouCrop' ) ) )
except: pass
try: self.subPixReg = config.getint( u'registration', u'subPixReg' )
except: pass
try: self.shiftMethod = config.get( u'registration', u'shiftMethod' )
except: pass
try: self.maxShift = config.getint( u'registration', u'maxShift' )
except: pass
try: self.preShift = config.getboolean( u'registration', u'preShift' )
except: pass
try: self.triMode = config.get( u'registration', u'triMode' )
except: pass
try: self.diagWidth = config.getint( u'registration', u'diagWidth' )
except: pass
try: self.diagStart = config.getint( u'registration', u'diagStart' )
except: pass
try: self.autoMax = config.getint( u'registration', u'autoMax' )
except: pass
try: self.peaksigThres = config.getfloat( u'registration', u'peaksigThres' )
except: pass
try: self.corrThres = config.getfloat( u'registration', u'corrThres' )
except: pass
try: self.velocityThres = config.getfloat( u'registration', u'velocityThres' )
except: pass
try: self.Brad = config.getfloat( u'registration', u'Brad' )
except: pass
try: self.Bmode = config.get( u'registration', u'Bmode' )
except: pass
try: self.BfiltType = config.get( u'registration', u'BfiltType' )
except: pass
try: self.originMode = config.get( u'registration', u'originMode' )
except: pass
try: self.suppressOrigin = config.getboolean( u'registration', u'suppressOrigin' )
except: pass
try: self.weightMode = config.get( u'registration', u'weightMode' )
except: pass
try: self.logisticK = config.getfloat( u'registration', u'logisticK' )
except: pass
try: self.logisticNu = config.getfloat( u'registration', u'logisticNu' )
except: pass
try: self.filterMode = config.get( u'registration', u'filterMode' )
except: pass
        try: self.doLazyFRC = config.getboolean( u'registration', u'doLazyFRC' )
except: pass
try: self.doEvenOddFRC = config.getboolean( u'registration', u'doEvenOddFRC' )
except: pass
try: self.doseFiltParam = json.loads( config.get( u'registration', u'doseFiltParam' ) ) # This one stays a list
except: pass
# IO
try: self.files = json.loads( config.get( u'io', u'files' ) )
except: pass
try: self.savePNG = config.getboolean( u'io', u'savePNG' )
except: pass
try: self.compress_ext = config.get( u'io', u'compress_ext' )
except: pass
try: self.saveMovie = config.getboolean( u'io', u'saveMovie' )
except: pass
try: self.doCompression = config.getboolean( u'io', u'doCompression' )
except: pass
# Plot
try: self.plotDict = json.loads( config.get( u'plot', u'plotDict' ) )
except: pass
        if bool(loadData) and u'stack' in self.files and self.files[u'stack'] is not None:
self.loadData()
pass
def saveConfig( self, configNameIn=None ):
"""
        Write the state of the ImageRegistrator class to a config file
"""
import json
import os
try: os.umask( self.umask ) # Why is Python not using default umask from OS?
except: pass
if not bool( configNameIn ):
if self.files[u'config'] is None:
self.files[u'config'] = self.files[u'stack'] + u".zor"
else:
self.files['config'] = configNameIn
# Does the directory exist?
configPath = os.path.realpath( os.path.dirname( self.files[u'config'] ) )
if bool(configPath) and not os.path.isdir( configPath ):
os.mkdir( configPath )
# Write config
config = configparser.RawConfigParser(allow_no_value = True)
try:
config.optionxform = unicode # Python 2
except:
config.optionxform = str # Python 3
# Initialization
config.add_section( u'initialization' )
config.set( u'initialization', u'METAstatus', self.METAstatus )
config.set( u'initialization', u'# METAstatus _MUST_ appear as second line in file' )
config.set( u'initialization', u'# For detailed use instructions: github.com/C-CINA/zorro/wiki', None )
config.set( u'initialization', u'verbose', self.verbose )
config.set( u'initialization', u'umask', self.umask )
config.set( u'initialization', u'fftw_effort', self.fftw_effort )
# Any time we cast variables we need handle errors from numpy
config.set( u'initialization', u'# n_threads is usually best if set to the number of physical cores (CPUs)' )
try: config.set( u'initialization', u'n_threads', np.int(self.n_threads) )
except: pass
config.set( u'initialization', u'saveC', self.saveC )
config.set( u'initialization', u'cachePath', self.cachePath )
# Calibrations
config.add_section( u'calibration' )
config.set( u'calibration', u"# Zorro can strip this information from .DM4 files if its is present in tags" )
config.set( u'calibration' , u"# Pixel size in nanometers" )
config.set( u'calibration',u'pixelsize', self.pixelsize )
config.set( u'calibration' , u"# Accelerating voltage in kV" )
config.set( u'calibration',u'voltage', self.voltage )
config.set( u'calibration' , u"# Spherical aberration in mm" )
config.set( u'calibration',u'C3', self.C3 )
config.set( u'calibration' , u"# Gain in electrons/count" )
config.set( u'calibration',u'gain', self.gain )
config.set( u'calibration',u'detectorPixelSize', self.detectorPixelSize )
config.set( u'calibration', u'gainInfo', json.dumps( self.gainInfo ) )
# Registration parameters
config.add_section( u'registration' )
config.set( u'registration', u'xcorrMode', self.xcorrMode )
config.set( u'registration' , u"# tri, diag, first, auto, or autocorr" )
config.set( u'registration', u'triMode', self.triMode )
if self.shapePadded is not None:
            if isinstance( self.shapePadded, np.ndarray ):
                self.shapePadded = self.shapePadded.tolist()
config.set( u'registration', u"# Use a padding 10 % bigger than the original image, select an efficient size with zorro_util.findValidFFTWDim()" )
config.set( u'registration', u'shapePadded', json.dumps( self.shapePadded) )
if self.shapeOriginal is not None:
            if isinstance( self.shapeOriginal, np.ndarray ):
                self.shapeOriginal = self.shapeOriginal.tolist()
config.set( u'registration', u'shapeOriginal', json.dumps( self.shapeOriginal ) )
if self.shapeBinned is not None:
            if isinstance( self.shapeBinned, np.ndarray ):
                self.shapeBinned = self.shapeBinned.tolist()
config.set( u'registration', u'shapeBinned', json.dumps( self.shapeBinned ) )
if self.fouCrop is not None:
            if isinstance( self.fouCrop, np.ndarray ):
                self.fouCrop = self.fouCrop.tolist()
config.set( u'registration', u'fouCrop', json.dumps( self.fouCrop ) )
try: config.set( u'registration', u'subPixReg', np.int(self.subPixReg) )
except: pass
config.set( u'registration', u'shiftMethod', self.shiftMethod )
config.set( u'registration' , u"# Maximum shift in pixels within diagWidth/autoMax frames" )
try: config.set( u'registration', u'maxShift', np.int(self.maxShift) )
except: pass
config.set( u'registration' ,u"# preShift = True is useful for crystalline specimens where you want maxShift to follow the previous frame position" )
config.set( u'registration', u'preShift', self.preShift )
try: config.set( u'registration', u'diagStart', np.int(self.diagStart) )
except: pass
try: config.set( u'registration', u'diagWidth', np.int(self.diagWidth) )
except: pass
try: config.set( u'registration', u'autoMax', np.int(self.autoMax) )
except: pass
try: config.set( u'registration', u'startFrame', np.int(self.startFrame) )
except: pass
try: config.set( u'registration', u'endFrame', np.int(self.endFrame) )
except: pass
config.set( u'registration' , u"# peakSigThres changes with dose but usually is uniform for a dataset" )
config.set( u'registration', u'peaksigThres', self.peaksigThres )
config.set( u'registration' , u"# corrThres is DEPRECATED" )
config.set( u'registration', u'corrThres', self.corrThres )
config.set( u'registration', u'velocityThres', self.velocityThres )
config.set( u'registration' , u"# Brad is radius of B-filter in Fourier pixels" )
config.set( u'registration', u'Brad', self.Brad )
config.set( u'registration' , u"# Bmode = conv, opti, or fourier" )
config.set( u'registration', u'Bmode', self.Bmode )
        config.set( u'registration', u'BfiltType', self.BfiltType )
config.set( u'registration' , u"# originMode is centroid, or (empty), empty sets frame 1 to (0,0)" )
config.set( u'registration', u'originMode', self.originMode )
config.set( u'registration' , u"# weightMode is one of logistic, corr, norm, unweighted" )
config.set( u'registration', u'weightMode', self.weightMode )
config.set( u'registration', u'logisticK', self.logisticK )
config.set( u'registration', u'logisticNu', self.logisticNu )
config.set( u'registration' , u"# Set suppressOrigin = True if gain reference artifacts are excessive" )
config.set( u'registration', u'suppressOrigin', self.suppressOrigin )
config.set( u'registration', u'filterMode', self.filterMode )
config.set( u'registration', u'doLazyFRC', self.doLazyFRC )
config.set( u'registration', u'doEvenOddFRC', self.doEvenOddFRC )
if np.any( self.doseFiltParam ) and bool( self.filterMode ):
config.set( u'registration', u'doseFiltParam', json.dumps( self.doseFiltParam ) )
# CTF
config.add_section( u'ctf' )
config.set( u'ctf', u'CTFProgram', self.CTFProgram )
config.set( u'ctf', u'CTFInfo', json.dumps( self.CTFInfo ) )
# IO
config.add_section(u'io')
config.set( u'io', u'savePNG', self.savePNG )
config.set( u'io', u'compress_ext', self.compress_ext )
config.set( u'io', u'saveMovie', self.saveMovie )
config.set( u'io', u'doCompression', self.doCompression )
config.set( u'io' , u"# Note: all paths are relative to the current working directory." )
config.set( u'io', u'files', json.dumps( self.files ) )
# Plot
config.add_section( u'plot' )
config.set( u'plot', u'plotDict', json.dumps( self.plotDict ) )
# Results
# Seems Json does a nice job of handling numpy arrays if converted to lists
config.add_section( u'results' )
if self.translations is not None:
config.set( u'results', u'translations', json.dumps( self.translations.tolist() ) )
if self.transEven is not None:
config.set( u'results', u'transEven', json.dumps( self.transEven.tolist() ) )
if self.transOdd is not None:
config.set( u'results', u'transOdd', json.dumps( self.transOdd.tolist() ) )
if self.rotations is not None:
config.set( u'results', u'rotations', json.dumps( self.rotations.tolist() ) )
if self.scales is not None:
config.set( u'results', u'scales', json.dumps( self.scales.tolist() ) )
if self.velocities is not None:
config.set( u'results', u'velocities', json.dumps( self.velocities.tolist() ) )
if self.FRC is not None:
config.set( u'results', u'FRC', json.dumps( self.FRC.tolist() ) )
# Data
config.add_section( u'data' )
config.set( u'data', u'hotpixInfo', json.dumps( self.hotpixInfo) )
config.set( u'data', u'trackCorrStats', self.trackCorrStats )
config.set( u'data', u'corrStats', json.dumps( self.corrStats) )
config.set( u'data', u'bench', json.dumps( self.bench ) )
# Error dicts
for errCnt, errorDict in enumerate(self.errorDictList):
            # For serialization, the errorDict arrays have to be lists.
dictName = u'errorDict%d'%errCnt
config.add_section( dictName )
keyList = list( errorDict.keys() )
config.set( dictName, u'keyList', json.dumps( keyList ) )
for key in keyList:
if( hasattr( errorDict[key], "__array__" ) ):
config.set( dictName, key, json.dumps( errorDict[key].tolist() ) )
else:
config.set( dictName, key, json.dumps( errorDict[key] ) )
try:
# Would be nice to have some error handling if cfgFH already exists
# Could try and open it with a try: open( 'r' )
cfgFH = open( self.files[u'config'] , 'w+' )
if self.verbose >= 1:
print( "Saving config file: " + self.files[u'config'] )
config.write( cfgFH )
cfgFH.close()
except:
print( "Error in loading config file: " + self.files[u'config'] )
def plot( self, title = "" ):
"""
Multiprocessed matplotlib diagnostic plots.
For each plot, make a list that contains the name of the plot, and a dictionary that contains all the
information necessary to render the plot.
"""
self.bench['plot0'] = time.time()
if not bool(title):
# Remove any pathing from default name as figurePath overrides this.
if bool( self.files[u'stack'] ):
self.plotDict[u'title'] = os.path.split( self.files[u'stack'] )[1]
else:
self.plotDict[u'title'] = u"default"
else:
self.plotDict[u'title'] = title
# figurePath needs to be relative to the config directory, which may not be the current directory.
# if bool(self.savePNG ) and bool(self.files['config']):
# try: # Sometimes this is empty
# os.chdir( os.path.split(self.files['config'])[0] )
# except: pass
# Error checks on figurePath
if not bool( self.files[u'figurePath'] ):
self.files[u'figurePath'] = u"./fig"
if not os.path.isdir( self.files[u'figurePath'] ):
os.mkdir( self.files[u'figurePath'] )
plotArgs = []
# IF IMAGESUM
if np.any(self.imageSum) and u'imageSum' in self.plotDict and ( self.plotDict[u'imageSum'] ):
#print( "zorro.plot.imageSum" )
plotDict = self.plotDict.copy()
# Unfortunately binning only saves time if we do it before pickling the data off to multiprocess.
# TODO: http://stackoverflow.com/questions/7894791/use-numpy-array-in-shared-memory-for-multiprocessing
binning = 2
plotDict[u'pixelsize'] = self.pixelsize * binning
imageSumBinned = util.magickernel( self.getSumCropToLimits(), k=1 )
plotDict[u'image'] = imageSumBinned
            # RAM: temporary expedient of filtering FFTs of large images to increase contrast
if self.imageSum.shape[0]*binning > 3072 and self.imageSum.shape[1]*binning > 3072:
plotDict[u'lowPass'] = 0.75
if bool(self.savePNG):
plotDict[u'plotFile'] = os.path.join( self.files[u'figurePath'], self.plotDict[u'title'] + "_imageSum.png")
self.files[u'figImageSum'] = plotDict[u'plotFile']
plotArgs.append( [u'image', plotDict] )
# IF FILTSUM
if np.any(self.filtSum) and u'filtSum' in self.plotDict and bool( self.plotDict[u'filtSum'] ):
#print( "zorro.plot.filtSum" )
plotDict = self.plotDict.copy()
# Unfortunately binning only saves time if we do it before pickling the data off to multiprocess.
# TODO: http://stackoverflow.com/questions/7894791/use-numpy-array-in-shared-memory-for-multiprocessing
binning = 2
plotDict[u'pixelsize'] = self.pixelsize * binning
filtSumBinned = util.magickernel( self.getFiltSumCropToLimits(), k=1 )
plotDict[u'image'] = filtSumBinned
            # RAM: temporary expedient of filtering FFTs of large images to increase contrast
if self.imageSum.shape[0]*binning > 3072 and self.imageSum.shape[1]*binning > 3072:
plotDict[u'lowPass'] = 0.75
if bool(self.savePNG):
plotDict[u'plotFile'] = os.path.join( self.files[u'figurePath'], self.plotDict[u'title'] + "_filtSum.png")
self.files[u'figFiltSum'] = plotDict[u'plotFile']
plotArgs.append( [u'image', plotDict] )
# IF FFTSUM
if np.any(self.imageSum) and u'FFTSum' in self.plotDict and bool( self.plotDict[u'FFTSum'] ):
#print( "zorro.plot.FFTSum" )
plotDict = self.plotDict.copy()
# No FFT binning please
plotDict[u'pixelsize'] = self.pixelsize
            # We would like the cropped sum but that can be a weird size that is slow for the FFT
            plotDict[u'image'] = self.imageSum
            # RAM: temporary expedient of filtering FFTs of large images to increase contrast
if self.imageSum.shape[0] > 3072 and self.imageSum.shape[1] > 3072:
plotDict[u'lowPass'] = 3.0
if bool(self.savePNG):
plotDict[u'plotFile'] = os.path.join( self.files[u'figurePath'], self.plotDict[u'title'] + "_FFTSum.png")
self.files[u'figFFTSum'] = plotDict[u'plotFile']
plotArgs.append( [u'FFT', plotDict] )
pass
# IF POLARFFTSUM
if np.any(self.imageSum) and u'polarFFTSum' in self.plotDict and bool( self.plotDict[u'polarFFTSum'] ):
#print( "zorro.plot.PolarFFTSum" )
plotDict = self.plotDict.copy()
# No FFT binning please
plotDict[u'pixelsize'] = self.pixelsize
            # We would like the cropped sum but that can be a weird size that is slow for the FFT
            plotDict[u'image'] = self.imageSum
            # RAM: temporary expedient of filtering FFTs of large images to increase contrast
if self.imageSum.shape[0] > 3072 and self.imageSum.shape[1] > 3072:
plotDict[u'lowPass'] = 1.5
if bool(self.savePNG):
plotDict[u'plotFile'] = os.path.join( self.files[u'figurePath'], self.plotDict[u'title'] + "_polarFFTSum.png")
self.files[u'figPolarFFTSum'] = plotDict[u'plotFile']
plotArgs.append( [u'polarFFT', plotDict] )
pass
# IF TRANSLATIONS
if np.any(self.translations) and u'translations' in self.plotDict and bool( self.plotDict[u'translations'] ):
#print( "zorro.plot.Translations" )
plotDict = self.plotDict.copy()
if np.any( self.translations ):
plotDict[u'translations'] = self.translations
try:
plotDict[u'errorX'] = self.errorDictList[0][u'errorX']
plotDict[u'errorY'] = self.errorDictList[0][u'errorY']
except: pass
if bool(self.savePNG):
plotDict[u'plotFile'] = os.path.join( self.files[u'figurePath'], self.plotDict[u'title'] + "_translations.png")
self.files[u'figTranslations'] = plotDict[u'plotFile']
plotArgs.append( [u'translations', plotDict] )
# IF PIXEL REGISTRATION ERROR
if len(self.errorDictList) > 0 and u'pixRegError' in self.plotDict and bool( self.plotDict[u'pixRegError'] ):
#print( "zorro.plot.PixRegError" )
plotDict = self.plotDict.copy()
plotDict[u'errorX'] = self.errorDictList[0][u'errorX']
plotDict[u'errorY'] = self.errorDictList[0][u'errorY']
plotDict[u'errorXY'] = self.errorDictList[0][u'errorXY']
if bool(self.savePNG):
plotDict[u'plotFile'] = os.path.join( self.files[u'figurePath'], self.plotDict[u'title'] + "_pixRegError.png")
self.files[u'figPixRegError'] = plotDict[u'plotFile']
plotArgs.append( [u'pixRegError', plotDict] )
# IF CORRTRIMAT
if len(self.errorDictList) > 0 and u'corrTriMat' in self.plotDict and bool( self.plotDict[u'corrTriMat'] ):
#print( "zorro.plot.coor" )
plotDict = self.plotDict.copy()
plotDict[u'corrTriMat'] = self.errorDictList[-1][u'corrTriMat']
if bool(self.savePNG):
plotDict[u'plotFile'] = os.path.join( self.files[u'figurePath'], self.plotDict[u'title'] + "_corrTriMat.png")
self.files[u'figCorrTriMat'] = plotDict[u'plotFile']
plotArgs.append( [u'corrTriMat', plotDict] )
# IF PEAKSIGTRIMAT
if len(self.errorDictList) > 0 and u'peaksigTriMat' in self.plotDict and bool( self.plotDict[u'peaksigTriMat'] ):
#print( "zorro.plot.peaksig" )
plotDict = self.plotDict.copy()
plotDict[u'peaksigTriMat'] = self.errorDictList[-1][u'peaksigTriMat']
if bool(self.savePNG):
plotDict[u'plotFile'] = os.path.join( self.files[u'figurePath'], self.plotDict[u'title'] + "_peaksigTriMat.png")
self.files[u'figPeaksigTriMat'] = plotDict[u'plotFile']
plotArgs.append( [u'peaksigTriMat', plotDict] )
# IF LOGISTICS CURVE
if len(self.errorDictList) > 0 and u'logisticWeights' in self.plotDict and bool( self.plotDict[u'logisticWeights'] ):
#print( "zorro.plot.logist" )
plotDict = self.plotDict.copy()
if self.weightMode == u'autologistic' or self.weightMode == u'logistic':
plotDict[u'peaksigThres'] = self.peaksigThres
plotDict[u'logisticK'] = self.logisticK
plotDict[u'logisticNu'] = self.logisticNu
plotDict[u'errorXY'] = self.errorDictList[0][u"errorXY"]
plotDict[u'peaksigVect'] = self.errorDictList[0][u"peaksigTriMat"][ self.errorDictList[0]["peaksigTriMat"] > 0.0 ]
if u'cdfPeaks' in self.errorDictList[0]:
plotDict[u'cdfPeaks'] = self.errorDictList[0][u'cdfPeaks']
plotDict[u'hSigma'] = self.errorDictList[0][u'hSigma']
if bool(self.savePNG):
plotDict[u'plotFile'] = os.path.join( self.files[u'figurePath'], self.plotDict[u'title'] + "_logisticWeights.png")
self.files[u'figLogisticWeights'] = plotDict[u'plotFile']
plotArgs.append( [u'logisticWeights', plotDict] )
# IF FRC PLOT
if np.any(self.FRC) and u'FRC' in self.plotDict and bool( self.plotDict[u'FRC'] ):
#print( "zorro.plot.FRC" )
plotDict = self.plotDict.copy()
plotDict[u'FRC'] = self.FRC
plotDict[u'pixelsize'] = self.pixelsize
if bool( self.doEvenOddFRC ):
plotDict[u'labelText'] = u"Even-odd frame independent FRC"
else:
plotDict[u'labelText'] = u"Non-independent FRC is not a resolution estimate"
if bool(self.savePNG):
plotDict[u'plotFile'] = os.path.join( self.files[u'figurePath'], self.plotDict[u'title'] + "_FRC.png")
self.files[u'figLazyFRC'] = plotDict[u'plotFile']
plotArgs.append( [u'lazyFRC', plotDict] )
# IF CTFDIAG PLT
if np.any(self.CTFDiag) and u'CTFDiag' in self.plotDict and bool( self.plotDict[u'CTFDiag'] ):
plotDict = self.plotDict.copy()
plotDict[u'CTFDiag'] = self.CTFDiag
plotDict[u'CTFInfo'] = self.CTFInfo
plotDict[u'pixelsize'] = self.pixelsize
if bool(self.savePNG):
plotDict[u'plotFile'] = os.path.join( self.files[u'figurePath'], self.plotDict[u'title'] + "_CTFDiag.png")
self.files[u'figCTFDiag'] = plotDict[u'plotFile']
plotArgs.append( [u'CTFDiag', plotDict] )
# IF STATS PLOT
if u'stats' in self.plotDict and bool( self.plotDict[u'stats'] ):
#print( "zorro.plot.stats" )
plotDict = self.plotDict.copy()
plotDict[u'pixelsize'] = self.pixelsize
plotDict[u'voltage'] = self.voltage
plotDict[u'C3'] = self.C3
if len( self.errorDictList ) > 0 and u'peaksigTriMat' in self.errorDictList[-1]:
peaksig = self.errorDictList[-1][u'peaksigTriMat']
peaksig = peaksig[ peaksig > 0.0 ]
plotDict[u'meanPeaksig'] = np.mean( peaksig )
plotDict[u'stdPeaksig'] = np.std( peaksig )
plotDict[u'CTFInfo'] = self.CTFInfo
if bool(self.savePNG):
plotDict[u'plotFile'] = os.path.join( self.files[u'figurePath'], self.plotDict[u'title'] + "_Stats.png")
self.files[u'figStats'] = plotDict[u'plotFile']
plotArgs.append( [u'stats', plotDict] )
######
#Multiprocessing pool (to speed up matplotlib's slow rendering and hopefully remove polling loop problems)
#####
if os.name != u'nt' and bool( self.plotDict[u'multiprocess'] ):
figPool = mp.Pool( processes=self.n_threads )
print( " n_threads = %d, plotArgs length = %d" %( self.n_threads, len(plotArgs) ) )
figPool.map( plot.generate, plotArgs )
figPool.close()
figPool.terminate()
            # Wait for everyone to finish, otherwise on the InfiniBand cluster we have problems with partially rendered files.
figPool.join()
else: # Windows mode, can also be used for debugging when plot goes haywire
# Don't multiprocess the plots, but execute serially.
for plotArg in plotArgs:
plot.generate( plotArg )
self.bench['plot1'] = time.time()
def makeMovie( self, movieName = None, clim = None, frameRate=3, graph_cm = u'gnuplot' ):
"""
        Use FFMPEG to generate movies showing the correlations. C0 must not be None.
The ffmpeg executable must be in the system path.
"""
import os
fex = '.png'
print( "makeMovie must be able to find FFMPEG on the system path" )
print( "Strongly recommended to use .mp4 extension" )
if movieName is None:
movieName = self.files[u'stack'] + u".mp4"
m = self.C0.shape[0]
# Turn off display of matplotlib temporarily
originalBackend = plt.get_backend()
plt.switch_backend(u'agg')
plt.rc(u'font', family=self.plotDict[u'fontstyle'], size=self.plotDict[u'fontsize'])
corrmat = self.errorDictList[-1][ u'corrTriMat' ]
climCM = [np.min(corrmat[corrmat>0.0]) * 0.75, np.max(corrmat[corrmat>0.0])]
# Get non-zero indices from corrmat
# Note that FFMPEG starts counting at 0.
for J in np.arange(0,m):
corrMap = self.C0[J,:,:].copy(order='C')
figCM = plt.figure()
            plt.subplot( 121 )
# corrTriMat
plt.imshow( corrmat, interpolation="nearest", vmin=climCM[0], vmax=climCM[1] )
plt.xlabel( "Base image" )
plt.ylabel( "Template image" )
plt.colorbar( orientation='horizontal' )
plt.title( "Maximum correlation upper-tri matrix" )
plt.set_cmap( graph_cm )
            # Draw lines (TODO: unravel J into (template, base) indices before plotting guides)
            # Reset xlim and ylim
            plt.xlim( [0, corrmat.shape[1]-1] )
            plt.ylim( [0, corrmat.shape[0]-1] )
# C0
            plt.subplot( 122 )
if clim is None:
plt.imshow( corrMap, interpolation='none' )
else:
plt.imshow( corrMap, interpolation='none', vmin=clim[0], vmax=clim[1] )
plt.set_cmap( graph_cm )
plt.colorbar( orientation='horizontal' )
# Render and save
plt.tight_layout()
plt.pause(0.05)
plt.savefig( "corrMap_%05d"%J + fex, dpi=self.plotDict['image_dpi'] )
plt.close( figCM )
# corrMap = ( 255.0 * util.normalize(corrMap) ).astype('uint8')
# Convert to colormap as follows: Image.fromarray( np.uint8( cm.ocean_r(stddesk)*255))
# skimage.io.imsave( "corrMap_%05d"%J + fex, mage, plugin='freeimage' )
# skimage.io.imsave( "corrMap_%05d"%J + fex, corrMap )
pass
time.sleep(0.5)
# Remove the old movie if it's there
try:
os.remove( movieName )
except:
pass
# Make a movie with lossless H.264
# One problem is that H.264 isn't compatible with PowerPoint. Can use Handbrake to make it so...
# Framerate command isn't working...
comstring = "ffmpeg -r "+str(frameRate)+ " -f image2 -i \"corrMap_%05d"+fex+"\" -c:v libx264 -preset veryslow -qp 0 -r "+str(frameRate)+ " "+movieName
print( comstring )
sub = subprocess.Popen( comstring, shell=True )
sub.wait()
# os.system( comstring )
# Clean up
for J in np.arange(0,m):
os.remove( "corrMap_%05d"%J + fex )
pass
plt.switch_backend(originalBackend)
def printProfileTimes( self ):
""" Go through and print out all the profile times in self.t """
print( "----PROFILING TIMES----" )
print( " dtypes: float: %s, complex: %s" %(float_dtype, fftw_dtype) )
if bool( np.any(self.filtSum) ):
print( " images.dtype: %s, filtSum.dtype: %s" % (self.images.dtype, self.filtSum.dtype) )
else:
print( " images.dtype: %s" % (self.images.dtype) )
if str(self.images.dtype) == 'float64':
print( " WARNING: running in double-precision (may be slow)" )
try: print( " Loading files (s): %.3f"%(self.bench['loaddata1'] - self.bench['loaddata0']) )
except: pass
try: print( " Image/mask binning (s): %.3f"%(self.bench['bin1'] - self.bench['bin0']) )
except: pass
try: print( " X-correlation initialization (s): %.3f"%(self.bench['xcorr1'] - self.bench['xcorr0']) )
except: pass
try: print( " X-correlation forward FFTs (s): %.3f"%(self.bench['xcorr2'] - self.bench['xcorr1']) )
except: pass
try: print( " X-correlation main computation (s): %.3f"%(self.bench['xcorr3'] - self.bench['xcorr2']) )
except: pass
try: print( " Complete (entry-to-exit) xcorrnm2_tri (s): %.3f"%(self.bench['xcorr3'] - self.bench['xcorr0']) )
except: pass
try: print( " Complete Unblur (s): %.3f" % (self.bench['unblur1'] - self.bench['unblur0']) )
except: pass
try: print( " Shifts solver (last iteration, s): %.3f"%(self.bench['solve1'] - self.bench['solve0']) )
except: pass
try: print( " Subpixel alignment (s): %.3f"%(self.bench['shifts1'] - self.bench['shifts0']) )
except: pass
try: print( " Fourier Ring Correlation (s): %.3f"%(self.bench['frc1'] - self.bench['frc0']))
except: pass
try: print( " Post-process filtering (s): %.3f"%(self.bench['dose1'] - self.bench['dose0']))
except: pass
try: print( " Hotpixel mask (s): %.3f" % (self.bench['hot1'] - self.bench['hot0']))
except: pass
try: print( " CTF estimation with %s (s): %.3f" %( self.CTFProgram, self.bench['ctf1']-self.bench['ctf0'] ) )
except: pass
try: print( " Plot rendering (s): %.3f"%(self.bench['plot1'] - self.bench['plot0']))
except: pass
try: print( " Save files (s): %.3f"%(self.bench['savedata1'] - self.bench['savedata0']))
except: pass
print( "###############################" )
try: print( " Total execution time (s): %.3f"%(time.time() - self.bench['total0']) )
except: pass
pass
##### COMMAND-LINE INTERFACE ####
#if __name__ == '__main__':
# main()

##### End of zorro/zorro.py; zorro/__main__.py follows #####
def main():
# Get command line arguments
from matplotlib import rc
rc('backend', qt4="PySide")
import sys, os
import zorro
import numpy as np
import time
# Usage:
# python `which zorro.py` -i Test.dm4 -c default.ini -o test.mrc
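    # Running with no arguments writes a fresh template.zor configuration, which
    # can be edited and passed back in with -c.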
stackReg = zorro.ImageRegistrator()
configFile = None
inputFile = None
outputFile = None
try: print( "****Running Zorro-command-line on hostname: %s****"%os.uname()[1] )
except: pass
    # First argument is zorro.py, then we expect (flag, value) pairs.
    # A while-loop is used so the flag values can be consumed; incrementing the
    # loop variable inside a for-loop over np.arange has no effect on iteration.
    J = 1
    while J < len(sys.argv):
        if sys.argv[J] == '-c':
            configFile = sys.argv[J+1]
            J += 2
        elif sys.argv[J] == '-i':
            inputFile = sys.argv[J+1]
            J += 2
        elif sys.argv[J] == '-o':
            outputFile = sys.argv[J+1]
            J += 2
        else:
            J += 1
    if inputFile is None and configFile is None:
        print( "No input files, outputting template.zor" )
stackReg.saveConfig( configNameIn = "template.zor")
sys.exit()
    if inputFile is None and configFile is not None:
stackReg.loadConfig( configNameIn=configFile, loadData = True )
stackReg.bench['total0'] = time.time()
    if inputFile is not None and configFile is not None:
stackReg.loadConfig( configNameIn=configFile, loadData = False )
stackReg.bench['total0'] = time.time()
stackReg.files['stack'] = inputFile
stackReg.loadData()
    if outputFile is not None:
stackReg.files['sum'] = outputFile
# Force use of 'Agg' for matplotlib. It's slower than Qt4Agg but doesn't crash on the cluster
stackReg.plotDict['backend'] = 'Agg'
if stackReg.triMode == 'refine':
# In the case of 'refine' we have to call 'diag' first if it hasn't already
        # been performed.
        if not bool(stackReg.errorDictList[-1]) and stackReg.imageSum is not None:
            # Assume the initial alignment has already been performed
            print( "Zorro refine assuming that initial alignment has already been performed." )
pass
else:
print( "Zorro refine performing initial alignment." )
stackReg.triMode = 'diag'
stackReg.alignImageStack()
stackReg.loadData() # Only re-loads the stack
stackReg.triMode = 'refine'
        # TODO: enable subZorro refinement.
stackReg.alignImageStack()
else:
# Execute the alignment as called for Zorro/UnBlur/etc.
stackReg.alignImageStack()
    # Save everything and do rounding/compression operations
stackReg.saveData() # Can be None
# Save plots
if stackReg.savePNG:
stackReg.plot()
stackReg.printProfileTimes()
stackReg.METAstatus = 'fini'
stackReg.saveConfig()
print( "Zorro exiting" )
sys.exit()
if __name__ == "__main__":
    main()

##### End of zorro/__main__.py; zorro/extract.py follows #####
# TODO: move all this into ReliablePy?
from __future__ import division, print_function, absolute_import
import numpy as np
from . import (zorro_util, zorro)
import time
import os, os.path, glob
import scipy.ndimage
import re
import collections
import mrcz
from mrcz import ReliablePy
# For multiprocessing, maybe I should use a subprocess pool and a master process? That would avoid having to
# use any of the Python MPI libraries. Then again maybe I should just learn mpi4py; likely it would be a
# much more robust substitute for multiprocessing going forward.
try:
from mpi4py import MPI
except:
#print( "WARNING, zorro.extract: mpi4py module not found, use of multi-processed components will generate errors" )
pass
def readCTF3Log( logName ):
ctfDict = {}
if os.path.isfile( logName ):
ctfInfo = np.loadtxt( logName, skiprows=1, usecols=(0,1,2,3,4), dtype='str' )
ctfDict['DefocusU'] = np.float32( ctfInfo[1,0] )
ctfDict['DefocusV'] = np.float32( ctfInfo[1,1] )
ctfDict['DefocusAngle'] = np.float32( ctfInfo[1,2] )
ctfDict['Voltage'] = np.float32( ctfInfo[0,1] )
ctfDict['SphericalAberration'] = np.float32( ctfInfo[0,0] )
ctfDict['AmplitudeContrast'] = np.float32( ctfInfo[0,2] )
ctfDict['Magnification'] = np.float32( ctfInfo[0,3] )
ctfDict['DetectorPixelSize'] = np.float32( ctfInfo[0,4] )
ctfDict['CtfFigureOfMerit'] = np.float32( ctfInfo[1,3] )
return ctfDict
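# Usage sketch (hypothetical log name). As parsed above, row 0 of the log body
# holds [Cs, kV, AmpContrast, Magnification, DStep] and row 1 holds
# [DefocusU, DefocusV, DefocusAngle, CC]:
#   ctfDict = readCTF3Log( u"micrograph01_ctffind3.log" )
#   print( ctfDict['DefocusU'], ctfDict['CtfFigureOfMerit'] )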
def readGCTFLog( logName ):
ctfDict = {}
    with open( logName, 'r' ) as lh: # text mode, so the str regexes below work on Python 3
        logLines = lh.read()
pixelsize = np.float32( re.findall( "--apix\s+\d+\.\d+", logLines )[0].split()[1] )
ctfDict['DetectorPixelSize'] = np.float32( re.findall( "--dstep\s+\d+\.\d+", logLines )[0].split()[1] )
ctfDict['Magnification'] = 1E4 * ctfDict['DetectorPixelSize'] / pixelsize
ctfDict['Voltage'] = np.float32( re.findall( "--kv\s+\d+\.\d+", logLines )[0].split()[1] )
ctfDict['SphericalAberration'] = np.float32( re.findall( "--cs\s+\d+\.\d+", logLines )[0].split()[1] )
ctfDict['AmplitudeContrast'] = np.float32( re.findall( "--ac\s+\d+\.\d+", logLines )[0].split()[1] )
FinalString = re.findall( "\s+\d+\.\d+\s+\d+\.\d+\s+\d+\.\d+\s+\d+\.\d+\s+Final\sValues", logLines )
FinalSplit = FinalString[0].split()
ctfDict['DefocusU'] = np.float32( FinalSplit[0] )
ctfDict['DefocusV'] = np.float32( FinalSplit[1] )
ctfDict['DefocusAngle'] = np.float32( FinalSplit[2] )
ctfDict['CtfFigureOfMerit'] = np.float32( FinalSplit[3] )
return ctfDict
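# Usage sketch (hypothetical log name); values are parsed from the Gctf command-line
# echo and the "Final Values" row, with defoci typically reported in Angstroms:
#   ctfDict = readGCTFLog( u"micrograph01_gctf.log" )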
def partExtract( globPath, boxShape, boxExt=".star",
binShape = None, binKernel = 'lanczos2',
rootName="part", sigmaFilt=-1.0,
invertContrast=True, normalize=True, fitBackground=True,
movieMode=False, startFrame=None, endFrame=None, doseFilter=False ):
"""
Extracts particles from aligned and summed micrographs (generally <micrograph>.mrc).
    globPath = "align/*.mrc", for example, will process all such mrc files.
    *.log will use Zorro logs.
    Can also just pass a list of files.
TODO: CHANGE TO LOAD FROM A ZORRO LOG FILE? OR JUST MAKE IT THE PREFERRED OPTION?
Expects to find <micrograph>.star in the directory which has all the box centers in Relion .star format
binShape = [y,x] is the particle box size to resample to. If == None no resampling is done.
For binning, binKernel = 'lanczos2' or 'gauss' reflects the anti-aliasing filter used.
rootName affects the suffix appended to each extracted particle stack (<micrograph>_<rootName>.mrcs)
sigmaFilt is the standard deviation applied for removal of x-rays and hot pixels, 2.5 - 3.0 is the recommended range.
It uses the sigmaFilt value to compute a confidence interval to filter the intensity value of only outlier
pixels (typically ~ 1 %)
invertContrast = True, inverts the contrast, as required for Relion/Frealign.
normalize = True changes the particles to have 0.0 mean and 1.0 standard deviation, as required for Relion/Frealign.
fitBackground = True removes a 2D Gaussian from the image. In general it's better to perform this prior to
particle picking using the Zorro dose filtering+background subtraction mechanism.
TODO: GRAB THE CTF INFORMATION AS WELL AND MAKE A MERGED .STAR FILE
TODO: add a movie mode that outputs a substack (dose-filtered) average.
"""
t0 = time.time()
if isinstance( globPath, list ) or isinstance( globPath, tuple ):
mrcFiles = globPath
else:
mrcFiles = glob.glob( globPath )
try:
os.mkdir( "Particles" )
except:
pass
particleStarList = [None]*len(mrcFiles)
for K, mrcFileName in enumerate(mrcFiles):
boxFileName = os.path.splitext( mrcFileName )[0] + boxExt
if not os.path.isfile( boxFileName ):
print( "Could not find .box/.star file: " + boxFileName )
continue
rlnBox = ReliablePy.ReliablePy()
rlnBox.load( boxFileName )
xCoord = rlnBox.star['data_']['CoordinateX']
yCoord = rlnBox.star['data_']['CoordinateY']
mrcMage = mrcz.readMRC( mrcFileName )[0]
###### Remove background from whole image #####
if bool( fitBackground ):
mrcMage -= zorro_util.backgroundEstimate( mrcMage )
###### Check for particles too close to the edge and remove those coordinates. #####
keepElements = ~( (xCoord < boxShape[1]/2) |
( yCoord < boxShape[0]/2) |
( xCoord > mrcMage.shape[1]-boxShape[1]/2) |
( yCoord > mrcMage.shape[0]-boxShape[0]/2) )
xCoord = xCoord[keepElements]; yCoord = yCoord[keepElements]
##### Extract particles #####
particles = np.zeros( [len(xCoord), boxShape[0], boxShape[1]], dtype='float32' )
for J in np.arange( len(xCoord) ):
            # Slice indices must be integers (box coordinates come in as floats)
            partMat = mrcMage[ int(yCoord[J]-boxShape[0]//2):int(yCoord[J]+boxShape[0]//2),
                               int(xCoord[J]-boxShape[1]//2):int(xCoord[J]+boxShape[1]//2) ]
###### Apply confidence-interval gaussian filter #####
if sigmaFilt > 0.0:
partMean = np.mean( partMat )
partStd = np.std( partMat )
partHotpix = np.abs(partMat - partMean) > sigmaFilt*partStd
# Clip before applying the median filter to better limit multiple pixel hot spots
partMat = np.clip( partMat, partMean - sigmaFilt*partStd, partMean + sigmaFilt*partStd )
# Let's stick to a gaussian_filter, it's much faster than a median filter and seems equivalent if we pre-clip
# boxFilt[J,:,:] = scipy.ndimage.median_filter( boxMat[J,:,:], [5,5] )
partFilt = scipy.ndimage.gaussian_filter( partMat, 4.0 )
particles[J,:,:] = partHotpix * partFilt + (~ partHotpix) * partMat
else:
particles[J,:,:] = partMat
#ims( -particles )
##### Re-scale particles #####
if np.any( binShape ):
binFact = np.array( boxShape ) / np.array( binShape )
# Force binFact to power of 2 for now.
            import math
            # Round binFact down to the nearest power of 2
            binFact[0] = 2.0 ** np.floor( math.log( binFact[0], 2 ) )
            binFact[1] = 2.0 ** np.floor( math.log( binFact[1], 2 ) )
[xSample,ySample] = np.meshgrid( np.arange( 0,binShape[1] )/binFact[1], np.arange( 0,binShape[0] )/binFact[0] )
if binKernel == 'lanczos2':
# 2nd order Lanczos kernel
lOrder = 2
xWin = np.arange( -lOrder, lOrder + 1.0/binFact[1], 1.0/binFact[1] )
yWin = np.arange( -lOrder, lOrder + 1.0/binFact[0], 1.0/binFact[0] )
xWinMesh, yWinMesh = np.meshgrid( xWin, yWin )
rmesh = np.sqrt( xWinMesh*xWinMesh + yWinMesh*yWinMesh )
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
windowKernel = (lOrder/(np.pi*np.pi*rmesh*rmesh)) * np.sin( np.pi / lOrder * rmesh ) * np.sin( np.pi * rmesh )
windowKernel[ yWin==0, xWin==0 ] = 1.0
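                # This is the standard Lanczos window, L(r) = a*sin(pi*r)*sin(pi*r/a) / (pi*r)**2
                # with a = lOrder; the 0/0 singularity at r = 0 is patched to L(0) = 1 above.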
elif binKernel == 'gauss':
xWin = np.arange( -2.0*binFact[1],2.0*binFact[1]+1.0 )
yWin = np.arange( -2.0*binFact[0],2.0*binFact[0]+1.0 )
print( "binFact = " + str(binFact) )
xWinMesh, yWinMesh = np.meshgrid( xWin, yWin )
# rmesh = np.sqrt( xWinMesh*xWinMesh + yWinMesh*yWinMesh )
windowKernel = np.exp( -xWinMesh**2/(0.36788*binFact[1]) - yWinMesh**2/(0.36788*binFact[0]) )
pass
partRescaled = np.zeros( [len(xCoord), binShape[0], binShape[1]], dtype='float32' )
for J in np.arange( len(xCoord) ):
# TODO: switch from squarekernel to an interpolator so we can use non-powers of 2
partRescaled[J,:,:] = zorro_util.squarekernel( scipy.ndimage.convolve( particles[J,:,:], windowKernel ),
k= binFact[0] )
particles = partRescaled
pass
# ims( windowKernel ) # DEBUG
print( " particles.dtype = " + str(particles.dtype) )
###### Normalize particles after binning and background subtraction #####
if bool(normalize):
particles -= np.mean( particles, axis=0 )
particles *= 1.0 / np.std( particles, axis=0 )
##### Invert contrast #####
if bool(invertContrast):
particles = -particles
###### Save particles to disk ######
# Relion always saves particles within ./Particles/<fileext> but we could use anything if we want to
# make changes and save them in the star file.
particleFileName = os.path.join( "Particles", os.path.splitext( mrcFileName )[0] +"_" + rootName + ".mrcs" )
# TODO: add pixel size to particles file
mrcz.writeMRC( particles, particleFileName ) # TODO: pixelsize
##### Output a star file with CTF and particle info. #####
print( "Particle file: " + particleFileName )
particleStarList[K] = os.path.splitext( particleFileName )[0] + ".star"
print( "star file: " + particleStarList[K] )
headerDict = { "ImageName":1, "CoordinateX":2, "CoordinateY":3, "MicrographName": 4 }
lookupDict = dict( zip( headerDict.values(), headerDict.keys() ) )
#
        with open( particleStarList[K], 'w' ) as fh:
            fh.write( "\ndata_images\n\nloop_\n")
            for J in sorted( lookupDict.keys() ):
                fh.write( "_rln" + lookupDict[J] + " #" + str(J) + "\n")
for I in np.arange(0, len(xCoord) ):
mrcsPartName = os.path.splitext( particleStarList[K] )[0] + ".mrcs"
fh.write( "%06d@%s %.1f %.1f %s\n" % ( I+1, mrcsPartName, xCoord[I], yCoord[I], mrcFileName ) )
# TODO: join all star files, for multiprocessing this should just return the list of star files
# TODO: add CTF Info
t1 = time.time()
print( "Particle extraction finished in (s): %.2f" % (t1-t0) )
return particleStarList
def joinParticleStars( outputName = "zbin2.star", starGlob="Particles/align/*.star", ctfExt="_gctf.log" ):
"""
Take all the star files generated above, load the CTF information, and write a complete data.star
file for Relion processing.
"""
masterRln = ReliablePy.ReliablePy()
masterRln.star['data_'] = collections.OrderedDict()
masterRln.star['data_']['MicrographName'] = []
masterRln.star['data_']['CoordinateX'] = []
masterRln.star['data_']['CoordinateY'] = []
masterRln.star['data_']['ImageName'] = []
masterRln.star['data_']['DefocusU'] = []
masterRln.star['data_']['DefocusV'] = []
masterRln.star['data_']['DefocusAngle'] = []
masterRln.star['data_']['Voltage'] = []
masterRln.star['data_']['SphericalAberration'] = []
masterRln.star['data_']['AmplitudeContrast'] = []
masterRln.star['data_']['Magnification'] = []
masterRln.star['data_']['DetectorPixelSize'] = []
masterRln.star['data_']['CtfFigureOfMerit'] = []
    fh = open( outputName, 'w' )
fh.write( "\ndata_\n\nloop_\n")
headerKeys = masterRln.star['data_'].keys()
    for J, key in enumerate(headerKeys):
        fh.write( "_rln" + key + " #" + str(J+1) + "\n")
starList = glob.glob( starGlob )
for starFile in starList:
print( "Joining " + starFile )
stackRln = ReliablePy.ReliablePy()
stackRln.load( starFile )
# First check for and load a ctf log
micrographName = stackRln.star['data_images']['MicrographName'][0]
##### Find the CTF info. #####
# 1st, look for a ctffind3.log file?
        foundLog = False
        logName = os.path.splitext( micrographName )[0] + ctfExt
        if not os.path.isfile( logName ):
            # str.rstrip strips a character set, not a suffix, so trim "_filt" explicitly
            stemName = os.path.splitext( micrographName )[0]
            if stemName.endswith( "_filt" ):
                stemName = stemName[:-len("_filt")]
            logName = stemName + ctfExt
            if not os.path.isfile( logName ):
                print( "WARNING: CTF results not found for : " + micrographName )
            else:
                foundLog = True
        else:
            foundLog = True
try:
if ctfExt == "_gctf.log" and foundLog:
ctfDict = readGCTFLog( logName )
elif ctfExt == "_ctffind3.log" and foundLog:
ctfDict = readCTF3Log( logName )
elif ctfExt == ".mrc.zor" and foundLog:
zReg = zorro.ImageRegistrator()
zReg.loadConfig( logName )
ctfDict = zReg.CTFInfo
ctfDict['Voltage'] = zReg.voltage
ctfDict['SphericalAberration'] = zReg.C3
ctfDict['Magnification'] = 1E4 * zReg.detectorPixelSize / zReg.pixelsize
ctfDict['DetectorPixelSize'] = zReg.detectorPixelSize
except:
print( "Error: Could not load CTF log for %s" % micrographName )
continue
# If the log exists, add the star file
n_part = len( stackRln.star['data_images']['MicrographName'] )
# Build the dictionary up more
stackRln.star['data_images']['DefocusU'] = [ctfDict['DefocusU']] * n_part
stackRln.star['data_images']['DefocusV'] = [ctfDict['DefocusV']] * n_part
stackRln.star['data_images']['DefocusAngle'] = [ctfDict['DefocusAngle']] * n_part
stackRln.star['data_images']['Voltage'] = [ctfDict['Voltage']] * n_part
stackRln.star['data_images']['SphericalAberration'] = [ctfDict['SphericalAberration']] * n_part
stackRln.star['data_images']['AmplitudeContrast'] = [ctfDict['AmplitudeContrast']] * n_part
stackRln.star['data_images']['Magnification'] = [ctfDict['Magnification']] * n_part
stackRln.star['data_images']['DetectorPixelSize'] = [ctfDict['DetectorPixelSize']] * n_part
stackRln.star['data_images']['CtfFigureOfMerit'] = [ctfDict['CtfFigureOfMerit']] * n_part
# TODO: add extra columns from relion-2?
for I in np.arange(n_part):
fh.write( " ")
for J, key in enumerate(headerKeys):
fh.write( str( stackRln.star['data_images'][key][I] ) )
fh.write( " " )
fh.write( "\n" )
fh.close()
if __name__ == "__main__":
# bigList = glob.glob( "/Projects/BTV_GFP/filt/*.mrc" )
# # TODO: split the bigList into N_worker processes
# bigList = bigList[:8]
#
# rln = ReliablePy.ReliablePy()
# partExtract( rln, bigList, boxShape=[512,512],
# boxExt=".star", binShape = [128,128], binKernel = 'lanczos2', rootName="zbin2", sigmaFilt=2.5,
# invertContrast=True, normalize=True, fitBackground=True )
    pass

##### End of zorro/extract.py; zorro/plot.py follows #####
import matplotlib.figure
import itertools
import collections
import numpy as np
import matplotlib.offsetbox
# TODO: merge ims() functionality into zorroPlot
import matplotlib.pyplot as plt
import matplotlib.colors as col
import scipy.ndimage as ni
import zorro
import os, os.path, sys
import mrcz
##################################################################################
######################## Object-oriented interface ###############################
##################################################################################
class zorroPlot(object):
def __init__(self, filename=None, width=7, height=7, plot_dpi=72, image_dpi=144, facecolor=[0.75,0.75,0.75,1.0],
MplCanvas = None, backend=u'Qt4Agg' ):
"""
Object-oriented plotting interface for Zorro.
"""
# All parameters are stored in a hash-dictionary
self.plotDict = {}
self.plotDict[u'width'] = width
self.plotDict[u'height'] = height
self.plotDict[u'plot_dpi'] = plot_dpi
self.plotDict[u'image_dpi'] = image_dpi
self.plotDict[u'facecolor'] = facecolor
if bool(filename):
print( "TODO: load and display file from zorroPlot.__init__()" )
# http://stackoverflow.com/questions/13714454/specifying-and-saving-a-figure-with-exact-size-in-pixels
self.fig = matplotlib.figure.Figure(figsize=(width, height), facecolor=facecolor, dpi=plot_dpi )
# This forces the plot window to cover the entire space by default
self.axes = self.fig.add_axes( [0.0,0.0,1.0,1.0] )
self.axes.hold(False) # We want the axes cleared every time plot() is called
self.axes2 = None
self.cmaps_cycle = itertools.cycle( [u"gray", u"gnuplot", u"jet", u"nipy_spectral"] )
self.plotDict[u'image_cmap'] = next( self.cmaps_cycle ) # Pre-cycle once...
self.plotDict[u'graph_cmap'] = u"gnuplot"
self.plotDict[u'showBoxes'] = False # Try to load imageSum_boxMask.png as an overlay
self.plotDict[u'colorbar'] = True
if bool( MplCanvas ):
# Avoid calling anything that would require importing PySide here, as we don't want it as an
            # explicit dependency.
self.canvas = MplCanvas
else:
if backend.lower() == u'agg': # CANNOT RENDER TO SCREEN, PRINTING ONLY
from matplotlib.backends.backend_agg import FigureCanvas
elif backend.lower() == u'qt4' or backend.lower() == u'qt4agg':
from matplotlib.backends.backend_qt4agg import FigureCanvas
elif backend.lower() == u'qt5' or backend.lower() == u'qt5agg':
from matplotlib.backends.backend_qt5agg import FigureCanvas
else: # default is qt4agg
from matplotlib.backends.backend_qt4agg import FigureCanvas
self.canvas = FigureCanvas( self.fig )
try: self.canvas.updateGeometry()
except: pass
pass
def updateCanvas( self ):
"""
Updates a (Qt4Agg) FigureCanvas. Typically an automator.MplCanvas type.
"""
try: self.canvas.updateGeometry()
except: pass
#self.canvas.draw() # Necessary with show?
self.canvas.show()
def printPlot( self, dpi_key = u"plot_dpi" ):
"""
Any following commands shared amongst all plot functions go here for brevity.
"""
if 'title' in self.plotDict:
self.axes.set_title( self.plotDict['title'] )
try: self.canvas.updateGeometry()
except: pass
if u'plotFile' in self.plotDict and bool( self.plotDict['plotFile'] ):
if self.plotDict[u'Transparent']:
color = [0,0,0,0]
else:
color = [1,1,1,1]
self.canvas.print_figure( self.plotDict[u'plotFile'], dpi=self.plotDict[dpi_key],
facecolor=color, edgecolor=color )
return self.plotDict[u'plotFile']
def plotEmpty( self ):
self.fig.clear()
self.axes = self.fig.add_axes( [0.0, 0.0, 1.0, 1.0] )
self.axes.hold(False)
self.axes.plot( [0.0, 1.0], [0.0,1.0], 'k-' )
self.axes.hold(True)
self.axes.plot( [0.0, 1.0], [1.0,0.0], 'k-' )
self.axes.text( 0.45, 0.25, "No data", fontsize=18 )
self.axes.hold(False)
self.axes.set_axis_off()
def plotPixmap( self ):
self.fig.clear()
self.axes = self.fig.add_axes( [0.0, 0.0, 1.0, 1.0] )
self.axes.hold(True)
if u'pixmap' in self.plotDict:
mage = self.axes.imshow( self.plotDict[u'pixmap'], interpolation='sinc' )
self.axes.set_axis_off()
if u'boxMask' in self.plotDict and np.any(self.plotDict[u'boxMask']):
print( "pixmap boxes" )
#scaleDiff = np.array( self.plotDict['pixmap'].shape ) / np.array( self.plotDict['boxMask'].shape )
self.axes.imshow( self.plotDict[u'boxMask'],
extent=mage.get_extent(), interpolation='lanczos' )
else:
print( "No pixmap" )
self.axes.hold(False)
def plotImage( self ):
self.fig.clear()
self.axes = self.fig.add_axes( [0.0, 0.0, 1.0, 1.0] )
if "lowPass" in self.plotDict:
self.plotDict['image'] = ni.gaussian_filter( self.plotDict['image'], self.plotDict["lowPass"] )
clim = zorro.util.histClim( self.plotDict['image'], cutoff=1E-4 )
self.axes.hold(True)
mage = self.axes.imshow( self.plotDict['image'], vmin=clim[0], vmax=clim[1], interpolation='nearest',
cmap=self.plotDict['image_cmap'] )
if 'pixelsize' in self.plotDict:
zorro.util.plotScalebar( mage, self.plotDict['pixelsize'] )
if bool(self.plotDict['colorbar']):
self.fig.colorbar( mage, fraction=0.046, pad=0.04)
self.axes.set_axis_off()
self.axes.hold(False)
return self.printPlot( dpi_key=u'image_dpi' )
def plotStack( self ):
print( "TODO: implement plotStack" )
def plotFFT( self ):
self.fig.clear()
self.axes = self.fig.add_axes( [0.0, 0.0, 1.0, 1.0] )
self.axes.hold(False)
FFTimage = np.fft.fft2( self.plotDict['image'] )
FFTimage[0,0] = 1.0 # Clip out zero-frequency pixel
FFTimage = np.log10( 1.0 + np.abs( np.fft.fftshift( FFTimage )))
if "lowPass" in self.plotDict:
FFTimage = ni.gaussian_filter( FFTimage, self.plotDict["lowPass"] )
FFTclim = zorro.util.ciClim( FFTimage, sigma=2.5 )
mage = self.axes.imshow( FFTimage, interpolation='bicubic', vmin=FFTclim[0], vmax=FFTclim[1],
cmap=self.plotDict['image_cmap'] )
if 'pixelsize' in self.plotDict:
inv_ps = 1.0 / (FFTimage.shape[0] * self.plotDict['pixelsize'] )
zorro.util.plotScalebar( mage, inv_ps, units=u'nm^{-1}' )
self.axes.set_axis_off()
if bool(self.plotDict['colorbar']):
self.fig.colorbar( mage, fraction=0.046, pad=0.04)
return self.printPlot( dpi_key=u'image_dpi' )
def plotPolarFFT( self ):
self.fig.clear()
self.axes = self.fig.add_axes( [0.0, 0.0, 1.0, 1.0] )
self.axes.hold(False)
polarFFTimage = zorro.util.img2polar( np.log10( 1.0 + np.abs( np.fft.fftshift( np.fft.fft2( self.plotDict['image'] )))) )
if "lowPass" in self.plotDict:
polarFFTimage = ni.gaussian_filter( polarFFTimage, self.plotDict["lowPass"] )
FFTclim = zorro.util.ciClim( polarFFTimage, sigma=2.0 )
mage = self.axes.imshow( polarFFTimage, interpolation='bicubic', vmin=FFTclim[0], vmax=FFTclim[1],
cmap=self.plotDict['image_cmap'] )
        if 'pixelsize' in self.plotDict:
# Egh, this scalebar is sort of wrong, maybe I should transpose the plot?
inv_ps = 1.0 / (polarFFTimage.shape[0] * self.plotDict['pixelsize'] )
zorro.util.plotScalebar( mage, inv_ps, units=u'nm^{-1}' )
self.axes.set_axis_off()
if bool(self.plotDict['colorbar']):
self.fig.colorbar( mage, fraction=0.046, pad=0.04)
return self.printPlot( dpi_key=u'image_dpi' )
# TODO: render Gautoauto outputs? Maybe I should make the Gautomatch boxes seperately as a largely
# transparent plot, and just add it on top or not?
def plotCorrTriMat( self ):
self.fig.clear()
self.axes = self.fig.add_axes( [0.0, 0.0, 1.0, 1.0] )
self.axes.hold(False)
corrtri = self.plotDict['corrTriMat']
clim = [np.min(corrtri[corrtri>0.0])*0.75, np.max(corrtri[corrtri>0.0])]
corrmap = self.axes.imshow( corrtri, interpolation='nearest', vmin=clim[0], vmax=clim[1], cmap=self.plotDict['graph_cmap'] )
self.axes.set_xlabel( "Base image" )
self.axes.set_ylabel( "Template image" )
if bool(self.plotDict['colorbar']):
self.fig.colorbar( corrmap, fraction=0.046, pad=0.04)
return self.printPlot( dpi_key=u'plot_dpi' )
def plotPeaksigTriMat( self ):
self.fig.clear()
self.axes = self.fig.add_axes( [0.0, 0.0, 1.0, 1.0] )
self.axes.hold(False)
peaksig = self.plotDict['peaksigTriMat']
clim = [np.min(peaksig[peaksig>0.0])*0.75, np.max(peaksig[peaksig>0.0])]
psmap = self.axes.imshow( peaksig, interpolation='nearest', vmin=clim[0], vmax=clim[1], cmap=self.plotDict['graph_cmap'] )
self.axes.set_xlabel( "Base image" )
self.axes.set_ylabel( "Template image" )
if bool(self.plotDict['colorbar']):
self.fig.colorbar( psmap, fraction=0.046, pad=0.04)
return self.printPlot( dpi_key=u'plot_dpi' )
def plotTranslations( self ):
# rect is [left,bottom,width,height]
self.fig.clear()
self.axes = self.fig.add_axes( [0.12, 0.1, 0.85, 0.85] )
self.axes.hold(True)
if 'errorX' in self.plotDict:
self.axes.errorbar( self.plotDict['translations'][:,1], self.plotDict['translations'][:,0], fmt='k-',
xerr=self.plotDict['errorX'], yerr=self.plotDict['errorY'] )
else:
self.axes.plot( self.plotDict['translations'][:,1], self.plotDict['translations'][:,0], 'k.-',
linewidth=2.0, markersize=16 )
self.axes.plot( self.plotDict['translations'][0,1], self.plotDict['translations'][0,0],
'.', color='purple', markersize=16 )
self.axes.set_xlabel( 'X-axis drift (pix)' )
self.axes.set_ylabel( 'Y-axis drift (pix)' )
self.axes.axis('equal')
self.axes.hold(False)
return self.printPlot( dpi_key=u'plot_dpi' )
def plotPixRegError( self ):
self.fig.clear()
self.axes = self.fig.add_subplot( 211 )
self.axes.hold(False)
self.axes2 = self.fig.add_subplot( 212 )
self.axes2.hold(False)
weightedErrorX = np.abs( self.plotDict['errorX'] )
weightedErrorY = np.abs( self.plotDict['errorY'] )
meanErrX = np.mean( weightedErrorX )
meanErrY = np.mean( weightedErrorY )
stdErrX = np.std( weightedErrorX )
stdErrY = np.std( weightedErrorY )
errorX = np.abs( self.plotDict['errorXY'][:,1] )
errorY = np.abs( self.plotDict['errorXY'][:,0] )
self.axes.semilogy( errorX, '.:', linewidth=1.5, color='black', markersize=12, markerfacecolor='darkslateblue',
            label=r'$\Delta$X: %.3f +/- %.3f pix'%(meanErrX, stdErrX) )
self.axes.legend( fontsize=12, loc='best' )
self.axes.set_ylabel( "X-error estimate (pix)" )
# self.axes.set_title( 'X: %f +/- %f'%(meanErrX, stdErrX) )
self.axes2.semilogy( errorY, '.:', linewidth=1.5, color='black', markersize=12, markerfacecolor='darkolivegreen',
            label=r'$\Delta$Y: %.3f +/- %.3f pix'%(meanErrY, stdErrY) )
#self.axes2.set_title( 'Y: %f +/- %f pix'%(meanErrY, stdErrY) )
self.axes2.legend( fontsize=12, loc='best' )
self.axes2.set_xlabel( "Equation number" )
self.axes2.set_ylabel( "Y-error estimate (pix)" )
return self.printPlot( dpi_key=u'plot_dpi' )
def plotLogisticWeights( self ):
self.fig.clear()
self.axes = self.fig.add_axes( [0.12, 0.1, 0.80, 0.85] )
self.axes.hold(False)
pixError = np.sqrt( self.plotDict['errorXY'][:,0]**2 + self.plotDict['errorXY'][:,1]**2 )
peaksigVect = self.plotDict['peaksigVect']
# Mixing a log-plot with a linear-plot in a plotyy style.
self.axes.semilogy( peaksigVect, pixError, 'k.' )
# ax1.plot( peaksigVect, pixError, 'k.' )
        self.axes.set_xlabel( r'Correlation peak significance, $\sigma$' )
self.axes.set_ylabel( 'Pixel registration error' )
        self.axes.set_ylim( [1E-2, 1E2] )
self.axes.set_xlim( peaksigVect.min(), peaksigVect.max() )
if 'peaksigThres' in self.plotDict:
# Twinx not working with custom sizes?
self.axes2 = self.axes.twinx()
self.fig.add_axes( self.axes2 )
# Plot threshold sigma value
self.axes2.plot( [self.plotDict['peaksigThres'], self.plotDict['peaksigThres']], [0.0, 1.0], '--',
color='firebrick', label=r'$\sigma_{thres} = %.2f$'%self.plotDict['peaksigThres'] )
# Plot the logistics curve
peakSig = np.arange( np.min(peaksigVect), np.max(peaksigVect), 0.05 )
weights = zorro.util.logistic( peakSig, self.plotDict['peaksigThres'], self.plotDict['logisticK'], self.plotDict['logisticNu'] )
self.axes2.plot( peakSig, weights,
label=r"Weights $K=%.2f$, $\nu=%.3f$"%( self.plotDict['logisticK'], self.plotDict['logisticNu']), color='royalblue' )
if 'cdfPeaks' in self.plotDict:
self.axes2.plot( self.plotDict['hSigma'], self.plotDict['cdfPeaks'], '+', label = r'$\sigma-$CDF', color='slateblue' )
lines1, labels1 = self.axes.get_legend_handles_labels()
        if getattr( self, 'axes2', None ) is not None:
lines2, labels2 = self.axes2.get_legend_handles_labels()
self.axes2.legend( lines1 + lines2, labels1 + labels2, loc='best', fontsize=14 )
else:
self.axes.legend( lines1, labels1, loc='best', fontsize=14 )
return self.printPlot( dpi_key=u'plot_dpi' )
def plotFRC( self ):
self.fig.clear()
self.axes = self.fig.add_axes( [0.12, 0.1, 0.85, 0.85] )
self.axes.hold(False)
if not np.any(self.plotDict['FRC']):
print( "Warning, zorro_plotting: FRC is empty" )
return
FRC = self.plotDict['FRC']
inv_ps = 1.0 / (2.0* FRC.size *self.plotDict['pixelsize'] )
freqAxis = np.arange( FRC.size ) * inv_ps
# This is really ugly curve fitting here
#splineFRC = UnivariateSpline( freqAxis, FRC, s = 2.0 )
#splineAxis = np.linspace( freqAxis.min(), freqAxis.max(), 2048 )
# Maybe try fitting to a defocus OTF, it might be faster than the spline fitting.
self.axes.hold(True)
#self.axes.plot( splineAxis, splineFRC(splineAxis), 'r-' )
self.axes.plot( freqAxis, FRC, color='firebrick', marker='.',
markerfacecolor='k', markeredgecolor='k', label=self.plotDict['labelText'] )
self.axes.set_xlabel( r"Spatial frequency, $q$ ($nm^{-1}$)" )
self.axes.set_xlim( [freqAxis.min(), freqAxis.max()] )
self.axes.set_ylabel( "Fourier ring correlation" )
self.axes.legend( loc='best' )
self.axes.hold(False)
return self.printPlot( dpi_key=u'plot_dpi' )
def plotCTFDiag( self ):
self.fig.clear()
self.axes = self.fig.add_axes( [0.0, 0.0, 1.0, 1.0] )
self.axes.hold(False)
#print( "DEBUG: CTF4Diag shape = " + str(self.plotDict['CTF4Diag'].shape) )
#print( "DEBUG: CTF4Diag dtype = " + str(self.plotDict['CTF4Diag'].dtype) )
CTFInfo = self.plotDict['CTFInfo']
try:
mapCTF = self.axes.imshow( self.plotDict['CTFDiag'], cmap=self.plotDict['image_cmap'] )
        except Exception:
print( "WARNING: Could not render CTF Diagnostic image, TODO: switch to disk version" )
# print( " CTFDiag.shape = " + str( self.plotDict['CTFDiag'].shape ) + ", dtype = " + str( self.plotDict['CTFDiag'].dtype) )
# Try the dead version instead? I need checks in the plotting functions to see if the data
# exists and if not nicely switch to live/dead
return
if 'pixelsize' in self.plotDict:
inv_ps = 1.0 / (self.plotDict['CTFDiag'].shape[0] * self.plotDict['pixelsize'] )
zorro.util.plotScalebar( mapCTF, inv_ps, units=u'nm^{-1}' )
if 'title' in self.plotDict:
self.title = self.plotDict['title']
results = (u"$DF_1:\/%.1f\/\AA$\n"%CTFInfo['DefocusU'] +
u"$DF_2:\/%.1f\/\AA$\n"%CTFInfo['DefocusV'] +
u"$\gamma:\/%.1f^\circ$\n"%CTFInfo['DefocusAngle']+
u"$R:\/%.3f$\n"%CTFInfo['CtfFigureOfMerit'] +
u"$Fit\/res:\/%.1f\/\AA$"%CTFInfo['FinalResolution'] )
infobox = matplotlib.offsetbox.AnchoredText( results, pad=0.5, loc=1, prop={'size':16} )
self.axes.add_artist( infobox )
self.axes.set_axis_off() # This is still not cropping properly...
return self.printPlot( dpi_key=u'plot_dpi' )
def plotStats( self ):
# Setup unicode statistics dictionary
#matplotlib.rc('font', family='DejaVu Sans')
statsDict = collections.OrderedDict()
        if 'pixelsize' in self.plotDict:
statsDict[u'Pixel size (nm):'] = "%.4f"%self.plotDict['pixelsize']
if 'voltage' in self.plotDict:
statsDict[u'Accelerating voltage (kV):'] = "%.1f"%self.plotDict['voltage']
if 'C3' in self.plotDict:
statsDict[u'Spherical aberration, C3 (mm):'] = "%.1f"%self.plotDict['C3']
if 'meanPeaksig' in self.plotDict:
statsDict[u'Peak significance:'] = u"%.2f"%self.plotDict['meanPeaksig'] + u" ± %.2f"%self.plotDict['stdPeaksig']
try:
CTFInfo = self.plotDict['CTFInfo']
statsDict[u'CTF defocus #1 (Å):'] = "%.1f"%CTFInfo['DefocusU']
statsDict[u'CTF defocus #2 (Å):'] = "%.1f"%CTFInfo['DefocusV']
statsDict[u'CTF gamma (°):'] = "%.4f"%CTFInfo['DefocusAngle']
statsDict[u'CTF correlation coefficient :'] = "%.5f"%CTFInfo['CtfFigureOfMerit']
statsDict[u'CTF maximum fit frequency (Å) :'] = "%.1f"%CTFInfo['FinalResolution']
        except KeyError:
pass
# Print the statistical metrics
self.fig.clear()
self.axes.get_xaxis().set_visible(False)
self.axes.get_yaxis().set_visible(False)
fontsize = 12
fontfigspacing = float(fontsize*1.5) / (self.fig.dpi * self.fig.get_size_inches()[1])
keycount = 0
for key, value in statsDict.items():
self.fig.text( fontfigspacing, 1 - (1+keycount)*fontfigspacing, key, size=fontsize )
self.fig.text( 0.5+fontfigspacing, 1 - (1+keycount)*fontfigspacing, value, size=fontsize )
keycount += 1
return self.printPlot( dpi_key=u'plot_dpi' )
##################################################################################
#################### Static interface for multiprocessing ##########################
##################################################################################
# Pickle can't serialize Python objects well enough to launch functions of a
# class in a multiprocessing pool, so we need to call a static function to do the
# branching.
def generate( params ):
"""
Maybe the multiprocessing pool should be here, completely outside of Zorro...
"""
plotType = params[0]
plotDict = params[1]
if 'show' in plotDict and bool( plotDict['show'] ):
print( "zorro_plotting.generate(): Cannot render plots to screen from multiprocessing module." )
plotDict['show'] = False
    # Maybe a dictionary that maps plotType to a function is smarter? I don't know if we can if it's not
    # been instantiated.
daPlot = zorroPlot( backend=plotDict['backend'] )
daPlot.plotDict = plotDict # Override -- this can avoid some passing-by-value
if plotType == 'translations':
return daPlot.plotTranslations()
elif plotType == 'pixRegError':
return daPlot.plotPixRegError()
elif plotType == 'image':
return daPlot.plotImage()
elif plotType == 'FFT':
return daPlot.plotFFT()
elif plotType == 'polarFFT':
return daPlot.plotPolarFFT()
elif plotType == 'stats':
return daPlot.plotStats()
elif plotType == 'peaksigTriMat':
return daPlot.plotPeaksigTriMat()
elif plotType == 'logisticWeights':
return daPlot.plotLogisticWeights()
elif plotType == 'lazyFRC':
return daPlot.plotFRC()
elif plotType == 'CTFDiag':
return daPlot.plotCTFDiag()
elif plotType == 'corrTriMat':
return daPlot.plotCorrTriMat()
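# A minimal sketch (an assumption, not part of the original API) of how generate() could be
# dispatched through a multiprocessing pool, which is the reason the branching lives in a
# module-level function rather than a bound method (bound methods don't pickle cleanly):
#
#   import multiprocessing as mp
#   tasks = [ ('translations', plotDict), ('FFT', plotDict) ]   # hypothetical plotDicts
#   pool = mp.Pool( processes=2 )
#   results = pool.map( generate, tasks )
#   pool.close(); pool.join()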
IMS_HELPTEXT = """
Usage: ims <image_filename> <cutoff level>
Valid types: .dm4, .mrc, .mrcs, .mrcz, .mrcsz
Shows individual frames in the 3D image (dimensions organized as [z,x,y]).
"f" shows the view in full-screen
"n" next frame, ("N" next by step of 10)
"p" previous frame, ("P" previous by step of 10)
"l" toogles the log scale.
"y" toggles polar transform
"F" toggles Fourier transform
"c" swithces between gray, gnuplot, jet, nipy_spectral colormaps.
"h" turns on histogram-based contrast limits
"b" hides/shows boxes (searches for _automatch.box file )
"i" zooms in
"o" zooms out
"v" transposes (revolves) the axes so a different projection is seen.
"arrows" move the frame around
"g" gaussian low-pass ( sharpen more with 'k', smoothen more with 'm')
"r" resets the position to the center of the frame
"q" increase the contrast limits ("Q" is faster)
"w" decrease the contrast limits ("W" is faster)
"R" reset contrast to default
"s" saves current view as PNG
"S" shows sum projection
"M" shows max projection
"V" shows var projection
"t" print statistics for current frame
"T" prints statistics for entire stack
"""
class ims(object):
    __doc__ = IMS_HELPTEXT
plt.rcParams['keymap.yscale'] = '' # to disable the binding of the key 'l'
plt.rcParams['keymap.pan'] = '' # to disable the binding of the key 'p'
plt.rcParams['keymap.grid'] = '' # to disable the binding of the key 'g'
plt.rcParams['keymap.zoom'] = '' # to disable the binding of the key 'o'
def __init__(self, im, index=0, titles=[u"",], logMode=False, fftMode=False, polarMode=False, blocking=False ):
plt.ion()
#plt.pause(1E-4)
self.im = im
self.index = index
self.cmaps_cycle = itertools.cycle( [u"gray", u"gnuplot", u"jet", u"nipy_spectral"] )
self.cmap = next( self.cmaps_cycle )
self.exiting = False
self.logMode = logMode
self.polarMode = polarMode
self.fftMode = fftMode
self.sigmaMode = True
self.filterMode = False
self.__gaussSigma = 1.5
self.doTranspose = False
self.filename = None
self.titles = titles
self.__currTitle = ""
self.__sigmaLevels = np.hstack( [np.array( [0.01, 0.02, 0.04, 0.06, 0.08]),
np.arange( 0.1, 20.1, 0.1 )])
self.__sigmaIndex = 31 # 3.0 sigma by default
self.blocking = blocking
self.showBoxes = True
self.boxLen = 0
self.boxYX = None
self.boxFoM = None
print( "ims: type(im) = %s" % type(im) )
if sys.version_info >= (3,0):
if isinstance( self.im, str ):
self.loadFromFile( im )
else: # Python 2
if isinstance( self.im, str ) or isinstance(self.im, unicode):
self.loadFromFile( im )
if isinstance( self.im, tuple) or isinstance( self.im, list):
# Gawd tuples are annoyingly poorly typedefed
self.im = np.array( self.im )
print( "shape of tupled array: " + str(self.im.shape) )
# Don't even bother checking, the complex representation needs to be re-written anyway
self.complex = False
        if self.im.ndim == 2:
if np.iscomplex(self.im).any():
self.complex = True
self.im = np.array( [np.hypot( np.real(self.im), np.imag(self.im)),np.angle(self.im)] )
print( "DEBUG: complex self.im.shape = %s" % str(self.im.shape) )
self.__imCount = 2
self.frameShape = self.im.shape[1:]
else:
self.complex = False
self.frameShape = self.im.shape
self.__imCount = 1
        elif self.im.ndim == 3:
if np.iscomplex( self.im ).any():
self.im = np.hypot( np.real(self.im), np.imag(self.im) )
self.complex = False
self.frameShape = self.im.shape[1:]
self.__imCount = self.im.shape[0]
self.__minList = np.nan * np.empty( self.__imCount ) # Could retrieve this from MRC files?
self.__maxList = np.nan * np.empty( self.__imCount ) # Could retrieve this from MRC files?
self.__meanList = np.nan * np.empty( self.__imCount ) # Could retrieve this from MRC files?
self.__stdList = np.nan * np.empty( self.__imCount ) # Could retrieve this from MRC files?
print( "IMS self.im.shape = %s" % str(self.im.shape) )
self.dtype = self.im.dtype
self.projToggle = False
self.zoom = 1
self.offx,self.offy = 0,0
self.stepXY = 24 # step of the movement up-down, left-right
self.offVmin,self.offVmax = 0,0
self.showProfiles = False
if not(self.showProfiles):
self.fig = plt.figure()
self.figNum = plt.get_fignums()[-1]
print( "Shown in figure %g."%self.figNum)
self.ax = self.fig.add_subplot(111)
else:
self.fig = plt.figure(figsize=(10,10))
self.ax = self.fig.axes
self.__setaxes__()
################
self.__recompute__()
self.fig.canvas.mpl_connect( 'key_press_event', self.__call__ )
self.fig.canvas.mpl_connect( 'close_event', self.__exit__ )
self.fig.canvas.mpl_connect( 'resize_event', self.__draw__ )
plt.show( block=self.blocking )
# plt.ion()
    def loadFromFile(self, filename, loadBoxes=True ):
        self.titles = filename
        print( "Try to load MRC or DM4 files" )
        file_front, file_ext = os.path.splitext( filename )
        if (file_ext.lower() == ".mrc" or file_ext.lower() == ".mrcs" or
            file_ext.lower() == ".mrcz" or file_ext.lower() == ".mrcsz"):
            self.im, self.header = mrcz.readMRC( filename, pixelunits=u'nm' )
        elif file_ext.lower() == ".dm4":
            dm4struct = mrcz.readDM4( filename )
            self.im = dm4struct.im[1].imageData
            self.header = dm4struct.im[1].imageInfo
            del dm4struct
        else:
            print( "Filename has unknown/unimplemented file type: " + filename )
            return
        # Check for boxes
        # Star files don't contain box sizes so use the box files instead
        box_name = file_front + "_automatch.box"
        if bool(self.showBoxes) and loadBoxes and os.path.isfile( box_name ):
            self.loadBoxFile( box_name )
            return
        # Try the star file instead
        box_name = file_front + "_automatch.star"
        if bool(self.showBoxes) and loadBoxes and os.path.isfile( box_name ):
            self.loadStarFile( box_name )
            return
def loadBoxFile(self, box_name ):
box_data = np.loadtxt( box_name, comments="_" )
# box_data columns = [x_center, y_center, ..., ..., FigureOfMerit]
self.boxLen = box_data[0,2]
# In boxfiles coordinates are at the edges.
self.boxYX = np.fliplr( box_data[:,:2] )
# DEBUG: The flipping of the y-coordinate system is annoying...
print( "boxYX.shape = " + str(self.boxYX.shape) + ", len = " + str(self.boxLen) )
self.boxYX[:,0] = self.im.shape[0] - self.boxYX[:,0]
self.boxYX[:,1] += int( self.boxLen / 2 )
self.boxYX[:,0] -= int( self.boxLen/2)
try:
self.boxFoM = box_data[:,4]
clim = zorro.zorro_util.ciClim( self.boxFoM, sigma=2.5 )
self.boxFoM = zorro.zorro_util.normalize( np.clip( self.boxFoM, clim[0], clim[1] ) )
        except (IndexError, ValueError):
self.boxFoM = np.ones( self.boxYX.shape[0] )
self.boxColors = plt.cm.gnuplot( self.boxFoM )
def loadStarFile(self, box_name ):
box_data = np.loadtxt( box_name, comments="_", skiprows=5 )
# box_data columns = [x_center, y_center, ..., ..., FigureOfMerit]
# In star files coordinates are centered
self.boxYX = np.fliplr( box_data[:,:2] )
# DEBUG: The flipping of the y-coordinate system is annoying...
self.boxYX[:,0] = self.im.shape[0] - self.boxYX[:,0]
# There's no box size information in a star file so we have to use a guess
self.boxLen = 224
#self.boxYX[:,1] -= int( self.boxLen / 2 )
#self.boxYX[:,0] += int( self.boxLen / 2 )
try:
self.boxFoM = box_data[:,4]
clim = zorro.zorro_util.ciClim( self.boxFoM, sigma=2.5 )
self.boxFoM = zorro.zorro_util.normalize( np.clip( self.boxFoM, clim[0], clim[1] ) )
        except (IndexError, ValueError):
self.boxFoM = np.ones( self.boxYX.shape[0] )
self.boxColors = plt.cm.gnuplot( self.boxFoM )
def __setaxes__(self):
self.ax.cla()
################
# definitions for the axes
widthProf = 0.1
left, width = 0.05, 0.75
bottomProf = 0.05
bottom, height = widthProf + bottomProf + 0.05, 0.75
leftProf = left + width + 0.05
rect_im = [left, bottom, width, height]
rect_X = [left, bottomProf, width, widthProf] # horizontal
rect_Y = [leftProf, bottom, widthProf, height] # vertical
# start with a rectangular Figure
self.ax = plt.axes(rect_im)
self.axX = plt.axes(rect_X)
self.axY = plt.axes(rect_Y)
nullfmt = plt.NullFormatter() # no labels
self.axX.xaxis.set_major_formatter(nullfmt)
self.axX.yaxis.set_major_formatter(nullfmt)
self.axY.xaxis.set_major_formatter(nullfmt)
self.axY.yaxis.set_major_formatter(nullfmt)
self.posProfHoriz = np.round(self.frameShape[0]/2)
self.posProfVert = np.round(self.frameShape[1]/2)
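    # Rendering is split in two stages: __recompute__() rebuilds self.im2show from the
    # current mode flags (projection, FFT, polar, filter, log), while __draw__() only
    # re-renders the existing self.im2show with the current zoom/contrast settings.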
def __recompute__(self):
self.__currTitle = ""
if self.doTranspose:
self.doTranspose = False
self.im = np.transpose( self.im, axes=[2,0,1] )
print( "Tranposed axes shape: %s" % str(self.im.shape) )
self.__setaxes__()
        if self.im.ndim == 2:
self.im2show = self.im
        elif self.im.ndim == 3:
self.im2show = np.squeeze( self.im[self.index,...] )
self.__currTitle = 'frame %d/%d' % (self.index, self.im.shape[0]-1)
# projections
if self.projToggle:
if self.projType=='M':
self.im2show = self.im.max(axis=0)
self.__currTitle = 'max proj'
if self.projType=='S':
self.im2show = self.im.sum(axis=0)
self.__currTitle = 'sum proj'
if self.projType=='V':
self.im2show = np.var(self.im,axis=0)
self.__currTitle = 'var proj'
if self.complex:
self.__currTitle += ', cplx (0=abs,1=phase)'
if self.fftMode:
self.__currTitle += ", fft"
self.im2show = np.abs(np.fft.fftshift( np.fft.fft2( self.im2show ) ))
if self.polarMode:
self.__currTitle += ", polar"
self.im2show = zorro.zorro_util.img2polar( self.im2show )
if self.filterMode:
self.__currTitle += ", gauss%.2f" % self.__gaussSigma
self.im2show = ni.gaussian_filter( self.im2show, self.__gaussSigma )
if self.logMode:
# # TODO: this can be sent to matplotlib as an argument in imshow instead
self.__currTitle += ', log10'
            if np.any(self.im2show <= 0.0):
# RAM: alternatively we could just add the minimum value to the whole matrix
self.im2show = np.log10( self.im2show - np.min( self.im2show ) + 1.0 )
else:
self.im2show = np.log10( self.im2show )
else:
self.__currTitle += ', lin'
# We need to compute image-wide statistics
if self.sigmaMode:
self.__meanList[self.index] = np.mean( self.im2show )
self.__stdList[self.index] = np.std( self.im2show )
else:
self.__minList[self.index] = np.min( self.im2show )
self.__maxList[self.index] = np.max( self.im2show )
self.__draw__()
def __draw__(self, info=None ):
# print( "Called ims.draw()" )
plt.cla()
tit = self.__currTitle + ""
if self.zoom > 1:
tit += ', zoom %g x'%(self.zoom)
        # Pan offsets (set by the arrow keys) shift the zoomed view window
        center_y = int( self.frameShape[0]/2 ) + self.offy
        center_x = int( self.frameShape[1]/2 ) + self.offx
        halfWidth_y = int( 0.5* self.frameShape[0]/self.zoom )
        halfWidth_x = int( 0.5* self.frameShape[1]/self.zoom )
im_range = [ np.maximum( 0, center_x-halfWidth_x),
np.minimum( self.frameShape[1], center_x+halfWidth_x ),
np.maximum( 0, center_y-halfWidth_y),
np.minimum( self.frameShape[0], center_y+halfWidth_y ) ]
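        # Contrast limits: sigma mode ('h' key) uses mean +/- k*std of the current frame;
        # otherwise the frame min/max are used. Per-frame statistics are cached in the
        # __meanList/__stdList/__minList/__maxList arrays so they are computed only once.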
if self.sigmaMode:
if np.isnan( self.__meanList[self.index] ):
self.__meanList[self.index] = np.mean( self.im2show )
self.__stdList[self.index] = np.std( self.im2show )
clim_min = self.__meanList[self.index] - self.__sigmaLevels[self.__sigmaIndex]*self.__stdList[self.index]
clim_max = self.__meanList[self.index] + self.__sigmaLevels[self.__sigmaIndex]*self.__stdList[self.index]
tit += ", $\sigma$%.2f clim[%.1f,%.1f]" % (self.__sigmaLevels[self.__sigmaIndex], clim_min, clim_max)
else:
if np.isnan( self.__minList[self.index] ):
self.__minList[self.index] = np.min( self.im2show )
self.__maxList[self.index] = np.max( self.im2show )
clim_min = self.__minList[self.index]
clim_max = self.__maxList[self.index]
tit += ", clim[%.1f,%.1f]" % (clim_min, clim_max)
# LogNorm really isn't very failsafe...
# if self.logMode:
# norm = col.LogNorm()
# else:
# norm = None
norm = None
self.ax.set_title( tit )
self.ax.imshow(self.im2show[ im_range[2]:im_range[3], im_range[0]:im_range[1] ],
vmin=clim_min, vmax=clim_max,
interpolation='none',
norm=norm,
extent=im_range,
cmap=self.cmap )
# plt.colorbar(self.ax)
# Printing particle box overlay
        if bool(self.showBoxes) and self.boxYX is not None and self.boxLen > 0:
# Coordinate systems are upside-down in y-axis?
# box2 = int( self.boxLen/4 )
dpi = self.fig.get_dpi()
width = np.minimum( self.fig.get_figwidth(), self.fig.get_figheight() )
# Ok I'm not getting draw events from resizing...
markerSize = (self.boxLen*width/dpi)**2
print( "dpi = %d, width = %g, markerSize = %g" %(dpi,width, markerSize) )
#for J in np.arange( self.boxYX.shape[0] ):
# box = self.boxYX[J,:]
#boxCoord = np.array( [box+[-box2,-box2], box+[-box2,box2],
# box+[box2,box2],
# box+[box2,-box2], box+[-box2,-box2] ] )
# self.ax.scatter( self.boxYX[:,1], self.boxYX[:,0], s=markerSize, color=colors, alpha=0.3 )
self.ax.scatter( self.boxYX[:,1], self.boxYX[:,0],
s=markerSize, color=self.boxColors, alpha=0.2, marker='s' )
plt.xlim( [im_range[0], im_range[1] ] )
plt.ylim( [im_range[2], im_range[3] ] )
# RAM: This format_coord function is amazingly sensitive to minor changes and often breaks
# the whole class.
# DO NOT TOUCH format_coord!!!!
def format_coord(x, y):
            x = int(x + 0.5)
            y = int(y + 0.5)
try:
#return "%s @ [%4i, %4i]" % (round(im2show[y, x],2), x, y)
return "%.5G @ [%4i, %4i]" % (self.im2show[y, x], y, x) #first shown coordinate is vertical, second is horizontal
except IndexError:
return ""
self.ax.format_coord = format_coord
# DO NOT TOUCH format_coord!!!!
if isinstance(self.titles, (list,tuple)) and len(self.titles) > 0:
try:
self.fig.canvas.set_window_title(self.titles[self.index])
except:
self.fig.canvas.set_window_title(self.titles[0])
elif isinstance( self.titles, str ):
self.fig.canvas.set_window_title(self.titles)
if 'qt' in plt.matplotlib.get_backend().lower():
self.fig.canvas.manager.window.raise_() #this pops the window to the top
# TODO: X-Y profiles
# if self.showProfiles:
# posProf = self.posProfHoriz
# self.axX.cla()
# self.axX.plot(rx+1,self.im2show[posProf,rx])
## plt.xlim(rx[0],rx[-1])
# self.axX.set_xlim(rx[0],rx[-1])
plt.show( block=self.blocking )
def printStat(self, mode='all'):
if mode == 'all':
modePrint = 'all frames'
img = self.im
if self.complex:
modePrint = 'the modulus'
img = self.im[0,...]
elif mode == 'curr':
if self.im.ndim > 2:
img = self.im[self.index, ...]
modePrint = 'frame %d'%self.index
else:
img = self.im
modePrint = 'the current frame'
else:
print( "Unknown statistics mode: %s" % mode )
return
print( "===========================================" )
print( "Statistics of " + modePrint + " in figure %g:"%self.figNum)
print( "Shape: ", img.shape )
print( "Maximum: ", img.max(), "@", np.unravel_index(np.argmax(img),img.shape))
print( "Minimum: ", img.min(), "@", np.unravel_index(np.argmin(img),img.shape))
print( "Center of mass:", ni.measurements.center_of_mass(img))
print( "Mean: ", img.mean())
print( "Standard deviation: ", img.std())
print( "Variance: ", img.var() )
print( "Sum: ", img.sum())
print( "Data type:", self.dtype)
print( "===========================================" )
def __exit__(self, event):
print( "Exiting IMS" )
self.exiting = True
        plt.close( self.fig )
def __call__(self, event):
redraw = False
recompute = False
# print( "Received key press %s" % event.key )
        if event.key=='n': # next frame
            if self.im.ndim > 2:
                self.index = np.minimum(self.im.shape[0]-1, self.index+1)
                recompute = True
        elif event.key == 'p': # previous frame
            if self.im.ndim > 2:
                self.index = np.maximum(0, self.index-1)
                recompute = True
        elif event.key=='N': # next frame, step of 10
            if self.im.ndim > 2:
                self.index = np.minimum(self.im.shape[0]-1, self.index+10)
                recompute = True
        elif event.key == 'P': # previous frame, step of 10
            if self.im.ndim > 2:
                self.index = np.maximum(0, self.index-10)
                recompute = True
elif event.key == 'v':
self.doTranspose = True
recompute = True
elif event.key == 'l':
self.logMode = not self.logMode
recompute = True
elif event.key == 'c':
self.cmap = next( self.cmaps_cycle)
redraw = True
elif event.key == 'b':
self.showBoxes = not self.showBoxes
redraw = True
elif event.key == 'h':
self.sigmaMode = not self.sigmaMode
redraw = True
elif event.key == 'g':
self.filterMode = not self.filterMode
recompute = True
elif event.key == 'k':
self.__gaussSigma /= 1.5
if self.filterMode:
recompute = True
elif event.key == 'm':
self.__gaussSigma *= 1.5
if self.filterMode:
recompute = True
elif event.key == 'F': # FFT
self.fftMode = not self.fftMode
recompute = True
elif event.key == 'y': # polar (cYlindrical)
self.polarMode = not self.polarMode
recompute = True
elif event.key in 'SMV':
self.projToggle = not self.projToggle
self.projType = event.key
recompute = True
elif event.key == 'i':
if 4*self.zoom < np.min(self.im.shape[1:]): # 2*zoom must not be bigger than shape/2
self.zoom = 2*self.zoom
redraw = True
elif event.key == 'o':
self.zoom = np.maximum(self.zoom/2,1)
redraw = True
elif event.key == 'right':
self.offx += self.stepXY
self.offx = np.minimum(self.offx,self.im.shape[1]-1)
redraw = True
elif event.key == 'left':
self.offx -= self.stepXY
            self.offx = np.maximum(self.offx,-self.im.shape[1]+1)
redraw = True
elif event.key == 'down':
self.offy += self.stepXY
            self.offy = np.minimum(self.offy,self.im.shape[2]-1)
redraw = True
        elif event.key == 'up':
            self.offy -= self.stepXY
            self.offy = np.maximum(self.offy,-self.im.shape[2]+1)
redraw = True
elif event.key == 'r': # reset position to the center of the image
self.offx,self.offy = 0,0
print( "Reseting positions to the center.")
redraw = True
elif event.key == 'R': # reset contrast
self.offVmin,self.offVmax = 0,0
print( "Reseting contrast.")
redraw = True
elif event.key == 'q': # increase contrast
self.__sigmaIndex = np.maximum( self.__sigmaIndex-1, 0 )
redraw = True
elif event.key == 'Q': # increase contrast quickly
self.__sigmaIndex = np.maximum( self.__sigmaIndex-10, 0 )
redraw = True
elif event.key == 'w': # decrease contrast
self.__sigmaIndex = np.minimum( self.__sigmaIndex+1, self.__sigmaLevels.size-1 )
redraw = True
elif event.key == 'W': # decrease contrast quickly
self.__sigmaIndex = np.minimum( self.__sigmaIndex+10, self.__sigmaLevels.size-1 )
redraw = True
# print "Increasing upper limit of the contrast: %g %% (press R to reset).\n"%round(self.offVmax*100)
elif event.key == 'T': # print statistics of the whole dataset
self.printStat()
redraw = False
elif event.key == 't': # print statistics of the current frame
            self.printStat(mode = 'curr')
redraw = False
else:
# Apparently we get multiple key-press events so don't do any error handling here.
pass
# Recompute is dominant over draw
if recompute:
self.__recompute__()
elif redraw:
self.__draw__()
# self.fig.canvas.draw()
def im(my_img,ax=None,**kwargs):
"Displays image showing the values under the cursor."
if ax is None:
ax = plt.gca()
def format_coord(x, y):
        x = int(x + 0.5)
        y = int(y + 0.5)
val = my_img[y,x]
try:
return "%.4E @ [%4i, %4i]" % (val, x, y)
except IndexError:
return ""
    img = ax.imshow(my_img,interpolation='nearest',**kwargs)
    ax.format_coord = format_coord
    plt.colorbar(img, ax=ax)
plt.draw()
plt.show()
def imTiles(d,sizeX=None,titNum=True):
"Displays the stack of images in the composed tiled figure."
    if sizeX is None:
        sizeX = int( np.ceil(np.sqrt(d.shape[0])) )
    sizeY = int( np.ceil(d.shape[0]/float(sizeX)) )
plt.figure(figsize=(sizeY, sizeX))
for i in np.arange(1,d.shape[0]+1):
plt.subplot(sizeX,sizeY,i)
plt.imshow(d[i-1],interpolation='nearest')
plt.xticks([])
plt.yticks([])
if titNum:
plt.title(str(i-1))
def complex2rgbalog(s,amin=0.5,dlogs=2):
"""
    Displays complex image with intensity corresponding to the log(MODULUS) and color (hsv) corresponding to PHASE.
From: pyVincent/ptycho.py
"""
    ph=np.angle(s)
t=np.pi/3
nx,ny=s.shape
rgba=np.zeros((nx,ny,4))
rgba[:,:,0]=(ph<t)*(ph>-t) + (ph>t)*(ph<2*t)*(2*t-ph)/t + (ph>-2*t)*(ph<-t)*(ph+2*t)/t
rgba[:,:,1]=(ph>t) + (ph<-2*t) *(-2*t-ph)/t+ (ph>0)*(ph<t) *ph/t
rgba[:,:,2]=(ph<-t) + (ph>-t)*(ph<0) *(-ph)/t + (ph>2*t) *(ph-2*t)/t
a=np.log10(np.abs(s)+1e-20)
a-=a.max()-dlogs # display dlogs orders of magnitude
rgba[:,:,3]=amin+a/dlogs*(1-amin)*(a>0)
return rgba
def complex2rgbalin(s):
"""
    Displays complex image with intensity corresponding to the MODULUS and color (hsv) corresponding to PHASE.
From: pyVincent/ptycho.py
"""
ph=np.angle(s)
t=np.pi/3
nx,ny=s.shape
rgba=np.zeros((nx,ny,4))
rgba[:,:,0]=(ph<t)*(ph>-t) + (ph>t)*(ph<2*t)*(2*t-ph)/t + (ph>-2*t)*(ph<-t)*(ph+2*t)/t
rgba[:,:,1]=(ph>t) + (ph<-2*t) *(-2*t-ph)/t+ (ph>0)*(ph<t) *ph/t
rgba[:,:,2]=(ph<-t) + (ph>-t)*(ph<0) *(-ph)/t + (ph>2*t) *(ph-2*t)/t
a=np.abs(s)
a/=a.max()
rgba[:,:,3]=a
return rgba
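# A hedged usage sketch (not from the original file): render a complex array as an RGBA
# image, with modulus mapped to opacity and phase mapped to hue:
#
#   s = np.fft.fft2( np.random.rand(64,64) )   # hypothetical complex field
#   plt.imshow( complex2rgbalin(s) )
#   plt.show()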
def colorwheel(col='black'):
"""
Color wheel for phases in hsv colormap.
From: pyVincent/ptycho.py
"""
xwheel=np.linspace(-1,1,100)
ywheel=np.linspace(-1,1,100)[:,np.newaxis]
rwheel=np.sqrt(xwheel**2+ywheel**2)
phiwheel=-np.arctan2(ywheel,xwheel) # Need the - sign because imshow starts at (top,left)
# rhowheel=rwheel*np.exp(1j*phiwheel)
rhowheel=1*np.exp(1j*phiwheel)
plt.gca().set_axis_off()
rgba=complex2rgbalin(rhowheel*(rwheel<1))
plt.imshow(rgba,aspect='equal')
plt.text(1.1, 0.5,'$0$',fontsize=14,horizontalalignment='center',verticalalignment='center',transform = plt.gca().transAxes,color=col)
    plt.text(-.1, 0.5,r'$\pi$',fontsize=16,horizontalalignment='center',verticalalignment='center',transform = plt.gca().transAxes,color=col)
def insertColorwheel(left=.7, bottom=.15, width=.1, height=.1,col='black'):
"""
Inserts color wheel to the current axis.
"""
    plt.axes((left,bottom,width,height), facecolor='w')
colorwheel(col=col)
# plt.savefig('output.png',bbox_inches='tight', pad_inches=0)
def insertColorbar(fig,im,left=.7, bottom=.1, width=.05, height=.8 ) :
"""
Inserts color bar to the current axis.
"""
    cax = fig.add_axes((left,bottom,width,height), facecolor='w')
plt.colorbar(im, cax=cax)
def showCplx(im,mask=0,pixSize_um=1,showGrid=True,modulusLog = False,maskPhase = False, maskPhaseThr = 0.01, cmapModulus = 'jet', cmapPhase = 'hsv', scalePhaseImg = True):
"Displays MODULUS and PHASE of the complex image in two subfigures."
if modulusLog:
modulus = np.log10(np.abs(im))
else:
modulus = np.abs(im)
phase = np.angle(im)
plt.figure(figsize=(8,4))
plt.subplot(121)
#plt.subplots_adjust(left=0.02, bottom=0.06, right=0.95, top=0.94, wspace=0.05)
#plt.imshow(abs(np.ma.masked_array(im,mask)))
plt.imshow(modulus,extent=(0,im.shape[1]*pixSize_um,0,im.shape[0]*pixSize_um),cmap=cmapModulus,interpolation='Nearest')
# plt.colorbar(m)
if showGrid:
plt.grid(color='w')
if pixSize_um !=1:
plt.xlabel('microns')
plt.ylabel('microns')
plt.title('Modulus')
# position=f.add_axes([0.5,0.1,0.02,.8]) ## the parameters are the specified position you set
# plt.colorbar(m,cax=position) ##
# plt.setp(ax_cb.get_yticklabels(), visible=False)
plt.subplot(122)
if scalePhaseImg:
vminPhase = -np.pi
vmaxPhase = np.pi
else:
vminPhase = phase.min()
vmaxPhase = phase.max()
plt.imshow(np.ma.masked_array(phase,mask),cmap=cmapPhase,interpolation='Nearest',vmin=vminPhase,vmax=vmaxPhase,extent=(0,im.shape[1]*pixSize_um,0,im.shape[0]*pixSize_um))
if showGrid:
plt.grid(color='k')
if pixSize_um !=1:
plt.xlabel('microns')
plt.ylabel('microns')
plt.title('Phase')
if cmapPhase == 'hsv':
insertColorwheel(left=.85)
plt.tight_layout()
def showLog(im, cmap='jet'):
"Displays log of the real image with correct colorbar."
f = plt.figure();
i = plt.imshow(im, norm=col.LogNorm(), cmap=cmap)
f.colorbar(i)
return f,i
def ca():
"""
Close all windows.
"""
plt.close('all')
def main():
# Get command line arguments
# First argument is the executed file
# print sys.argv
print( IMS_HELPTEXT )
fftMode = False
polarMode = False
logMode = False
if "--log" in sys.argv:
logMode = True
if "--fft" in sys.argv:
fftMode = True
logMode = True
if "--polarfft" in sys.argv:
fftMode = True
polarMode = True
logMode = True
# Blocking seems to interrupt key presses? I think I need a polling loop then.
# http://matplotlib.org/users/event_handling.html
#if os.name == "nt":
# blocking = True
#else:
blocking = False
imsObj = ims( sys.argv[1], logMode=logMode, fftMode=fftMode, polarMode=polarMode, blocking=blocking )
# plt.ion()
# Need to hold here.
# Doesn't work on Windows, why? Make plt.show( block=True ) call inside IMS instead
while not imsObj.exiting:
plt.pause(0.1)
sys.exit()
#### COMMAND-LINE INTERFACE ####
if __name__ == '__main__':
main() | zorroautomator | /zorroautomator-0.7.4b0.tar.gz/zorroautomator-0.7.4b0/zorro/zorro_plotting.py | zorro_plotting.py |
from zorro import ReliablePy
import numpy as np
import matplotlib.pyplot as plt
rln = ReliablePy.ReliablePy()
defocusThreshold = 40000
astigThreshold = 800
fomThreshold = 0.0
resThreshold = 6.0
rln.load( "micrographs_all_gctf.star" )
defocusU = rln.star['data_']['DefocusU']
defocusV = rln.star['data_']['DefocusV']
finalResolution = rln.star['data_']['FinalResolution']
ctfFoM = rln.star['data_']['CtfFigureOfMerit']
defocusMean = 0.5 * defocusU + 0.5 * defocusV
astig = np.abs( defocusU - defocusV )
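# np.histogram returns counts and bin *edges*; each c* axis below is therefore shifted by
# half a bin width so the fill_between plots are drawn against bin centers.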
[hDefocus, cDefocus] = np.histogram( defocusMean,
bins=np.arange(np.min(defocusMean),np.max(defocusMean),1000.0) )
hDefocus = hDefocus.astype('float32')
cDefocus = cDefocus[:-1] +1000.0/2
[hAstig, cAstig] = np.histogram( astig,
bins=np.arange(0, np.max(astig), 100.0) )
hAstig = hAstig.astype('float32')
cAstig = cAstig[:-1] +100.0/2
[hFoM, cFoM] = np.histogram( ctfFoM,
bins=np.arange(0.0,np.max(ctfFoM),0.005) )
hFoM = hFoM.astype('float32')
cFoM = cFoM[:-1] +0.005/2.0
[hRes, cRes] = np.histogram( finalResolution,
bins=np.arange(np.min(finalResolution),np.max(finalResolution),0.25) )
hRes = hRes.astype('float32')
cRes = cRes[:-1] +0.25/2.0
plt.figure()
plt.fill_between( cDefocus, hDefocus, np.zeros(len(hDefocus)), facecolor='steelblue', alpha=0.5 )
plt.plot( [defocusThreshold, defocusThreshold], [0, np.max(hDefocus)], "--", color='firebrick' )
plt.xlabel( "Defocus, $C_1 (\AA)$" )
plt.ylabel( "Histogram counts" )
plt.figure()
plt.fill_between( cAstig, hAstig, np.zeros(len(hAstig)), facecolor='forestgreen', alpha=0.5 )
plt.plot( [astigThreshold, astigThreshold], [0, np.max(hAstig)], "--", color='firebrick' )
plt.xlabel( "Astigmatism, $A_1 (\AA)$" )
plt.ylabel( "Histogram counts" )
plt.figure()
plt.fill_between( cFoM, hFoM, np.zeros(len(hFoM)), facecolor='darkorange', alpha=0.5 )
plt.plot( [fomThreshold, fomThreshold], [0, np.max(hFoM)], "--", color='firebrick' )
plt.xlabel( "Figure of Merit, $R^2$" )
plt.ylabel( "Histogram counts" )
plt.figure()
plt.fill_between( cRes, hRes, np.zeros(len(hRes)), facecolor='purple', alpha=0.5 )
plt.plot( [resThreshold, resThreshold], [0, np.max(hRes)], "--", color='firebrick' )
plt.xlabel( "Fitted Resolution, $r (\AA)$" )
plt.ylabel( "Histogram counts" )
#keepIndices = np.ones( len(defocusU), dtype='bool' )
keepIndices = ( ( defocusMean < defocusThreshold) & (astig < astigThreshold) &
(ctfFoM > fomThreshold ) & (finalResolution < resThreshold) )
print( "KEEPING %d of %d micrographs" %(np.sum(keepIndices), defocusU.size) )
for key in rln.star['data_']:
rln.star['data_'][key] = rln.star['data_'][key][keepIndices]
rln.saveDataStar( "micrographs_pruned_gctf.star" ) | zorroautomator | /zorroautomator-0.7.4b0.tar.gz/zorroautomator-0.7.4b0/zorro/scripts/gctfHisto.py | gctfHisto.py |
import numpy as np
import zorro
import os, os.path
import glob
from zorro import ReliablePy
import subprocess
import matplotlib.pyplot as plt
# Find all aligned micrographs matching the glob pattern and batch-process them with GCTF.
globPattern = "align/*zorro.mrc"
pixelsize = 0.1039 # in NANOMETERS
voltage = 300.0
C3 = 2.7
detectorPixelSize = 5.0
print( "#################################################################################" )
print( " CHECK FOR CORRECTNESS: \n pixelsize = %f nm\n voltage = %.1f kV\n C3 = %.2f mm\n detectorPixelSize = %.1f um" %(pixelsize, voltage, C3, detectorPixelSize) )
print( "#################################################################################" )
filenames = glob.glob( globPattern )
# Open the first one and grab the pixelsize
N = len(filenames)
CTFInfo = {}
CTFInfo['DefocusU'] = np.zeros( [N], dtype='float32' )
CTFInfo['DefocusV'] = np.zeros( [N], dtype='float32' )
CTFInfo['FinalResolution'] = np.zeros( [N], dtype='float32' )
CTFInfo['DefocusAngle'] = np.zeros( [N], dtype='float32' )
CTFInfo['CtfFigureOfMerit'] = np.zeros( [N], dtype='float32' )
# Better approach is probably to call this in batch mode. Then I get the all_micrographs_gctf.star file!
ctfReg = zorro.ImageRegistrator()
ctfReg.n_threads = 16
ctfReg.savePNG = True
ctfReg.files['figurePath'] = '../figs/'
ctfReg.plotDict["imageSum"] = False
ctfReg.plotDict["imageFirst"] = False
ctfReg.plotDict["FFTSum"] = False
ctfReg.plotDict["polarFFTSum"] = False
ctfReg.plotDict["corrTriMat"] = False
ctfReg.plotDict["shiftsTriMat"] = False
ctfReg.plotDict["peaksigTriMat"] = False
ctfReg.plotDict["errorTriMat"] = False
ctfReg.plotDict["translations"] = False
ctfReg.plotDict["pixRegError"] = False
ctfReg.plotDict["CTF4Diag"] = True
ctfReg.plotDict["logisticsCurve"] = False
ctfReg.plotDict["Transparent"] = False
ctfReg.plotDict["dpi"] = 200
# Build the GCTF batch call as a single shell command.
gctf_exec = "gctf %s --apix %f --kV %f --cs %f --dstep %f --do_EPA 1 --logsuffix _ctffind3.log" % (
globPattern, pixelsize*10, voltage, C3, detectorPixelSize )
devnull = open(os.devnull, 'w' )
#sub = subprocess.Popen( gctf_exec, shell=True, stdout=devnull, stderr=devnull )
sub = subprocess.Popen( gctf_exec, shell=True )
sub.wait()
# TODO: generate all the CTF diagnostic outputs? Clean up files.
rlnCTF = ReliablePy.ReliablePy()
rlnCTF.load( "micrographs_all_gctf.star" )
Nbins = 2 * int( np.sqrt( rlnCTF.star['data_']['DefocusU'].size ) )
hDefocusU, xDefocusU = np.histogram( rlnCTF.star['data_']['DefocusU'], bins=Nbins )
hDefocusU = hDefocusU.astype('float32'); xDefocusU = xDefocusU[:-1]
hDefocusV, xDefocusV = np.histogram( rlnCTF.star['data_']['DefocusV'], bins=Nbins )
hDefocusV = hDefocusV.astype('float32'); xDefocusV = xDefocusV[:-1]
hR2, xR2 = np.histogram( rlnCTF.star['data_']['CtfFigureOfMerit'], bins=Nbins )
hR2 = hR2.astype('float32'); xR2 = xR2[:-1]
hResolution, xResolution = np.histogram( rlnCTF.star['data_']['FinalResolution'], bins=Nbins )
hResolution = hResolution.astype('float32'); xResolution = xResolution[:-1]
plt.figure()
plt.plot( xDefocusU, hDefocusU, '.-', label='DefocusU' )
plt.plot( xDefocusV, hDefocusV, '.-', label='DefocusV' )
plt.xlabel( "Defocus, $C1$ ($\AA$)" )
plt.ylabel( "Histogram counts" )
plt.legend( loc='best' )
plt.savefig( "histogram_gctf_defocus.png" )
plt.figure()
plt.plot( xResolution, hResolution, '.-', label='Resolution' )
plt.xlabel( "Estimated resolution ($\AA$)" )
plt.ylabel( "Histogram counts" )
plt.legend( loc='best' )
plt.savefig( "histogram_gctf_resolution.png" )
plt.figure()
plt.plot( xR2, hR2, '.-', label='$R^2$' )
plt.xlabel( "CTF Figure of Merit, $R^2$ (a.u.)" )
plt.ylabel( "Histogram counts" )
plt.legend( loc='best' )
plt.savefig( "histogram_gctf_R2.png" )
# TODO: throw-out outliers in resolution and the R2 values? | zorroautomator | /zorroautomator-0.7.4b0.tar.gz/zorroautomator-0.7.4b0/zorro/scripts/zorroCTFAnalysis.py | zorroCTFAnalysis.py |
import os, os.path, subprocess, sys, shutil
import numpy as np
import numexprz as ne
import time
import psutil
import matplotlib.pyplot as plt
#from matplotlib import collections
import zorro
plt.rcParams['lines.linewidth'] = 1.0
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.size'] = 16.0
def normalize(a):
""" Normalizes the input to the range [0.0,1.0].
Returns floating point if integer data is passed in."""
if np.issubdtype( a.dtype, np.integer ):
a = a.astype( 'float' )
amin = a.min()
arange = (a.max() - amin)
a -= amin
a /= arange
return a
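# Example (derived from the function above, not in the original script):
#   normalize( np.array([0, 5, 10]) ) -> array([0.0, 0.5, 1.0])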
def countPhysicalProcessors():
cpuInfo = ne.cpu.info
physicalIDs = []
for J, cpuDict in enumerate( cpuInfo ):
if not cpuDict['physical id'] in physicalIDs:
physicalIDs.append( cpuDict['physical id'] )
return len( physicalIDs )
def getMHz( cpuVirtualCores ):
strMHz = subprocess.check_output( "cat /proc/cpuinfo | grep MHz", shell=True ).split()
# Every fourth one is a MHz
    cpuMHz = np.zeros( len(strMHz)//4, dtype='float32' )
for J in np.arange( 0, cpuVirtualCores ):
cpuMHz[J] = np.float32( strMHz[4*J + 3 ] )
return cpuMHz
def getCpuUsage( percpu = False):
# This must be called at the start of a loop to get the initial the values in /proc/stat
if percpu:
return psutil.cpu_percent( percpu=True )
else:
return psutil.cpu_percent()
def getMemUsage():
mem = psutil.virtual_memory()
return( [mem.used, mem.percent] )
def getDiskReadWrite():
diskIO = psutil.disk_io_counters()
return( [diskIO.read_time, diskIO.write_time] )
class benchZorro( object ):
def __init__(self, maxSamples = 3600, sleepTime = 1.0 ):
self.maxSamples = maxSamples
self.sleepTime = sleepTime
self.cpuInfo = ne.cpu.info
self.cpuVirtualCores = len( self.cpuInfo )
self.cpuModel = self.cpuInfo[0]['model name']
self.cpuAdvertisedMHz = 1000.0 * np.float32(self.cpuModel.split('@')[1].rstrip('GHz'))
self.cpuPowerManagement = self.cpuInfo[0]['power management']
self.cpuFlags = self.cpuInfo[0]['flags'].split() # Look for 'sse', 'sse2', 'avx', 'fma' for FFTW compilation
self.cpuCacheSize = np.float32( self.cpuInfo[0]['cache size'][:-3] ) # Should be kiloBytes
        self.cpuCoresPerProcessor = int(ne.cpu.info[0]['cpu cores'])
self.cpuPhysicalCores = self.cpuCoresPerProcessor * countPhysicalProcessors()
if len(self.cpuInfo) == 2*self.cpuPhysicalCores:
self.hyperthreading = True
else:
self.hyperthreading = False
self.zorroDefault = zorro.ImageRegistrator()
self.zorroDefault.diagWidth = 5
self.zorroDefault.CTFProgram = None
self.zorroDefault.filterMode = None
self.zorroDefault.doLazyFRC = False
self.zorroDefault.savePNG = False
self.resetStats()
def resetStats(self):
self.index = 0
self.cpuMHz = np.zeros( [self.maxSamples, self.cpuVirtualCores] )
self.cpuUsage = np.zeros( [self.maxSamples, self.cpuVirtualCores] )
self.memUsage = np.zeros( [self.maxSamples, 2] )
self.rwUsage = np.zeros( [self.maxSamples, 2] )
self.timeAxis = np.zeros( [self.maxSamples] )
def updateStats(self):
# sys.stdout.write( "\r >> Bench %.1f" % (100.0*index/Ntest) + " %" ); sys.stdout.flush()
self.cpuMHz[self.index,:] = getMHz( self.cpuVirtualCores )
self.cpuUsage[self.index,:] = getCpuUsage( percpu = True )
self.memUsage[self.index,:] = getMemUsage()
self.rwUsage[self.index,:] = getDiskReadWrite()
self.timeAxis[self.index] = time.time()
self.index += 1
def finishStats(self):
self.index -= 1
self.timeAxis = self.timeAxis[:self.index+1]
self.cpuMHz = self.cpuMHz[:self.index+1,:]
self.cpuUsage = self.cpuUsage[:self.index+1,:]
self.memUsage = self.memUsage[:self.index+1,:]
self.rwUsage = self.rwUsage[:self.index+2,:]
self.timeAxis -= self.timeAxis[0]
self.cpuMHz_all_percent = np.mean( self.cpuMHz, axis=1 ) / self.cpuAdvertisedMHz * 100.0
self.cpuUsage_all = np.mean( self.cpuUsage, axis=1 )
        # Disk IO has no natural percentage scale, so normalize the summed read/write deltas to [0,100].
self.ioUsage = 100.0* normalize( np.sum( np.diff( self.rwUsage, axis=0 ).astype('float32'), axis=1 )[:self.index] )
def plotStats(self, N_processes, N_threads ):
fileExt = self.cpuModel.replace(" ","") + "_Nproc%d_Nthread%d.png" % (N_processes, N_threads)
plt.figure( figsize=(12,10) )
plt.plot( self.timeAxis, self.cpuMHz_all_percent, label = "CPU throttle", color='purple', linewidth=1.5 )
plt.plot( self.timeAxis, self.cpuUsage_all, label = "CPU usage", color='steelblue', linewidth=1.5 )
plt.plot( self.timeAxis, self.memUsage[:,1], label = "Memory usage", color='firebrick', linewidth=1.5 )
plt.plot( self.timeAxis[:-1], self.ioUsage, label="Disk IO (norm)", color='forestgreen', linewidth=1.5 )
plt.xlabel( "Time (s)" )
plt.ylabel( "Performance metrics (%)" )
plt.legend( loc='best' )
plt.title( "Benchmark for %s" % self.cpuModel + "\n $N_{processes}=%d, N_{threads}=%d$" %( N_processes, N_threads) )
# plt.ylim( [0, 140] )
plt.xlim( [0, self.timeAxis[-1]] )
plt.savefig( "Benchmark_" + fileExt )
##### Make a waterfall plot of CPU usage per processor
waterfallColors = plt.cm.gnuplot( np.linspace(0.0,1.0,self.cpuVirtualCores+1) )
# http://matplotlib.org/examples/api/collections_demo.html
cumsum_cpu = np.cumsum( self.cpuUsage, axis=1 )
cumsum_cpu = np.hstack( [np.zeros([cumsum_cpu.shape[0], 1]), cumsum_cpu])
plt.figure( figsize=(12,10) )
for J in np.arange(1,self.cpuVirtualCores+1):
#plt.plot( timeAxis, cumsum_cpu[:,J], color=waterfallColors[J] )
plt.fill_between( self.timeAxis, cumsum_cpu[:,J-1], cumsum_cpu[:,J], facecolor=waterfallColors[J], color=[0.0,0.0,0.0,0.3], linewidth=0.5, interpolate=True )
plt.xlim( [0, self.timeAxis[-1]] )
plt.xlabel( "Time (s)" )
plt.ylabel( "CPU utilization (%)" )
plt.title( "per CPU utilization for %s" % self.cpuModel + "\n $N_{processes}=%d, N_{threads}=%d$" %( N_processes, N_threads) )
plt.savefig( "perCPUBenchmark_" + fileExt )
def __str__(self):
returnstr = "##### CPU INFO #####\n"
returnstr += "Model: %s\n" % self.cpuModel
returnstr += "Power management scheme: %s\n" % self.cpuPowerManagement
returnstr += "Cache size: %s\n" % self.cpuInfo[0]['cache size']
returnstr += "Hyperthreading: %s\n" % self.hyperthreading
returnstr += "No. Physical Cores: %d\n" % self.cpuPhysicalCores
return returnstr
def benchmark( self, stackName, N_processes, N_threads ):
#dirName = os.path.dirname( stackName[0] )
#baseName = os.path.basename( stackName[0] )
# We need to make 3 copies of the stack
stackFront, stackExt = os.path.splitext( stackName )
stackName = [stackName]
N_cases = len( N_processes )
t_start = np.zeros( N_cases )
t_finish = np.zeros( N_cases )
maxMemory = np.zeros( N_cases )
meanCPUusage = np.zeros( N_cases )
if N_cases > 1:
# Force use of Agg if we are generating many plots.
plt.switch_backend( 'Agg' )
# Make copies of the input file to avoid file IO collisions
for J in np.arange( 1, np.max(N_processes) ):
newName = stackFront + str(J) + stackExt
stackName.append( newName )
print( "Copying %s to %s" % (stackName[0], stackName[J]) )
if not os.path.isfile( newName ):
shutil.copy( stackName[0], newName )
pass
for K in range(N_cases):
print( "##### STARTING BENCHMARK #%d, N_PROCS = %d, N_THREADS = %d" %(K, N_processes[K], N_threads[K] ) )
self.zorroDefault.n_threads = N_threads[K]
ProcList = []
self.resetStats()
for J in range( N_processes[K] ):
                self.zorroDefault.files['stack'] = stackName[J] # each process gets its own copy to avoid IO collisions
self.zorroDefault.saveConfig( "stack%d.zor"%J )
t_start[K] = time.time()
self.updateStats()
# Start all the processes
for J in range( N_processes[K] ):
ProcList.append( subprocess.Popen( "zorro -c stack%d.zor"%J, shell=True ) )
# Poll the processes and also call our stats
finished = np.zeros( len(ProcList), dtype='bool' )
while self.index < self.maxSamples:
self.updateStats()
for I, P in enumerate(ProcList):
                    finished[I] = P.poll() is not None
if np.all( finished ):
print( "Finished benchmark for N_processes: %d, N_threads: %d" % (N_processes[K], N_threads[K]))
break
time.sleep( self.sleepTime )
t_finish[K] = time.time()
self.finishStats()
self.plotStats( N_processes[K], N_threads[K] )
maxMemory[K] = np.max( self.memUsage[:,0] ) / 2**30 # GB
            meanCPUusage[K] = np.mean( np.sum( self.cpuUsage, axis=1 ) )
t_consumed_per = (t_finish - t_start)/ N_processes
print( self.__str__() )
for K in range(N_cases):
print( "Case %d: %d processes, %d threads each, time per stack: %.3f s, CPU usage: %.2f, maximum Memory %.2f GB"
%( K, N_processes[K], N_threads[K], t_consumed_per[K], meanCPUusage[K], maxMemory[K] ) )
# Save a simple output file
np.savetxt( "bench"+self.cpuModel.replace(" ","")+".txt",
np.vstack( [N_processes, N_threads, t_consumed_per, meanCPUusage, maxMemory ] ).transpose(), fmt="%.2f",
header = "Benchmark for %s \n N_processes | N_threads | time_consumed_per_process | meanCPU | max Memory" % self.cpuModel )
if __name__ == "__main__":
bencher = benchZorro()
try:
stackName = sys.argv[1]
except IndexError:
print( "Usage: 'python zorro_benchmark someStack.dm4'" )
exit(1)
# Here's an example of setting up likely situations for processing, with a maximum of 4 processes
# This test mostly shows that hyperthreading makes Zorro slower, because the calculations are already
# block-optimized.
n_cases = 8
n_procs = [1,2,3,4,1,2,3,4]
max_threads = bencher.cpuVirtualCores
    n_threads = [max_threads, max_threads//2, max_threads//3, max_threads//4, max_threads//2, max_threads//4,
                 max_threads//6, max_threads//8]
bencher.benchmark( stackName, n_procs, n_threads ) | zorroautomator | /zorroautomator-0.7.4b0.tar.gz/zorroautomator-0.7.4b0/zorro/scripts/zorro_benchmark.py | zorro_benchmark.py |
import numpy as np
import zorro
import os, os.path, glob
import subprocess
####### PARAMETERS FOR ZORRO ######
n_threads = 16 # number of threads for numexpr and FFTW
savePNGs = False # Do you want Zorro diagnostic images?
pixelsize = 1.326 # pixel size in Angstroms
a = 4.0 # lattice parameter a, Angstroms
b = 4.0 # lattice parameter b, Angstroms
gamma = np.pi/4.0 # crystal rotation parameter
outputFolder = "./zorro/"
fileDescriptor = "micrographs/*.dm4" # Can be single file, have wildcards, or be a Python list
# If we want support for the Falcon and DE-20, which are true 4k detectors, we need to change zorroReg.shapePadded too
# zorro.zorro_util.findValidFFTWDim() is useful here.
# Also Fourier cropping can change somewhat, depending on how far out Fourier spots are observed.
# I don't know if you have an autocorrelation routine to estimate this?
##### Zorro for 2dx Prototype script #####
def get_resolution( gamma, a, b):
"""
Calculate the largest periodicity from the [hkl] = [1,1], [0,1], [1,0] given gamma, a, b, c
gamma is radians
(a,b,c) are in Angstroms
pixelsize is in Angstroms
Returns resolution in inverse Angstroms (must be converted to pixels by Zorro)
"""
astar = 1.0 / (a * np.sin(gamma) )
bstar = 1.0 / (b * np.sin(gamma) )
# recgamma = np.pi - gamma
qstar010 = 1.0 / bstar**2.0
qstar100 = 1.0 / astar**2.0
return np.min( [qstar010, qstar100] )
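# Worked example (a sketch, using the parameters defined above): for a = b = 4 Angstroms and
# gamma = pi/4, a*sin(gamma) = 2.828, so astar = bstar = 0.354 and qstar = 1/0.354**2 = 8.0;
# with pixelsize = 1.326 the maxShift assigned below is 1.326 * 8.0 ~= 10.6 pixels.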
print( "TODO: need to set larger padded size for Falcon and DE-20" )
zorroReg = zorro.ImageRegistrator()
zorroReg.pixelsize = pixelsize
zorroReg.maxShift = zorroReg.pixelsize * get_resolution( gamma, a, b )
zorroReg.preShift = True
print( "Estimated distance to first spot in FFT (pix): %f" % zorroReg.maxShift )
zorroReg.plotDict['transparent'] = False
zorroReg.CTFProgram = None
zorroReg.filterMode = 'dose,background'
zorroReg.n_threads = n_threads
if isinstance( fileDescriptor, list ) or isinstance( fileDescriptor, tuple ):
# Do nothing
fileList = fileDescriptor
elif isinstance( fileDescriptor, str ):
fileList = glob.glob( fileDescriptor )
# Normalize the path so we keep everything simple.
for fileName in fileList:
baseName = os.path.basename( fileName )
    baseFront = os.path.splitext( baseName )[0]
# Set individual file names and save a configuration file for each.
zorroReg.files = {}
zorroReg.files['figurePath'] = './fig'
zorroReg.files['config'] = baseName + ".log"
zorroReg.files['stack'] = os.path.realpath( fileName )
zorroReg.files['align'] = baseFront + "_zorro_movie.mrcs"
zorroReg.files['sum'] = baseFront + "_zorro.mrc"
    realConfig = os.path.join( os.path.realpath( outputFolder ), baseName + ".log" )
    # Assumption: the config must exist on disk before dispatch; saveConfig() is the same
    # call used in zorro_benchmark.py
    zorroReg.saveConfig( realConfig )
    zorro.call( realConfig )
import numpy as np
import re, os, os.path, glob
import matplotlib.pyplot as plt
def parseGCTFLog( logName ):
"""
Parse the GCTF log, tested on GCTF v1.06.
"""
validIndex = -1
CTFInfo = dict()
with open( logName, 'r' ) as fh:
logLines = fh.readlines()
# Find "LAST CYCLE"
for J in np.arange( len( logLines )-1, 0, -1 ):
# Search backwards
if "VALIDATION" in logLines[J]:
# print( "Found VALIDATION at line %d" % J )
validIndex = J
if "LAST CYCLE" in logLines[J]:
# print( "Found LAST CYCLE at line %d" % J )
if validIndex > 0:
validLines = logLines[validIndex+2:-1]
resultsLines = logLines[J:validIndex]
break
ctfList = resultsLines[3].split()
CTFInfo[u'DefocusU'] = float( ctfList[0] )
CTFInfo[u'DefocusV'] = float( ctfList[1] )
CTFInfo[u'DefocusAngle'] = float( ctfList[2] )
CTFInfo[u'CtfFigureOfMerit'] = float( ctfList[3] )
CTFInfo[u'FinalResolution'] = float( resultsLines[5].split()[6] )
CTFInfo[u'Bfactor'] = float( resultsLines[6].split()[3] )
# Would be kind of nice to use pandas for the validation, but let's stick to a dict
for valid in validLines:
valid = valid.split()
try:
CTFInfo[ valid[0] ] = [ float(valid[1]), float(valid[2]), float(valid[3]), float(valid[4]), float(valid[6]) ]
except ValueError:
CTFInfo[ valid[0] ] = [ valid[1], valid[2], valid[3], valid[4], valid[5] ]
return CTFInfo
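# A hedged usage sketch (the keys follow the dict populated above; the log name is
# hypothetical):
#
#   info = parseGCTFLog( "micrograph01_gctf.log" )
#   print( info['DefocusU'], info['DefocusV'], info['FinalResolution'] )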
if __name__ == "__main__":
logNames = glob.glob( "*gctf.log" )
N = len(logNames)
ctfData = np.zeros( [N, 6], dtype='float32' )
ctfValid = np.zeros( [N, 5], dtype='float32' )
logDicts = [None] * N
for J, logName in enumerate( logNames ):
ctfDict = parseGCTFLog( logName )
        logDicts[J] = ctfDict
        ctfData[J,0] = 0.5*( ctfDict[u'DefocusU'] + ctfDict[u'DefocusV'] )
        ctfData[J,1] = np.abs( ctfDict[u'DefocusU'] - ctfDict[u'DefocusV'] )
ctfData[J,2] = ctfDict[u'CtfFigureOfMerit']
ctfData[J,3] = ctfDict[u'FinalResolution']
ctfData[J,4] = ctfDict[u'Bfactor']
ctfValid[J,0] = ctfDict['20-08A'][-1]
ctfValid[J,1] = ctfDict['15-06A'][-1]
ctfValid[J,2] = ctfDict['12-05A'][-1]
ctfValid[J,3] = ctfDict['10-04A'][-1]
ctfValid[J,4] = ctfDict['08-03A'][-1] | zorroautomator | /zorroautomator-0.7.4b0.tar.gz/zorroautomator-0.7.4b0/zorro/scripts/gctfLogParser.py | gctfLogParser.py |
#dirName = os.path.dirname( stackName[0] )
#baseName = os.path.basename( stackName[0] )
# We need to make 3 copies of the stack
stackFront, stackExt = os.path.splitext( stackName )
stackName = [stackName]
N_cases = len( N_processes )
t_start = np.zeros( N_cases )
t_finish = np.zeros( N_cases )
maxMemory = np.zeros( N_cases )
meanCPUusage = np.zeros( N_cases )
if N_cases > 1:
# Force use of Agg if we are generating many plots.
plt.switch_backend( 'Agg' )
# Pre-plan FFTW
import tempfile
forward = np.zeros( [4096, 4096], dtype='complex64' )
reverse = np.zeros( [4096, 4096], dtype='complex64' )
        wisdomFile = os.path.join( tempfile.gettempdir(), "fftw_wisdom.pkl" )
for nthread in np.unique(N_threads):
print( "Pre-planning FFTW wisdom for N=%d threads" % nthread )
zorro.util.pyFFTWPlanner( forward, reverse, wisdomFile=wisdomFile, n_threads=nthread )
# Make copies of the input file to avoid file IO collisions
for J in np.arange( 1, np.max(N_processes) ):
newName = stackFront + str(J) + stackExt
stackName.append( newName )
print( "Copying %s to %s" % (stackName[0], stackName[J]) )
if not os.path.isfile( newName ):
shutil.copy( stackName[0], newName )
pass
for K in range(N_cases):
print( "##### STARTING BENCHMARK #%d, N_PROCS = %d, N_THREADS = %d" %(K, N_processes[K], N_threads[K] ) )
self.zorroDefault.n_threads = N_threads[K]
ProcList = []
self.resetStats()
for J in range( N_processes[K] ):
self.zorroDefault.files['stack'] = stackName[J]
self.zorroDefault.saveConfig( "stack%d.zor"%J )
t_start[K] = time.time()
self.updateStats()
# Start all the processes
for J in range( N_processes[K] ):
ProcList.append( subprocess.Popen( "zorro -c stack%d.zor"%J, shell=True ) )
# Poll the processes and also call our stats
finished = np.zeros( len(ProcList), dtype='bool' )
while self.index < self.maxSamples:
self.updateStats()
for I, P in enumerate(ProcList):
                    finished[I] = P.poll() is not None
if np.all( finished ):
print( "Finished benchmark for N_processes: %d, N_threads: %d" % (N_processes[K], N_threads[K]))
break
time.sleep( self.sleepTime )
t_finish[K] = time.time()
self.finishStats()
self.plotStats( N_processes[K], N_threads[K] )
maxMemory[K] = np.max( self.memUsage[:,0] ) / 2**30 # GB
            meanCPUusage[K] = np.mean( np.sum( self.cpuUsage, axis=1 ) )
t_consumed_per = (t_finish - t_start)/ N_processes
print( self.__str__() )
for K in range(N_cases):
print( "Case %d: %d processes, %d threads each, time per stack: %.3f s, CPU usage: %.2f, maximum virtual Memory %.2f GB"
%( K, N_processes[K], N_threads[K], t_consumed_per[K], meanCPUusage[K], maxMemory[K] ) )
        # Save a simple output file
np.savetxt( "bench"+self.cpuModel.replace(" ","")+".txt",
np.vstack( [N_processes, N_threads, t_consumed_per, meanCPUusage, maxMemory ] ).transpose(), fmt="%.2f",
header = "Benchmark for %s \n N_processes | N_threads | time_consumed_per_process | meanCPU | max Memory" % self.cpuModel )
if __name__ == "__main__":
bencher = benchZorro()
try:
stackName = sys.argv[1]
except IndexError:
print( "Usage error: try 'python zorro_benchmark someStack.dm4'" )
exit(1)
# Here's an example of setting up likely situations for processing, with a maximum of 4 processes
# This test mostly shows that hyperthreading makes Zorro slower, because the calculations are already
# block-optimized.
n_cases = 8
n_procs = [1,2,3,4,1,2,3,4]
max_threads = bencher.cpuVirtualCores
    n_threads = [max_threads, max_threads//2, max_threads//3, max_threads//4, max_threads//2, max_threads//4,
                 max_threads//6, max_threads//8]
# n_cases = 1
# n_procs = [2,]
# n_threads = [12,]
bencher.benchmark( stackName, n_procs, n_threads ) | zorroautomator | /zorroautomator-0.7.4b0.tar.gz/zorroautomator-0.7.4b0/zorro/scripts/lanczos_benchmark.py | lanczos_benchmark.py |
import zorro
import RAMutil as ram
import numpy as np
import numexprz as ne
from plotting import ims
import matplotlib.pyplot as plt
import scipy.interpolate as intp
np.random.seed( 44 )
M = 40
N = 2048
contrast = 0.5 # Generally around 0.1 or so for energy-filtered images.
tx = 0.5 # Time between frames in seconds (for drift)
ePerPixel = 2 # electrons per pixel per frame, must be an integer to make Poisson/Binomial statistics feasible computationally
# For a binomial distribution, the dose has to be an integer. We could simulate the sum and then
# randomly distribute the counts in all the frames?
kV = 300E3
wavelen = ram.ewavelength( kV ) * 1E9 # wavelength in nm
C1 = 1000 # Defocus, nm
A1x = 50 # Astigmatism, x-axis, nm
A1y = 25 # Astigmatism, orthogonal to x-axis (which is actually 45 degrees, not 90), nm
pixelsize = 0.13 # nm per pixel
objRadius = 12 # object size in pixels (will be depricated when I develop a more complicated object)
Bfactor = 1.0 # nm^2
criticalDose = 2000 # Scaling on Bfactor, electrons per square nm before contrast drops by 1/e
n_threads = 24
D_charge = 10.0
D_environ = 1.0 # Drift in pix**2 / s
k_decay = 0.5
velocity = np.array( [2.5, 0] ) # constant (bias) velocity applied to drift in pix/s
hotpixelRate = 0.005 # hot pixels, probability per pixel
hotpixelSigma = 5.0 # How bright are hot pixels? Normally distributed with (Mean + sigma*std)
ne.set_num_threads( n_threads )
[xmesh,ymesh] = np.meshgrid( np.arange(-N/2,N/2).astype(zorro.float_dtype), np.arange(-N/2,N/2).astype(zorro.float_dtype) )
r2mesh = ne.evaluate( 'xmesh**2 + ymesh**2' )
t_axis = np.arange( 1, M + tx ) * tx
dose_axis = np.arange( 1, M+1 ) * ePerPixel / pixelsize**2
Drift_decay = np.exp( -k_decay * t_axis )
# Looks like we have to simulate each step individually if we want different scales
shifts = np.zeros( [M,2] )
for J in xrange(0,M):
shifts[J,1] = np.random.normal( loc=velocity[1]*tx, scale=(D_charge * Drift_decay[J] + D_environ)*np.sqrt(tx), size=1 )
shifts[J,0] = np.random.normal( loc=velocity[0]*tx, scale=(D_charge * Drift_decay[J] + D_environ)*np.sqrt(tx), size=1 )
trans = np.cumsum( shifts, axis=0 )
centroid = np.mean( trans, axis=0 )
trans -= centroid
splineX = intp.UnivariateSpline( t_axis, trans[:,1], s = 0.0 )
splineY = intp.UnivariateSpline( t_axis, trans[:,0], s = 0.0 )
t_display = np.linspace( np.min(t_axis), np.max(t_axis), 2048 )
plt.figure()
plt.plot( trans[:,1], trans[:,0], 'k.', label='trans' )
plt.plot( splineX(t_display), splineY(t_display), label='spline' )
plt.legend( loc='best' )
# motion-blur velocity vectors can be computed from numerical derivative of the splines
print( "TODO: compute instantaneous velocity vectors" )
# Object is a few spheres
# Make a short array of object positions
objectCount = 50
objectPositions = np.random.uniform( low = -N/2 + objRadius, high=N/2-objRadius, size=[objectCount,2] )
# Check for overlap; re-draw any object that lands too close to an earlier one
for J in xrange(0,objectCount):
    # Check against the following points
    for K in xrange(J+1,objectCount):
        euclid = np.sqrt( np.sum( (objectPositions[J,:] - objectPositions[K,:])**2) )
        while euclid <= 2*objRadius:
            print( str(J) + " is too close to " + str(K) )
            objectPositions[K,:] = np.random.uniform( low = -N/2 + objRadius, high=N/2-objRadius, size=[1,2] )
            euclid = np.sqrt( np.sum( (objectPositions[J,:] - objectPositions[K,:])**2) )
phaseObject = np.ones( [M, N, N] )
for J in xrange(0,M):
# Projected potential of a sphere as a
for K in xrange(0,objectCount):
offsetX = objectPositions[K,1] + trans[J,1]
offsetY = objectPositions[K,0] + trans[J,0]
r2_s = ne.evaluate( "(xmesh+offsetX)**2 + (ymesh+offsetY)**2" )
r_s = ne.evaluate( "sqrt(r2_s)" )
projSphere = ne.evaluate( "sqrt( objRadius**2 - r2_s)" )
projSphere[ np.isnan( projSphere ) ] = 0.0
projSphere = 1.0 - projSphere*contrast/np.max( projSphere )
if np.sum( np.isnan( projSphere) ) > 0:
print( "Found Nans in J = " + str(J) + " and K = " + str(K) )
else:
phaseObject[J,:,:] *= projSphere
#ims( phaseObject[J,:,:] )
# TODO: radiation damage, drift MTF, and CTF in Fourier space
[FFT2,IFFT2] = zorro.util.pyFFTWPlanner( r2mesh.astype(zorro.fftw_dtype), n_threads=n_threads )
FFTPhase = np.zeros( [M, N, N], dtype=zorro.fftw_dtype )
inv_ps = 1.0 / (pixelsize * N)
# Apply CTF waveplate
# Let's just ignore wavelength? I don't think we've used it anywhere else?
qxmesh = xmesh * inv_ps * wavelen
qymesh = ymesh * inv_ps * wavelen
q2mesh = ne.evaluate( "qxmesh*qxmesh + qymesh*qymesh" )
# Compute CTF phase gradient
phaseC1 = ne.evaluate( 'C1*0.5*(qxmesh**2 + qymesh**2)' )
phaseA1x = ne.evaluate( 'A1x*0.5*(qxmesh**2 - qymesh**2)' )
phaseA1y = ne.evaluate( 'A1y*0.5*(-qxmesh**2 + qymesh**2)' )
wavenumber = 2.0j * np.pi / wavelen
Gamma = ne.evaluate( 'exp( wavenumber * (phaseC1 + phaseA1x + phaseA1y) )' )
#ims( np.angle(Gamma), titles=("Gamma aberration waveplate",) )
Gamma = np.fft.ifftshift( Gamma )
realObject = np.zeros_like( phaseObject, dtype=zorro.fftw_dtype )
Bfilter = np.zeros_like( phaseObject, dtype=zorro.fftw_dtype )
for J in xrange(0,M):
FFT2.update_arrays( phaseObject[J,:,:].astype(zorro.fftw_dtype), FFTPhase[J,:,:] ); FFT2.execute()
# Build B-factor for this dose
# Bfactor needs to be modified by the wavelen, because q meshes are unitless (angular spectrum)
Bfactor_funcDose = Bfactor*dose_axis[J] / criticalDose / 4 / wavelen**2
Bfilter[J,:,:] = np.fft.ifftshift( ne.evaluate( "exp( -Bfactor_funcDose * q2mesh )" ) )
FFTPhase[J,:,:] *= Bfilter[J,:,:]
# Apply CTF phase plate to complex amplitude
FFTPhase[J,:,:] *= Gamma
# TODO: apply drift MTF
# MTF_sinc = np.sinc( np.pi * t_x * velocity * q )
# Define the randomwalk as having a mean square displacement of what?
# MTF_randomwalk = 1.0 - np.exp( -4.0 * np.pi * D * tx)
# So the velocity
# Inverse FFT back to real-space
IFFT2.update_arrays( FFTPhase[J,:,:], realObject[J,:,:] ); IFFT2.execute()
realObject = np.real( realObject ).astype( zorro.float_dtype ) / (N*N)
print( "realObject min contrast: %f"%np.min( realObject ) )
print( "realObject max contrast: %f"%np.max( realObject ) )
realContrast = (np.max(realObject) - np.min(realObject))/(np.max(realObject) + np.min(realObject))
print( "realObject contrast: %f"%(realContrast) )
# Force maximum contrast to 1.0
realObject = realObject / np.max( realObject )
# Apply a low-pass filter to the CTF to reflect coherence angle Beta?
#ims( realObject, titles=("Real Object",) )
#ims( np.abs( np.fft.fftshift( np.fft.fft2( realObject[0,:,:] ))))
#ims( np.abs( np.fft.fftshift( np.fft.fft2( realObject[J,:,:] ))))
# Generate a weak background, that's off-center
#background = ne.evaluate( "1.0 - ((xmesh - N/8)**2 + (ymesh+N/16)**2 ) / (N*4.0)**2" )
#print( "Background minimum: %f"%np.min( background ) )
## We assume the background is in the illumination, but it's not affected by the CTF because it's
## pure amplitude.
#phaseObject *= background
# So we need Poisson distributed counts, and then we need to apply the phaseObject as a binomial distribution
# on top of the Poisson counts
shotNoise = np.random.poisson( lam=ePerPixel, size=realObject.shape ) # This generator for poisson is quite slow...
noisyObject = np.random.binomial( shotNoise, realObject, size=phaseObject.shape ).astype( zorro.float_dtype )
# Is this right? Should it be the other way around? But then we don't have a good continuous Poisson
noisyContrast = np.sqrt(2) * np.std( noisyObject, axis=(1,2) ) / np.mean( noisyObject, axis=(1,2))
print( "noisyObject mean contrast: %f"%(np.mean(noisyContrast) ) )
# ims( noisyObject, titles=("Noisy Object",) )
# Apply detector MTF, ideally this would be DQE
print( "TODO: apply detector MTF" )
# Apply hot pixels
print( "TODO: apply hot pixels" )
hotpixMask = np.random.binomial( 1, hotpixelRate, size=[N,N] )
# NOT WHAT I WANT, need some variation frame-to-frame in hot pixel values...
hotpixImage = np.random.normal( loc=(ePerPixel +hotpixelSigma*np.sqrt(ePerPixel)),
scale=hotpixelSigma*np.sqrt(ePerPixel), size=hotpixMask.shape ).astype('float32')
hotpixImage = np.clip( hotpixImage, 0, np.inf )
hotpixImage *= (hotpixMask.astype( 'float32' ))
ims( hotpixImage )
for J in xrange(0,M):
print( "Applying hot pixel mask for image " + str(J) )
    # zero the flagged pixels, then add the simulated hot-pixel values
    noisyObject[J,:,:] = (noisyObject[J,:,:] * (1 - hotpixMask)) + hotpixImage
zorroReg = zorro.ImageRegistrator()
zorroReg.shapePadded = zorro.util.findValidFFTWDim( [N*1.1,N*1.1] )
zorroReg.images = noisyObject
zorroReg.stackName = 'Sim'
zorroReg.saveC = True
zorroReg.Bmode = 'opti'
zorroReg.triMode = 'diag'
zorroReg.weightMode = 'logistic'
zorroReg.peaksigThres = 5.5
zorroReg.diagWidth = 5
#zorroReg.Brad = 256
zorroReg.alignImageStack()
unblurReg = zorro.ImageRegistrator()
unblurReg.images = noisyObject
unblurReg.stackName = "Sim"
unblurReg.pixelsize = 1.0
unblurReg.xcorr2_unblur()
mcReg = zorro.ImageRegistrator()
mcReg.images = noisyObject
mcReg.stackName = "Sim"
mcReg.Brad = 256
mcReg.xcorr2_mc()
# The features are too low frequency for the maximum to be reliable here... Need sharper features in the
# phantom.
ims( zorroReg.C )
plt.figure()
plt.plot( trans[:,1], trans[:,0], '.-', label='Sim' )
plt.plot( zorroReg.translations[:,1], zorroReg.translations[:,0], '.-', label='Zorro' )
plt.plot( unblurReg.translations[:,1], unblurReg.translations[:,0], '.-', label='UnBlur' )
plt.plot( mcReg.translations[:,1], mcReg.translations[:,0], '.-', label='MC' )
plt.title( "Translations from analytic phantom" )
plt.legend( loc='best' )
bias_zorro = np.mean( zorroReg.translations - trans, axis=0 )
rms_zorro = np.std( zorroReg.translations - trans, axis=0 )
bias_unblur = np.mean( unblurReg.translations - trans, axis=0 )
rms_unblur = np.std( unblurReg.translations - trans, axis=0 )
bias_mc = np.mean( mcReg.translations - trans, axis=0 )
rms_mc = np.std( mcReg.translations - trans, axis=0 )
print( "Zorro bias = " + str(bias_zorro) + ", rms = " + str(rms_zorro) )
print( "UnBlur bias = " + str(bias_unblur) + ", rms = " + str(rms_unblur) )
print( "Motioncorr bias = " + str(bias_mc) + ", rms = " + str(rms_mc) ) | zorroautomator | /zorroautomator-0.7.4b0.tar.gz/zorroautomator-0.7.4b0/zorro/scripts/analyticPhantom.py | analyticPhantom.py |
import zorro
import zorro.zorro_util as util
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as col
import numexprz as nz
def orientGainRef( stackName, gainRefName,
stackIsInAHole=True, applyHotPixFilter = True, doNoiseCorrelation=False,
relax=0.95, n_threads = None ):
"""
USAGE
Applies the gain reference over all possible orientations to determine which
is the best, quantitatively. Calculates the image standard deviation after
gain normalization (less is better) and the number of outlier pixels (less is
better) and the degree of correlated noise (less is better)
User should select a short flat-field stack (20 frames) in a hole. If no flat-field
image is available, pick a stack with no carbon or large amounts of ice, that
exhibited a large amount of drive.
"""
# ANALYSIS SCRIPT
zGain = zorro.ImageRegistrator()
zGain.loadData( gainRefName, target='sum' )
zStack = zorro.ImageRegistrator()
zStack.loadData( stackName )
rawStack = np.copy( zStack.images )
#print( zGain.imageSum.shape )
#print( zStack.images.shape )
# Check if stack and gain is transposed
if zGain.imageSum.shape[0] == zGain.imageSum.shape[1]:
print( "Square images" )
        orientList = [  # all 8 unique rotation/flip combinations
            [0,False,False], [0,False,True],
            [0,True,False], [0,True,True],
            [1,False,False], [1,False,True],
            [1,True,False], [1,True,True],
                      ]
elif (zGain.imageSum.shape[0] != zStack.images.shape[1]
and zGain.imageSum.shape[0] == zStack.images.shape[2] ):
print( "Rectangular image, rot90=1" )
        orientList = [
            [1,False,False], [1,False,True],
            [1,True,False], [1,True,True],
                      ]
else:
print( "Rectangular image, rot90=0" )
        orientList = [
            [0,False,False], [0,False,True],
            [0,True,False], [0,True,True],
                      ]
# If the images are rectangular our life is easier as it cuts the number of
# possible orientations in half.
N = len(orientList)
stdArray = np.zeros( N )
outlierPixCount = np.zeros( N )
corrNoiseCoeff = np.zeros( N )
hotCutoff = np.zeros( N )
deadCutoff = np.zeros( N )
binnedSum = [None] * N
if bool(doNoiseCorrelation):
FFTMage = np.empty( zStack.images.shape[1:], dtype='complex64' )
FFTConj = np.empty( zStack.images.shape[1:], dtype='complex64' )
IFFTCorr = np.empty( zStack.images.shape[1:], dtype='complex64' )
        # honour the caller-supplied thread count (fall back to 24 when None)
        FFT2, IFFT2 = zorro.zorro_util.pyFFTWPlanner( FFTMage, FFTConj,
                                                      n_threads=n_threads or 24 )
normConst2 = np.float32( 1.0 / np.size( FFTMage )**2 )
for I, orient in enumerate(orientList):
gainRef = np.copy( zGain.imageSum )
if orient[0] > 0:
print( "Rotating gain refernce by 90 degrees" )
gainRef = np.rot90( gainRef, k=orient[0] )
if orient[1] and orient[2]:
print( "Rotating gain reference by 180 degrees" )
gainRef = np.rot90( gainRef, k=2 )
elif orient[1]:
print( "Mirroring gain reference vertically" )
gainRef = np.flipud( gainRef )
elif orient[2]:
print( "Mirroring gain reference horizontally" )
gainRef = np.fliplr( gainRef )
zStack.images = zStack.images * gainRef
if applyHotPixFilter:
zStack.hotpixInfo['relax'] = relax
zStack.hotpixFilter()
outlierPixCount[I] = zStack.hotpixInfo['guessDeadpix'] + zStack.hotpixInfo['guessHotpix']
hotCutoff[I] = zStack.hotpixInfo[u'cutoffUpper']
deadCutoff[I] = zStack.hotpixInfo[u'cutoffLower']
binnedSum[I] = util.squarekernel( np.sum(zStack.images,axis=0), k=3 )
# zorro.zorro_plotting.ims( binnedSum[I] )
stdArray[I] = np.std( np.sum( zStack.images, axis=0 ) )
if bool(stackIsInAHole) and bool(doNoiseCorrelation) :
# Go through even-odd series
for J in np.arange(1,zStack.images.shape[0]):
print( "(Orientation %d of %d) Compute Fourier correlation %d" % (I,N,J) )
if np.mod(J,2) == 1:
FFT2.update_arrays( zStack.images[J,:,:].astype('complex64'), FFTConj ); FFT2.execute()
else:
FFT2.update_arrays( zStack.images[J,:,:].astype('complex64'), FFTMage ); FFT2.execute()
IFFT2.update_arrays( nz.evaluate( "normConst2*FFTMage*conj(FFTConj)"), IFFTCorr ); IFFT2.execute()
corrNoiseCoeff[I] += np.abs( IFFTCorr[0,0] )
elif bool(doNoiseCorrelation):
# Calculate phase correlations with a frame seperation of 6 frames to
# avoid signal correlation
frameSep = 6
for J in np.arange(0,zStack.images.shape[0] - frameSep):
print( "(Orientation %d of %d) Compute Fourier correlation %d" % (I,N,J) )
FFT2.update_arrays( zStack.images[J,:,:].astype('complex64'), FFTConj ); FFT2.execute()
FFT2.update_arrays( zStack.images[J+frameSep,:,:].astype('complex64'), FFTMage ); FFT2.execute()
IFFT2.update_arrays( nz.evaluate( "normConst2*FFTMage*conj(FFTConj)"), IFFTCorr ); IFFT2.execute()
corrNoiseCoeff[I] += np.real( IFFTCorr[0,0] )
pass
corrNoiseCoeff[I] /= normConst2
zStack.images = np.copy( rawStack )
#corrNoiseCoeff /= np.min( corrNoiseCoeff )
#stdArray /= np.min(stdArray)
bestIndex = np.argmin( stdArray )
nrows = 2
ncols = np.floor_divide( N+1, 2 )
plt.figure( figsize=(16,9))
for I in np.arange(N):
plt.subplot( nrows*100 + ncols*10 + (I+1) )
clim = util.histClim( binnedSum[I], cutoff=1E-3 )
plt.imshow( binnedSum[I], cmap='gray', norm=col.LogNorm(), vmin=clim[0], vmax=clim[1] )
plt.axis('off')
if I == bestIndex:
textcolor = 'purple'
else:
textcolor= 'black'
title = "kRot: %d, VertFlip: %s, HorzFlip: %s \n" % (orientList[I][0], orientList[I][1], orientList[I][2])
title += r"$\sigma: %.5g$" % stdArray[I]
if bool(applyHotPixFilter):
title += r"$, outliers: %d$" % outlierPixCount[I]
if bool(doNoiseCorrelation):
title += r"$, R_{noise}: %.5g$" % corrNoiseCoeff[I]
plt.title( title, fontdict={'fontsize':14, 'color':textcolor} )
plt.show(block=False)
return orientList[bestIndex]
# USER PARAMETERS
#==============================================================================
# if __name__ == "__main__":
# gainRefName = "/Projects/FourByte/raw/gainref/Gain-ref_x1m2_2016-08-30_0900AM_till_2016-08-30_1100PM.dm4"
# stackName = "/Projects/FourByte/raw/Aug31_02.41.10.mrc"
#     orientGainRef( stackName, gainRefName, stackIsInAHole=False, applyHotPixFilter=True,
#                    relax=0.95, n_threads = nz.detect_number_of_cores() )
#============================================================================== | zorroautomator | /zorroautomator-0.7.4b0.tar.gz/zorroautomator-0.7.4b0/zorro/scripts/orientGainReference.py | orientGainReference.py |
from .operators import operators
class Parser:
    def __init__(self, handler, api=None, params=None):
        self.handler = handler
        self.api = api
        # initialize the labels with the params that were sent in
        # (copy, so a shared mutable default is never carried across instances)
        self.labels = dict(params) if params else {}
### MATH ###
def eval_condition(self, cond_statement):
operator = cond_statement['operator']
operator_fn = operators[operator]
params = self.eval_all(cond_statement['params'])
return operator_fn(*params)
def eval_math(self, math_statement):
fn = math_statement['fn']
operator_fn = operators[fn]
params = self.eval_all(math_statement['params'])
return operator_fn(*params)
### INDICATORS EVALUATION ###
def eval_preset(self, preset_reference):
from . import presets
preset_id = preset_reference['id']
preset_expression = presets.get(preset_id, {})
return self.eval(preset_expression)
def eval_indicator(self, indicator_reference):
from . import Indicator
indicator_id = indicator_reference['id']
settings = indicator_reference.get('settings', {})
indicator = Indicator.from_api(indicator_id, self.api)
try:
return indicator.evaluate(self.handler.get_data(), settings)
except Exception as e:
return {"type": "error", "error_source": "indicator", "exception": e}
### COMMANDS/CONTROL FLOW ###
def eval_if(self, if_statement):
condition_true = self.eval_condition(if_statement['condition'])
if condition_true:
if 'true' in if_statement:
return self.exec(if_statement['true'])
else:
if 'false' in if_statement:
return self.exec(if_statement['false'])
    def eval_order(self, order):
        quantity = order['qty']
        stock = order.get('stock')
        # Enter/exit conditions for the order
        # if none, doesn't wait for a condition to enter, and holds pos
        # indefinitely (doesn't exit)
        # enter = order.get('enter')
        # exit_ = order.get('exit')
        return self.handler.order(stock, quantity)
def exec(self, command_list):
for command in command_list:
val = self.eval_command(command)
if val is not None:
if val['type'] == 'return':
return val['value']
elif val['type'] == 'error':
return val
def eval_command(self, command):
if command['type'] == 'if':
return self.eval_if(command)
elif command['type'] == 'order':
return {'type': 'order', 'value': self.eval_order(command)}
elif command['type'] == 'return':
return {'type': 'return', 'value': self.eval(command['value'])}
elif command['type'] == 'setlabel':
self.labels[command['label']] = self.eval(command['value'])
### GENERAL EVAL FUNCTIONS
def eval_all(self, objects):
return [self.eval(object) for object in objects]
### Evaluates an expression
def eval(self, expression):
if type(expression) in [int, float, str]:
return expression
if expression['type'] == 'condition':
return self.eval_condition(expression)
elif expression['type'] == 'indicator':
return self.eval_indicator(expression)
elif expression['type'] == 'number':
return expression['value']
elif expression['type'] == 'math':
return self.eval_math(expression)
elif expression['type'] == 'getlabel':
return self.labels[expression['label']]
### When it receives orders, it just logs them
class LogHandler:
def __init__(self):
self.log = []
def place_order(self, side, stock, quantity, enter=None, exit=None):
self.log.append({
'side': side,
'stock': stock,
'quantity': quantity,
'enter': enter,
'exit': exit
})
#### USAGE
if __name__ == "__main__":
code = [
{'type': 'order', 'qty': 10, 'stock': 'AAPL'}
]
# the Handler is what provides the stock data and what receives the orders
log = LogHandler()
parser = Parser(log)
parser.exec(code)
print(log.log) | zorroclient | /zorroclient-0.0.1a0-py3-none-any.whl/zorro/parse_zorroscript.py | parse_zorroscript.py |
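
    # A slightly richer program (a sketch; it assumes the operators table in
    # .operators defines a "gt" greater-than entry -- adjust the key to
    # whatever that module actually exports):
    conditional_code = [
        {'type': 'if',
         'condition': {'type': 'condition', 'operator': 'gt',
                       'params': [{'type': 'number', 'value': 5},
                                  {'type': 'number', 'value': 3}]},
         'true': [{'type': 'order', 'qty': 10, 'stock': 'AAPL'}]}
    ]
    parser.exec(conditional_code)
    print(log.log)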
import yfinance
import bisect
class PriceCache:
"""
This class stores the price of a certain stock at a certain time.
"""
def __init__(self):
# Cache format: {Stock: {Interval: {Datetime: Stock data}}
self.cache = {}
# Stores the ranges of data that have already been downloaded
# Format: {Stock: {Interval: {Start of range: End of range}}}
self.downloaded_ranges = {}
    def _save_range(self, symbol, start_time, end_time, interval):
        if symbol not in self.cache:
            self.cache[symbol] = {}
        symbol_cache = self.cache[symbol]
        if interval not in symbol_cache:
            symbol_cache[interval] = {}
        # This is a reference, so when we edit this, we are actually editing self.cache[symbol][interval] indirectly
        interval_cache = symbol_cache[interval]
        data = yfinance.Ticker(symbol).history(start=start_time, end=end_time, interval=interval)
        # Returns {datetime: stock_data, ...}
        rows = data.to_dict('index')
        # interval_cache maps datetime -> bar data; merge in the new rows
        interval_cache.update(rows)
def cache_stock_data(self, symbol, start, end, interval):
"""
Caches the stock data downloaded from the yfinance downloader.
If the data was already cached, does not cache again.
If there are "holes" in the data (eg you have up to April 10, and past April 12, but you want
data from April 9 - April 15) then it will patch the holes, but not redownload the other data.
Args:
symbol (string): Stock symbol
start (datetime): The beginning of the data you would like to download
end (datetime): The end of the data you would like to download
interval: The width of each bar (30min, 1hour, 1day, etc)
"""
        # Find which ranges of time have been downloaded
        interval_ranges = self.downloaded_ranges.setdefault(symbol, {}).setdefault(interval, {})
        # If nothing has been downloaded yet
        if len(interval_ranges) == 0:
            # Add the start/end markers
            interval_ranges[start] = end
            self._save_range(symbol, start, end, interval)
            return
        # Here, we optimize!
        # Any ranges that are already downloaded within this range
        # we don't need to download again: walk the existing ranges in order,
        # download only the gaps, and merge everything that overlaps the
        # requested window into a single range.
        merged_start, merged_end = start, end
        gaps = []
        cursor = start
        for range_start, range_end in sorted(interval_ranges.items()):
            if range_end < start or range_start > end:
                continue  # disjoint from the requested window
            if range_start > cursor:
                gaps.append((cursor, range_start))  # hole before this range
            cursor = max(cursor, range_end)
            merged_start = min(merged_start, range_start)
            merged_end = max(merged_end, range_end)
            del interval_ranges[range_start]  # safe: we iterate over a sorted copy
        if cursor < end:
            gaps.append((cursor, end))
        interval_ranges[merged_start] = merged_end
        for gap_start, gap_end in gaps:
            self._save_range(symbol, gap_start, gap_end, interval)
    def get_price(self, symbol, interval, time):
        """
        Gets the stock data for the bar at `time`.

        If the bar is not yet in the cache, it is downloaded first.

        Args:
            symbol (string): The stock symbol to look up
            interval (string): The bar width (30m, 1h, 1d, etc.)
            time (datetime): The timestamp of the bar to return
        """
        # Make sure the window containing `time` has been downloaded; for a
        # bare timestamp this is a no-op when the bar is already cached.
        self.cache_stock_data(symbol, time, time, interval)
        return self.cache[symbol][interval][time]
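

# --- usage sketch (assumes network access, that yfinance can serve the
# illustrative symbol/interval below, and that the lookup timestamp matches
# the timezone handling of the DatetimeIndex yfinance returns) ---
if __name__ == "__main__":
    import datetime

    cache = PriceCache()
    start = datetime.datetime(2022, 4, 1)
    end = datetime.datetime(2022, 4, 8)
    # First call downloads and caches the whole window ...
    cache.cache_stock_data("AAPL", start, end, "1d")
    # ... so later single-bar lookups inside it are served from the cache.
    bar = cache.get_price("AAPL", "1d", datetime.datetime(2022, 4, 4))
    print(bar)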
# zos-util
This module provides a Python interface into various z/OS utilities
# API
See [here](https://www.ibm.com/support/knowledgecenter/SSCH7P_3.8.0/zos_util.html) for the API
# Example
import zos_util
import tempfile
f = tempfile.NamedTemporaryFile()
# To specify a file with IBM-1047 code set
fpath = f.name
zos_util.chtag(fpath, 1047)
# To specify a file with ISO8859-1 code set
zos_util.chtag(fpath)
tag_info = zos_util.get_tag_info(fpath)
print(f"CCSID:{tag_info[0]}, TXT_FLAG:{tag_info[1]}")
# set to tag_mixed mode
zos_util.tag_mixed(fpath)
tag_info = zos_util.get_tag_info(fpath)
print(f"CCSID:{tag_info[0]}, TXT_FLAG:{tag_info[1]}")
# remove the tag from the file
zos_util.untag(fpath)
tag_info = zos_util.get_tag_info(fpath)
print(f"CCSID:{tag_info[0]}, TXT_FLAG:{tag_info[1]}")
# Build Instruction
`python3 ./setup.py install`
# Test Instruction
`cd test`
`python3 ./tag_test.py`
| zos-util | /zos_util-1.0.1.tar.gz/zos_util-1.0.1/README.md | README.md |
0.1.0 (2022-04-20)
------------------
* First release on PyPI.
0.2.0 (2022-05-02)
----------------------
* Add initial support for CPCs, LPARs, and logical CPUs
* Change minimum python level to 3.7 so I can use dataclasses. 3.6 is EOL anyway.
0.3.0 (2022-05-04)
----------------------
* Add support for PROCVIEW CPU systems
0.3.1 (2022-05-04)
----------------------
* Had conflicting requirements for twine in requirements_dev.txt
0.4.0 (2022-05-10)
----------------------
* Add some additional cpc and lpar fields
* Automate build and publishing to Pypi
0.5.0 (2022-05-25)
----------------------
* Strip out leading spaces from inputs (because sometimes they're getting passed in that way)
0.5.3 (2022-06-13)
----------------------
* Bugfixes
0.6.0 (2023-06-25)
----------------------
* Add initial support for IEE200I (D ASM output)
| zos-utilities | /zos-utilities-0.6.0.tar.gz/zos-utilities-0.6.0/HISTORY.rst | HISTORY.rst |
=============
zos-utilities
=============
.. image:: https://img.shields.io/pypi/v/zos-utilities.svg
:target: https://pypi.python.org/pypi/zos-utilities
:alt: Pypi
.. image:: https://github.com/Tam-Lin/zos-utilities/actions/workflows/build_and_test.yml/badge.svg
:target: https://github.com/Tam-Lin/zos-utilities/actions/workflows/build_and_test.yml
:alt: Build and Test Status
.. image:: https://readthedocs.org/projects/zos-utilities/badge/?version=latest
:target: https://zos-utilities.readthedocs.io/en/latest/?version=latest
:alt: Documentation Status
.. image:: https://img.shields.io/pypi/pyversions/zos-utilities.svg
:target: https://img.shields.io/pypi/pyversions/zos-utilities.svg
:alt: Python versions
Library for performing various utility functions needed for z/OS libraries. I have a couple of libraries that do
various things for/with z/OS, and they all need to convert from the z/OS Julian Date to datetime, so I thought I might
as well put it into a library. I'm also starting to build a representation of z/OS and IBM Z from an infrastructure
perspective.
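
The kind of conversion involved is simple enough to sketch here (this is an
illustration, not the library's actual API; z/OS Julian dates use the
``yyyyddd`` form, e.g. ``2013331`` for day 331 of 2013):

.. code-block:: python

    import datetime

    def julian_to_datetime(jdate):
        """Convert a z/OS yyyyddd Julian date to a datetime (sketch)."""
        year, day_of_year = int(jdate[:4]), int(jdate[4:])
        return datetime.datetime(year, 1, 1) + datetime.timedelta(days=day_of_year - 1)

    julian_to_datetime("2013331")  # -> datetime.datetime(2013, 11, 27, 0, 0)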
| zos-utilities | /zos-utilities-0.6.0.tar.gz/zos-utilities-0.6.0/README.rst | README.rst |
.. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every little bit
helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/Tam-Lin/zos-utilities/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug" and "help
wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
zos-utilities could always use more documentation, whether as part of the
official zos-utilities docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/Tam-Lin/zos-utilities/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `zos-utilities` for local development.
1. Fork the `zos-utilities` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/zos-utilities.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv zos-utilities
$ cd zos-utilities/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the
tests, including testing other Python versions with tox::
$ flake8 zos-utilities tests
$ python setup.py test or pytest
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 3.7, 3.8, 3.9 and 3.10. Check
https://github.com/Tam-Lin/zos-utilities/workflows/check_pr.yaml
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
    $ pytest tests.test_zos_utilities
Deploying
---------
A reminder for the maintainers on how to deploy.
Make sure all your changes are committed (including an entry in HISTORY.rst).
Then run::
$ bump2version patch # possible: major / minor / patch
$ git push
$ git push --tags
| zos-utilities | /zos-utilities-0.6.0.tar.gz/zos-utilities-0.6.0/CONTRIBUTING.rst | CONTRIBUTING.rst |
import re
import logging
from collections import OrderedDict
import dataclasses
from datetime import datetime
from .logical_cpu import Logical_CPU
from .data_set import DataSet
from .dasd_volume import DasdVolume
@dataclasses.dataclass
class LPAR:
"""
Represents an IBM z/OS LPAR
"""
logical_processors: dict = dataclasses.field(default_factory=OrderedDict)
physical_cpus: dict = dataclasses.field(default_factory=OrderedDict)
hiperdispatch: bool = None
mt_mode: bool = None
cp_mt_mode: bool = None
ziip_mt_mode: bool = None
cpc_nd: str = None
cpc_si: str = None
cpc_model: str = None
cpc_id: str = None
cpc_name: str = None
lpar_name: str = None
lpar_id: str = None
css_id: str = None
mif_id: str = None
name: str = None
part_id: str = None
partition_number: str = None
CPC: str = None
shared_processors: int = None
active: bool = None
IPL_volume: bool = None
os: bool = None
os_name: bool = None
os_level: bool = None
last_updated: datetime = None
url: str = None
start_update: datetime = None
finish_update: datetime = None
status: str = None
number_general_cpus: int = None
number_reserved_general_cpus: int = None
number_general_cores: int = None
number_reserved_general_cores: int = None
number_ziip_cpus: int = None
number_reserved_ziip_cpus: int = None
number_ziip_cores: int = None
number_reserved_ziip_cores: int = None
number_ifl_cpus: int = None
number_reserved_ifl_cpus: int = None
number_ifl_cores: int = None
number_reserved_ifl_cores: int = None
number_icf_cpus: int = None
number_reserved_icf_cpus: int = None
number_icf_cores: int = None
number_reserved_icf_cores: int = None
general_cp_weight_initial: int = None
general_cp_weight_current: int = None
general_cp_weight_minimum: int = None
general_cp_weight_maximum: int = None
zaap_weight_initial: int = None
zaap_weight_current: int = None
zaap_weight_minimum: int = None
zaap_weight_maximum: int = None
ziip_weight_initial: int = None
ziip_weight_current: int = None
ziip_weight_minimum: int = None
ziip_weight_maximum: int = None
ifl_weight_initial: int = None
ifl_weight_current: int = None
ifl_weight_minimum: int = None
ifl_weight_maximum: int = None
icf_weight_initial: int = None
icf_weight_current: int = None
icf_weight_minimum: int = None
icf_weight_maximum: int = None
storage: int = None
initial_central_storage: int = None
current_central_storage: int = None
maximum_central_storage: int = None
plpa_data_set: DataSet = None
common_data_set: DataSet = None
    local_data_set: list = None  # populated lazily by parse_d_asm
    scm: bool = None
def parse_d_m_core(self, iee174i_message):
"""
Takes the output of the response to 'D M=CORE' and builds a representation of the
system logical processor state at that time
:param core_status_message: The output of the message you want parsed
:return: Updates the internal state information of the lpar
"""
logger = logging.getLogger(__name__)
if iee174i_message[0].split()[0] != "IEE174I":
message = str("Incorrect message passed in; expected IEE174I, got %s" %
iee174i_message[0].split()[0])
            logger.error(message)
raise LPARException(message)
split_line_1 = iee174i_message[1].split()
logger.debug(split_line_1)
hd_value = split_line_1[2][3]
if hd_value == "Y":
self.hiperdispatch = True
elif hd_value == "N":
self.hiperdispatch = False
else:
message = str("HD= should be Y or N; got %s" % hd_value)
            logger.error(message)
raise LPARException(message)
mt_value = split_line_1[3][3]
if split_line_1[3][0:3] != "MT=":
message = ("MT= was not in the correct place; got %s" % split_line_1[3][0:3])
            logger.error(message)
raise LPARException(message)
if mt_value.isdigit():
self.mt_mode = int(mt_value)
else:
message = ("MT= should be a number; got %s" % mt_value)
            logger.error(message)
raise LPARException(message)
if self.mt_mode == 1:
pass
else:
cp_mt_mode = split_line_1[5][3]
if split_line_1[5][0:3] != "CP=":
message = "CP= was not in the correct place"
                logger.error(message)
raise LPARException(message)
if split_line_1[5][3].isdigit():
self.cp_mt_mode = int(cp_mt_mode)
else:
message = ("CP= should be a number; got %s" % cp_mt_mode)
                logger.error(message)
raise LPARException(message)
ziip_mt_mode = split_line_1[6][5]
if split_line_1[6][0:5] != "zIIP=":
message = ("zIIP= was not in the correct place; got %s" % split_line_1[6][0:5])
                logger.error(message)
raise LPARException(message)
if ziip_mt_mode.isdigit():
self.ziip_mt_mode = int(ziip_mt_mode)
else:
message = ("zIIP= should be a number, got %s" % ziip_mt_mode)
                logger.error(message)
raise LPARException(message)
core_re = re.compile(
'(?P<coreid>[0-9A-F]{4}) (?P<wlmmanaged>.)(?P<online>.)(?P<type>.) '
'(?P<lowid>[0-9A-F]{4})-(?P<highid>[0-9A-F]{4})( (?P<polarity>.)(?P<parked>.)'
' (?P<subclassmask>[0-9A-F]{4}) (?P<state1>.)(?P<state2>.))?')
linenum = 3
for linenum, line in enumerate(iee174i_message[3:], start=3):
core_info = core_re.search(line)
if core_info is None:
break
else:
core = Logical_CPU()
core.coreid = core_info.group("coreid")
if core_info.group("online") == "+":
core.online = True
else:
core.online = False
if core_info.group("type") == " ":
core.type = "CP"
elif core_info.group("type") == "I":
core.type = "zIIP"
core.lowid = core_info.group("lowid")
core.highid = core_info.group("highid")
core.polarity = core_info.group("polarity")
if core_info.group("parked") == "P":
core.parked = True
else:
core.parked = False
core.subclassmask = core_info.group("subclassmask")
if core_info.group("state1") == "+":
core.core_1_state = "online"
elif core_info.group("state1") == "N":
core.core_1_state = "not_available"
elif core_info.group("state1") == "-":
core.core_1_state = "offline"
if core_info.group("state2") == "+":
core.core_2_state = "online"
elif core_info.group("state2") == "N":
core.core_2_state = "not_available"
elif core_info.group("state2") == "-":
core.core_2_state = "offline"
self.logical_processors[core.coreid] = core
linenum += 1
if iee174i_message[linenum].lstrip().startswith("CPC ND = "):
self.cpc_nd = iee174i_message[linenum].lstrip()[9:].rstrip()
else:
error = ("line didn't start with CPC ND =; got %s" % iee174i_message[linenum])
logger.error(error)
raise LPARException(error)
linenum += 1
if iee174i_message[linenum].lstrip().startswith("CPC SI = "):
self.cpc_si = iee174i_message[linenum].lstrip()[9:].rstrip()
else:
error = ("line didn't start with CPC SI =; got %s" % iee174i_message[linenum])
logger.error(error)
raise LPARException(error)
linenum += 1
if iee174i_message[linenum].lstrip().startswith("Model: "):
self.cpc_model = iee174i_message[linenum].lstrip()[7:].rstrip()
else:
error = ("line didn't start with Model =; got %s" % iee174i_message[linenum])
logger.error(error)
raise LPARException(error)
linenum += 1
if iee174i_message[linenum].lstrip().startswith("CPC ID = "):
self.cpc_id = iee174i_message[linenum].lstrip()[9:].rstrip()
else:
error = ("line didn't start with CPC ID = ; got %s" % iee174i_message[linenum])
logger.error(error)
raise LPARException(error)
linenum += 1
if iee174i_message[linenum].lstrip().startswith("CPC NAME = "):
self.cpc_name = iee174i_message[linenum].lstrip()[11:].rstrip()
else:
error = ("line didn't start with CPC NAME = ; got %s" % iee174i_message[linenum])
logger.error(error)
raise LPARException(error)
linenum += 1
if iee174i_message[linenum].lstrip().startswith("LP NAME = "):
self.lpar_name = iee174i_message[linenum].lstrip()[10:14].rstrip()
else:
error = ("line didn't start with LP NAME = ; got %s" % iee174i_message[linenum])
logger.error(error)
raise LPARException(error)
if iee174i_message[linenum][21:].lstrip().startswith("LP ID = "):
self.lpar_id = iee174i_message[linenum].lstrip()[29:].rstrip()
else:
error = ("LP ID not where I expected; got %s" % iee174i_message[linenum])
logger.error(error)
raise LPARException(error)
linenum += 1
if iee174i_message[linenum].lstrip().startswith("CSS ID = "):
self.css_id = iee174i_message[linenum].lstrip()[10:].rstrip()
else:
error = ("line didn't start with CSS ID = ; got %s" % iee174i_message[linenum])
logger.error(error)
raise LPARException(error)
linenum += 1
if iee174i_message[linenum].lstrip().startswith("MIF ID = "):
self.mif_id = iee174i_message[linenum].lstrip()[10:].rstrip()
else:
error = ("line didn't start with MIF ID = ; got %s" % iee174i_message[linenum])
logger.error(error)
raise LPARException(error)
def parse_d_asm(self, iee200i_message):
logger = logging.getLogger(__name__)
if iee200i_message[0].split()[0] != "IEE200I":
message = str("Incorrect message passed in; expected IEE200I, got %s" %
iee200i_message[0].split()[0])
logger.error(message)
raise LPARException(message)
for linenum, line in enumerate(iee200i_message[2:], start=2):
split_line = line.split()
storage_type = split_line[0]
if storage_type in ("PLPA", "COMMON", "LOCAL"):
dev = split_line[4]
dataset_name = split_line[5]
dataset = DataSet(name=dataset_name, location=DasdVolume(unit_address=dev))
if storage_type == "PLPA":
self.plpa_data_set = dataset
elif storage_type == "COMMON":
self.common_data_set = dataset
elif storage_type == "LOCAL":
try:
self.local_data_set.append(dataset)
except AttributeError:
self.local_data_set = [dataset]
if storage_type == "SCM":
self.scm = True
class LPARException(Exception):
pass | zos-utilities | /zos-utilities-0.6.0.tar.gz/zos-utilities-0.6.0/src/zos_utilities/lpar.py | lpar.py |
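

# --- usage sketch -----------------------------------------------------------
# A minimal illustration of how the parsers above are meant to be fed: each
# expects the console response split into a list of lines. The file names
# below are assumptions (captured console logs), not part of the library.
if __name__ == "__main__":
    lpar = LPAR()
    with open("d_m_core.txt") as f:
        lpar.parse_d_m_core(f.read().splitlines())
    with open("d_asm.txt") as f:
        lpar.parse_d_asm(f.read().splitlines())
    print(lpar.cpc_name, lpar.lpar_name, len(lpar.logical_processors))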
.. highlight:: shell
============
Installation
============
Stable release
--------------
To install zos-utilities, run this command in your terminal:
.. code-block:: console
$ pip install zos-utilities
This is the preferred method to install zos-utilities, as it will always install the most recent stable release.
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for zos-utilities can be downloaded from the `Github repo`_.
You can either clone the public repository:
.. code-block:: console
$ git clone git://github.com/Tam-Lin/zos-utilities
Or download the `tarball`_:
.. code-block:: console
$ curl -OJL https://github.com/Tam-Lin/zos-utilities/tarball/master
Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
.. _Github repo: https://github.com/Tam-Lin/zos-utilities
.. _tarball: https://github.com/Tam-Lin/zos-utilities/tarball/master
| zos-utilities | /zos-utilities-0.6.0.tar.gz/zos-utilities-0.6.0/docs/installation.rst | installation.rst |
=========
zosftplib
=========
**An FTP subclass which adds some Mainframe z/OS features like job submission, execution of sql/DB2 queries, ...**
Usage
=====
::
import zosftplib
Myzftp = zosftplib.Zftp(mvshost, mvsuser, passwd,
timeout=500.0, sbdataconn='(ibm-1147,iso8859-1)')
Features
========
submitting sql/DB2 queries and retrieving their outputs
-------------------------------------------------------
::
with open('/tmp/systables.csv', 'w') as outfile:
for line in Myzftp.exec_sql("SELECT * FROM SYSIBM.SYSTABLES WITH UR"):
outfile.write(';'.join(line.split()) + '\n')
submitting batch jobs and waiting for their outputs
---------------------------------------------------
::
# easy job for zos:
job = Myzftp.submit_wait_job('//IBMUSERX JOB MSGLEVEL(1,1)\n'
'//STEP001 EXEC PGM=IEFBR14',
purge=True)
print "rc:", job["rc"], "Jes status:", job["status"]
for line in job["output"]:
print line
This produces the following output::
rc: RC=0000 Jes status: OUTPUT (job purged)
1 J E S 2 J O B L O G -- S Y S T E M S Y S 1 -- N O D E N 1
0
17.49.35 JOB03914 ---- WEDNESDAY, 27 NOV 2013 ----
17.49.35 JOB03914 IRR010I USERID IBMUSER IS ASSIGNED TO THIS JOB.
17.49.35 JOB03914 ICH70001I IBMUSER LAST ACCESS AT 17:47:56 ON WEDNESDAY, NOVEMBER 27, 2013
17.49.35 JOB03914 $HASP373 IBMUSERX STARTED - INIT 1 - CLASS A - SYS SYS1
17.49.35 JOB03914 IEF403I IBMUSERX - STARTED - TIME=17.49.35
17.49.35 JOB03914 IEF404I IBMUSERX - ENDED - TIME=17.49.35
17.49.35 JOB03914 $HASP395 IBMUSERX ENDED
0------ JES2 JOB STATISTICS ------
- 27 NOV 2013 JOB EXECUTION DATE
- 2 CARDS READ
- 24 SYSOUT PRINT RECORDS
- 0 SYSOUT PUNCH RECORDS
- 1 SYSOUT SPOOL KBYTES
- 0.00 MINUTES EXECUTION TIME
END OF JES SPOOL FILE
1 //IBMUSERX JOB MSGLEVEL(1,1) JOB03914
2 //STEP001 EXEC PGM=IEFBR14
END OF JES SPOOL FILE
ICH70001I IBMUSER LAST ACCESS AT 17:47:56 ON WEDNESDAY, NOVEMBER 27, 2013
IEF142I IBMUSERX STEP001 - STEP WAS EXECUTED - COND CODE 0000
IEF373I STEP/STEP001 /START 2013331.1749
IEF374I STEP/STEP001 /STOP 2013331.1749 CPU 0MIN 00.01SEC SRB 0MIN 00.00SEC VIRT 4K SYS 232K EXT 0K SYS 10780K
IEF375I JOB/IBMUSERX/START 2013331.1749
IEF376I JOB/IBMUSERX/STOP 2013331.1749 CPU 0MIN 00.01SEC SRB 0MIN 00.00SEC
z/OS Catalog and JES spool informations
---------------------------------------
::
for x in Myzftp.list_catalog('SYS1.*'):
print x["Dsname"], x["Dsorg"], x["Used"], "tracks"
# print all "ACTIVE" jobs:
for job in Myzftp.list_jes_spool('', '', 'ACTIVE'):
print job
This produces the following output::
JOBNAME JOBID OWNER STATUS CLASS
BPXAS STC04218 START2 ACTIVE STC
PORTMAP STC04182 START2 ACTIVE STC
BPXAS STC04179 START2 ACTIVE STC
NFSC STC04171 START2 ACTIVE STC
CICSA STC04170 START2 ACTIVE STC
TCPIP STC04162 TCPIP ACTIVE STC
TN3270 STC04163 START2 ACTIVE STC
SDSF STC04160 START2 ACTIVE STC 1 spool files
TSO STC04158 START1 ACTIVE STC 1 spool files
INIT STC04157 START2 ACTIVE STC
TCPIP STC04162 TCPIP ACTIVE STC
VTAM STC04147 START1 ACTIVE STC
RACF STC04164 START2 ACTIVE STC
...
Retrieve thousands of members
-----------------------------
::
Myzftp.get_members('SYS1.PARMLIB', '/tmp/parmlib/')
Myzftp.get_members('SYS1.LINKLIB', '/tmp/linklib/',
members='*', retr='binary', ftp_threads=10)
Get/put sequential text/binary z/OS file
----------------------------------------
::
Myzftp.download_binary('SYS1.MAN1', '/tmp/smf.bin')
Myzftp.upload_text('/tmp/bigdata.txt', 'IBMUSER.BIGDATA',
sitecmd='lrecl=1024 cyl pri=500 sec=100')
Installation
============
The package is available as a Pip package:
``$ sudo pip install zosftplib``
Or using easy_install:
``$ sudo easy_install zosftplib``
Changelog
=========
2.0 - (2019-01-15)
1.0 - (2013-11-25)
Initial release.
| zosftplib | /zosftplib-2.0.tar.gz/zosftplib-2.0/README.rst | README.rst |
import re
import time
import random
import os.path
import subprocess
import _thread, threading
import io
import ftplib
def test_hostname_alive(host):
"ping a host"
# $TODO : test under "windows"
ping = subprocess.Popen("ping -q -c2 -W 2 " + host, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines = True)
exitcode = ping.wait()
response = ping.stdout.read()
errors = ping.stderr.read()
if exitcode in (0, 1):
life = re.search(r"(\d) received", response)
if life and life.group(1) == '0': # 1 packets transmitted, 0 received,
raise ZftpError("Unknown hostname: %s (if ICMP ping is blocked"
", try this: Zftp(..,ping=False) )"%host)
else:
if 'unknown host' in errors:
raise ZftpError("Unknown hostname: %s (%s)"%(host, errors))
else:
raise ZftpError("Ping hostname: %s error (%s)"%(host, errors))
def ensure_cntlfile(file_or_text, jobcontrol=None):
"""
To check if the JCL or SQL input 'ctnl file' is correct (80 columns
max line length). Returns an file object
- file_or_text: can be pathname (whose existence was previously tested),
a string or a file object
- control: test if jobcard 'JOBNAME' value in jcl is conform to the JOBNAME
parameter. This is necessary for the list_jes_spool method to find JOBNAME
(with jes 'JESJOBNAME' subcommand)
"""
if isinstance(file_or_text, io.IOBase):
fout = file_or_text
text = fout.read()
fout.seek(0) # at beginning
elif isinstance(file_or_text, str):
if os.path.isfile(file_or_text):
text = open(file_or_text, 'rt').read()
fout = open(file_or_text, 'rb')
fout.seek(0) # at beginning
else:
text = file_or_text
fout = io.BytesIO(text.encode())
else:
raise ZftpError("invalid cntlfile type: %s"%(file_or_text,))
lines_error = [l for l in text.splitlines() if len(l.rstrip()) > 80]
if lines_error:
raise ZftpError("invalid cntlfile record length: %s (should be <= 80)"
" %s"%(len(lines_error[0]),lines_error[0]))
if jobcontrol:
job_re = re.compile(r"^[\n]?//(\S+)\s+JOB\s+", flags=re.MULTILINE)
sx0 = re.search(job_re, text)
if not sx0:
raise ZftpError("invalid jobcard: '%s' in JCL file "
% (text[:80].strip(),))
if not sx0.group(1).startswith(jobcontrol):
raise ZftpError("invalid jobname %s parameter in JCL file "
"(JESJOBNAME= %s)"%
(sx0.group(1), jobcontrol))
return fout
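
# Example (a sketch): each call below returns an open binary file object whose
# records have been checked against the 80-column JCL limit; passing
# jobcontrol additionally verifies that the jobname on the JOB card starts
# with the given prefix.
#
#   ensure_cntlfile('//IBMUSERX JOB MSGLEVEL(1,1)\n//STEP001 EXEC PGM=IEFBR14',
#                   jobcontrol='IBMUSER')
#   ensure_cntlfile('/tmp/job.jcl')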
def sanitize_mvsname(name):
" sanitize mvs dataset name "
if name:
return "'" + name.strip().replace("'","").replace('"','') + "'"
else:
return name
class ZftpError( BaseException ):
"""ZosFtp error."""
def __init__(self, value):
super(ZftpError, self).__init__(value)
self.value = value
def __str__(self):
return repr(self.value)
class JclError(ZftpError):
""" jcl error "
IEFC629I INCORRECT USE OF APOSTROPHE ON THE JOB STATEMENT
IEF212I <jobname> stepname ddname> - DATA SET NOT FOUND
IEFC452I INVALID - JOB NOT RUN - JCL ERROR 848
IEFC452I <jobname> - JOB NOT RUN - JCL ERROR 858
IEFC605I UNIDENTIFIED OPERATION FIELD
IEF643I UNIDENTIFIED POSITIONAL PARAMETER IN THE DISP FIELD
IEFC662I INVALID LABEL
"""
pass
class Zftp(ftplib.FTP):
"""
MVS z/OS FTP subclass
"""
# To parse nlst() output :
CATALOG_OFS = (('Volume', 0, 6), ('Unit', 7, 14), ('Referred', 14, 24),
('Ext', 24, 27), ('Used', 27, 32), ('Recfm', 34, 39),
('Lrecl', 39, 44), ('BlkSz', 45, 50), ('Dsorg', 51, 55),
('Dsname', 56, 100))
PDSLOAD_OFS = (('Size', 10, 16), ('TTR', 17, 23), ('Alias-of', 24, 32),
('AC', 33, 35), ('Attributes', 36, 66), ('Amode', 68, 71),
('Rmode', 74, 77))
PDSTXT_OFS = (('VV.MM', 10, 15), ('Created', 16, 26), ('Changed', 27, 37),
('Heure', 38, 43), ('Size', 44, 49), ('Init', 50, 79),
('Size', 44, 79), ('Id', 44, 79))
def __init__(self, host='', user='', passwd='', acct='', timeout=600.0,
sbdataconn='', **kwargs):
self.__ping = kwargs.get('ping', False)
if self.__ping: # caution: a host can be configured
test_hostname_alive(host) # to block icmp pings
self.__kwargs = kwargs
try:
ftplib.FTP.__init__(self, host, user, passwd, acct, timeout)
except TypeError: # timeout not supported ?
ftplib.FTP.__init__(self, host, user, passwd, acct)
self.timeout = None
syst = self.sendcmd('SYST')
if not 'z/OS' in syst:
raise ZftpError("host %s is not a MVS or z/OS platform: %s"
%(host, syst))
if sbdataconn:
self.sendcmd('SITE sbdataconn=' + sbdataconn)
self.sbdataconn = sbdataconn
        self.stats = self.sendcmd('STAT')
pos_ftyp = self.stats.find('211-FileType') + 12
pos_jesint = self.stats.find('211-JESINTERFACELEVEL') + 25
self.filetype = self.stats[pos_ftyp :pos_ftyp + 3]
self.__jesinterfacelevel = self.stats[pos_jesint :pos_jesint + 1]
self.__offsets = None
self.__processed_members = 0
self.__jobid = None
def login(self, user = '', passwd = '', acct = ''):
self.user = user
self.passwd = passwd
self.acct = acct
ftplib.FTP.login(self, user, passwd, acct)
def _setfiletype(self, filetype='SEQ'):
"""Switch z/OS FTP filetype parameter : SEQ, JES, DB2
"""
if not self.filetype == filetype:
self.sendcmd('SITE filetype=' + filetype)
self.filetype = filetype
def getresp(self):
"""
ftplib.getresp :
parse JOBNAME in 250/125 z/OS FTP response
"""
resp = self.getmultiline()
if self.debugging:
print('*resp*', self.sanitize(resp))
self.lastresp = resp[:3]
c = resp[:1]
if c in ('1', '2', '3'):
if resp[:3] in('250','125'): #|Zftp spec
sx0 = re.search(r"\s+(JOB\d{5})\s+", resp) #|
if sx0: #|
self.__jobid = sx0.group(1) #|
return resp
if c == '4':
raise ftplib.error_temp(resp)
if c == '5':
raise ftplib.error_perm(resp)
raise ftplib.error_proto(resp)
def download_text(self, mvsname, localpath):
" download one file by FTP in text mode "
self._setfiletype('SEQ')
localfile = open(localpath, 'w')
mvsname = sanitize_mvsname(mvsname)
def callback(line):
localfile.write(line + '\n')
self.retrlines('RETR ' + mvsname, callback)
localfile.close()
def download_binary(self, mvsname, localpath, sitecmd=''):
" download one file by FTP in binary mode "
self._setfiletype('SEQ')
sitecmd = sitecmd or 'RDW'
self.sendcmd('SITE ' + sitecmd)
localfile = open(localpath, 'wb')
mvsname = sanitize_mvsname(mvsname)
self.retrbinary('RETR ' + mvsname, localfile.write)
localfile.close()
def upload_text(self, localpath, mvsname, sitecmd=''):
" upload one file by FTP in text mode "
self._setfiletype('SEQ')
sitecmd = sitecmd or 'lrecl=80 blk=3200 cyl pri=1 sec=5'
self.sendcmd('SITE ' + sitecmd)
mvsname = sanitize_mvsname(mvsname)
localfile = open(localpath, 'rb')
self.storlines('STOR ' + mvsname, localfile)
localfile.close()
def upload_binary(self, localpath, mvsname, sitecmd=''):
" upload one file by FTP in binary mode "
self._setfiletype('SEQ')
sitecmd = sitecmd or 'lrecl=80 blk=3200 cyl pri=1 sec=5'
self.sendcmd('SITE ' + sitecmd)
mvsname = sanitize_mvsname(mvsname)
localfile = open(localpath, 'rb')
self.storbinary('STOR ' + mvsname, localfile)
localfile.close()
def list_jes_spool(self, jobmask='', owner='', status='ALL'):
"""
list all jobname from jes spool where jobname like mask, owner and status
"""
jes_spool = []
if status.upper() not in ('ALL', 'INPUT', 'OUTPUT', 'ACTIVE'):
status = 'ALL'
try:
self._setfiletype('JES')
if jobmask:
self.sendcmd('SITE JESJOBNAME=' + jobmask)
if owner:
self.sendcmd('SITE JESOWNER=' + owner)
self.sendcmd('SITE JESSTATUS=' + status)
self.dir(jes_spool.append)
except (ZftpError, ftplib.Error) as msg:
if '550 No jobs found' in str(msg):
return jes_spool
else:
raise
return jes_spool
def get_job_infos(self, jobid, jobmask='*'):
"""
retrieve JES spool information from a jobid and a jobmask
"""
# jesinterfacelevel1 output regexp:
sr_v1 = re.compile(r"\s*(\S+)\s+(\S+)\s+(\S+)\s+(\d+) Spool Files")
# jesinterfacelevel2 output regexp:
sr_v2 = re.compile(r"\s*(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(.*) "
r"(\d+) spool files")
job = {"jobid":jobid, "owner":'?', "class":'?', "rc":'?',
"jobname":'?', "spool":'?'}
for linej in self.list_jes_spool(jobmask, status='ALL'):
if not linej.startswith('JOBNAME'):
if self.__jesinterfacelevel == '2':
sx0 = re.search(sr_v2, linej)
if sx0 and sx0.group(2) == jobid:
(job["jobname"],
_, # jobid
job["owner"],
job["status"],
job["class"],
job["rc"],
job["spool"]) = sx0.group(1, 2, 3, 4, 5, 6, 7)
break
else: #self.__jesinterfacelevel = '1'
sx0 = re.search(sr_v1, linej)
if sx0:
(job["jobname"],
_, # jobid
job["status"],
job["spool"]) = sx0.group(1, 2, 3, 4)
break
return job
def submit_job(self, jcl, jobname='', retry_info=3):
"""
You can submit a job using FTP by putting a jcl file
in JES 'internal reader'
"""
resp = ''
job = None
retry_info = retry_info or 0
try:
self._setfiletype('JES')
self.__jobid = None
            resp = self.storlines('STOR INTRDR',
                                  ensure_cntlfile(jcl, jobcontrol=jobname))
            while retry_info > 0:
job = self.get_job_infos(self.__jobid, jobname + '*')
if job["rc"] == '?':
if self.debugging:
print("** retry get_job_infos **", retry_info)
retry_info -= 1
time.sleep(1)
else:
break
except ZftpError as msg:
raise ZftpError( "submit_job error: %s (last response:%s)"
%(msg, resp))
except ftplib.all_errors as msg:
raise ZftpError( "submit_job error: %s (last response:%s)"
%(msg, resp))
        except OSError as msg:
            raise ZftpError("submit_job error: %s (last response:%s)"
                            % (msg, resp))
return job
def submit_wait_job(self, jcl, jobname='', cntlfile='', spoolback=None,
purge = False, timeout=None):
"""
You can submit a job using FTP and automatically receive your output.
Rather than using the JCL you built on the FTP client, this function
uses the JCL you have built on the FTP server.
Automatic retrieval of jobs works only if the file contains a single
job. It does not work for files that include more than one job
(multiple JOB cards).
"""
output = []
resp = ''
spoolback = spoolback or (lambda line: output.append(line))
cntlfile = sanitize_mvsname(cntlfile) or ("'" + self.user +
".FTPTEMP0.CNTL'")
jobname = jobname or self.user
timeout = timeout or self.timeout
if timeout != self.timeout: # new timeout value => reconnect
self.close()
self.__init__(self.host, self.user, self.passwd, self.acct, timeout,
self.sbdataconn, **self.__kwargs)
try:
self._setfiletype('SEQ')
resp = self.storlines('STOR ' + cntlfile,
ensure_cntlfile(jcl, jobcontrol=jobname))
self._setfiletype('JES NOJESGETBYDSN')
#200-JESINTERFACELEVEL=1. value of JESJOBname cannot be modified
if (self.__jesinterfacelevel == '1' and
not jobname.startswith(self.user)):
raise ZftpError("JESINTERFACELEVEL=1, The value of "
"JESJOBname cannot be modified: %s"%jobname)
else:
self.sendcmd('SITE JESJOBNAME=' + jobname + '*')
self.__jobid = None
resp = self.retrlines('RETR ' + cntlfile, spoolback)
#self.__jobid was parsed from self.getresp() during RETR cmd
job = self.get_job_infos(self.__jobid, jobname + '*')
if purge and job["jobid"]:
resp = self.sendcmd('DELE ' + job["jobid"])
job["status"] += ' (job purged)'
job["output"] = output
except ZftpError as msg:
raise ZftpError( "submit_wait_job error: %s (last response:%s)"
%(msg, resp))
except ftplib.all_errors as msg:
raise ZftpError( "submit_wait_job error: %s (last response:%s)"
%(msg, resp))
except OSError as msg:
raise ZftpError( "submit_wait_job error: %s (last response:%s)"
%(msg, resp))
return job
    # When submitting a job and automatically receiving the output, remember
    # that your session is suspended. Use care, based on the anticipated run
    # time of your job, when using this function. If your session times out,
    # you must restart FTP and manually retrieve your output. Session
    # timeouts are caused by the following:
    #
    # - The FTP server does not wait long enough for the executing job to
    #   end. Increase the JESPUTGETTO interval in the FTP.DATA data
    #   statement on the server. This defaults to 10 minutes and defines how
    #   long FTP waits for the submitted job to complete before timing out.
    # - The FTP client does not wait long enough for the job to complete and
    #   the server to retrieve the output. Increase the DATACTTIME timer
    #   value in the client. This defaults to two minutes and defines how
    #   long the client waits for a response from the server.
    # - The control or data connection is closed. This is usually caused by
    #   a firewall that timed out the session because of inactivity. Add
    #   FTPKEEPALIVE (control connection) and DATAKEEPALIVE (data
    #   connection) statements in the FTP.DATA data file.
    # - The FTP client and FTP server receive resets, also usually caused by
    #   a firewall timing out an idle session. Add an FTPKEEPALIVE statement
    #   or decrease the time interval on the current FTPKEEPALIVE statement
    #   in the FTP.DATA data file. The keepalive value on FTPKEEPALIVE must
    #   be less than the timeout value expected by the server.
def db2_ssid(self):
"""Find DB2 subsystem name in FTP STATS cmd outputs"""
db2_re = re.compile(r"211-SITE DB2 subsystem name is (.*)")
db2_find = re.findall(db2_re, self.stats)
if db2_find:
return db2_find[0]
else:
return None
def exec_sql(self, query='', cntlfile='', db2id='', spread=''):
"""
Allows submitting DB2 queries (select) and retrieving their output
        | DB2 plan EZAFTPMQ should be declared in your 'TCPIP.FTP.DATA':
| DB2PLAN EZAFTPMQ ; db2 plan name for OE-FTP
| and authorised in Db2: 'GRANT EXECUTE ON PLAN EZAFTPMQ TO PUBLIC'
"""
resultset = []
resp = ''
cntlfile = sanitize_mvsname(cntlfile) or ("'" + self.user +
".FTPTEMP0.SQL'")
spread = spread or "SPREAD"
db2id = db2id or self.db2_ssid()
if not db2id:
raise ZftpError( "exec_sql DSN Error (DB2 subsystem name):"
"%s" % (db2id,))
query = query or ("select 'DB2 Subsystem " + db2id +
" is OK !' from SYSIBM.SYSDUMMY1")
try:
self._setfiletype('SQL')
if db2id:
self.sendcmd('SITE DB2=' + db2id)
self.sendcmd('SITE ' + spread)
self.cwd("''")
self.sendcmd('SITE lrecl=80 blk=3200 cyl pri=1 sec=5')
resp = self.storlines('STOR ' + cntlfile, ensure_cntlfile(query))
self.retrlines("RETR " + cntlfile, lambda l: resultset.append(l))
except ftplib.Error as msg:
if resultset:
hlp = ''.join([l + '\n' for l in resultset])
else:
hlp = ("control DB2 parameters in your 'TCPIP.FTP.DATA' file:\n"
" DB2 xxxx ; db2 subsystem name\n"
" DB2PLAN EZAFTPMQ ; db2 plan name for OE-FTP")
raise ZftpError( "exec_sql Error %s %s (%s)" % (msg, resp, hlp))
return resultset
def list_catalog(self, mask):
""" Scans the MVS Catalog and returns a list of dictionaries with keys:
'Recfm', 'Used', 'Lrecl', 'Dsname', 'Dsorg', 'Volume', 'Ext',
'BlkSz', 'Unit', 'Referred'
"""
def parse_and_store_catalog_line(line):
""" parse ftp dir cmd outputs
"""
if 'DSNAME' in line.upper():
self.__offsets = self.CATALOG_OFS
else:
entry = {}
entry["Dsname"] = line[56:].replace("'","")
for (label, pos , length) in self.__offsets:
if not label == 'Dsname':
if "User catalog connector" in line:
entry[label] = None
elif "Error determining attributes" in line:
entry[label] = '?'
else:
entry[label] = line[pos:length].strip()
catalog.append(entry)
catalog = []
self.__offsets = None
mask = sanitize_mvsname(mask)
self._setfiletype('SEQ')
self.cwd("''")
try:
self.dir(mask, parse_and_store_catalog_line)
except (ZftpError, ftplib.error_perm) as msg:
if '550 No data sets found' in str(msg):
return catalog
else:
raise
return catalog
def get_pds_directory(self, pdsname, attrs=False, samples=None):
""" Returns a dictionnary from PDS directory
Attributes are different between sources and loads member
"""
def parse_and_store_directory_line(line):
""" parse ftp dir cmd output from a PDS
"""
if 'NAME' in line.upper(): # first line
if 'Alias-of' in line: # is a loadmodule directory
self.__offsets = self.PDSLOAD_OFS
else:
self.__offsets = self.PDSTXT_OFS
else:
try:
member = line[0:8].strip()
directory[member] = [line[pos:length].strip()
for (_, pos , length) in self.__offsets]
except Exception as msg:
raise ZftpError("parse_and_store_directory_line error:"
"line=%s msg=%s offsets=:%s"
%(line, msg, self.__offsets))
directory = {}
self.__offsets = None
pdsname = sanitize_mvsname(pdsname)
self._setfiletype('SEQ')
# PDS test : '250 The working directory is a partitioned data set'
        if 'is a partitioned data set' not in self.cwd(pdsname):
            raise ZftpError("dataset %s is not partitioned" % (pdsname,))
try:
if attrs:
self.dir(parse_and_store_directory_line)
else:
for entry in self.nlst():
directory[entry] = None
except (ZftpError, ftplib.error_perm) as msg:
if "550 No members found" in str(msg):
return directory
else:
raise ZftpError("get_pds_directory error: %s"%(msg))
if samples:
sample_dic = {}
try:
for memb in random.sample(directory, samples):
sample_dic[memb] = directory[memb]
return sample_dic
except ValueError:
del sample_dic
return directory
else:
return directory
def get_members(self, pdsname, localpath, lmembers='*', retr='',
callback=None, ftp_threads=1, samples=None, fmback=None):
"""
Retrieves members from a PDS
"""
def get_partial(partial_directory, partial_id=1, partial_lock=None):
""" get partial directory members
"""
# $TODO :
# - Exception handling :
# garbage in member name (Computer Associates PDSM...)
# - Multiple Get
if partial_id > 1: # create new session
partial_ftp = Zftp(self.host, self.user, self.passwd, self.acct,
self.timeout,self.sbdataconn,**self.__kwargs)
else:
partial_ftp = self # keep the current session
if self.debugging:
partial_ftp.set_debuglevel(self.debugging)
            if 'partitioned data set' not in partial_ftp.cwd(pdsname):
                raise ZftpError("get_members error: dataset %s"
                                " is not partitioned" % (pdsname,))
for member in partial_directory:
onepath = os.path.join(localpath, member)
try:
if callback is None:
                        if retr == 'LINES':
                            with open(onepath, 'wt') as fmemb:
                                partial_ftp.retrlines(
                                    'RETR ' + member,
                                    lambda l: fmemb.write('%s\n' % l))
                        else:  # 'BINARY'
                            with open(onepath, 'wb') as fmemb:
                                partial_ftp.retrbinary('RETR ' + member,
                                                       fmemb.write)
if fmback:
fmback(onepath) # member callback func ?
else:
if retr == 'LINES':
partial_ftp.retrlines('RETR ' + member,
lambda l: callback(l, member))
else: # 'BINARY':
partial_ftp.retrbinary('RETR ' + member, callback,
blocksize=8000)
if partial_lock:
partial_lock.acquire()
self.__processed_members += 1
partial_lock.release()
else:
self.__processed_members += 1
except ftplib.error_perm as msg:
echecs.append((member, msg))
if partial_id > 1:
partial_ftp.close()
else:
pass # not quit this session
echecs = []
self.__processed_members = 0
retr = retr.upper() or 'LINES'
pdsname = sanitize_mvsname(pdsname)
if not os.path.isdir(localpath):
raise ZftpError("get_members %s error, no such "
"directory %s"%(pdsname, localpath))
if lmembers in ('*', 'ALL'):
directory = self.get_pds_directory(pdsname, attrs=False,
samples=samples)
else:
            if isinstance(lmembers, (list, tuple)):
                directory = lmembers
            else:
                raise ZftpError("get_members %s, members list type error: '*'"
                                " or list or tuple expected: %s"
                                % (pdsname, lmembers))
self._setfiletype('SEQ')
nb_members = len(directory)
if ftp_threads <= 1:
            get_partial(list(directory))  # single FTP session
else:
if ftp_threads > 16:
ftp_threads = min(16, nb_members // 10)
thread_list = []
lock_thread = _thread.allocate_lock() # init lock
# slice size:
if nb_members % ftp_threads == 0:
slice_len = nb_members // ftp_threads
else:
slice_len = (nb_members // ftp_threads) + 1
# prepare directory slicing:
full_dir = [d for d in directory if not d.startswith('PDS')]
full_dir.sort()
slice_num = 1
for pos in range(0, nb_members, slice_len): # prepare ftp threads..
slice_dir = full_dir[pos:pos + slice_len]
th0 = threading.Thread(target=get_partial,
args=(slice_dir, slice_num, lock_thread))
thread_list.append(th0)
slice_num += 1
for th0 in thread_list:
th0.start()
for th0 in thread_list:
th0.join()
        if self.__processed_members != nb_members:
            if self.__processed_members > 0:
                raise ZftpError("get_members %s partial result "
                                "(%s member(s) OK/%s), errors: %s" %
                                (pdsname, self.__processed_members,
                                 nb_members, echecs))
            else:
                raise ZftpError("get_members %s all members in error "
                                "(%s member(s) OK/%s)" %
                                (pdsname, self.__processed_members, nb_members))
def get_text(self, mvsname):
" one file by FTP in text mode "
self._setfiletype('SEQ')
mvsname = sanitize_mvsname(mvsname)
class LineHandler(object):
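            # accumulates retrieved lines into a single text string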
def __init__(self):
self.text = ''
def update(self, line):
self.text += line + '\n'
callback = LineHandler()
self.retrlines('RETR ' + mvsname, callback.update)
return callback.text
def get_binary(self, mvsname, sitecmd=''):
" download one file by FTP in binary mode "
self._setfiletype('SEQ')
sitecmd = sitecmd or 'RDW'
self.sendcmd('SITE ' + sitecmd)
mvsname = sanitize_mvsname(mvsname)
class BlocHandler(object):
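            # accumulates retrieved binary blocks into a single bytes value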
def __init__(self):
self.bloc = b''
def update(self, line):
self.bloc += line
callback = BlocHandler()
self.retrbinary('RETR ' + mvsname, callback.update)
return callback.bloc
if __name__ == '__main__':
ftp = Zftp("9.24.115.101", "IBMUSER", "AZERT")
#x= ftp.get_text("'SYS1.MACLIB(ABEND)'")
#print(x[0:80])
#print(ftp.get_pds_directory("EROL.LOAD.DEV"))
request = ftp.exec_sql("""SELECT COUNT(*) FROM IBMUSER.ECOLE_CLIENT""",db2id='DB9G',spread='SPREAD')
for l in request:
print("l:",l.replace('\t',';'))
request = ftp.exec_sql("""UPDATE IBMUSER.ECOLE_CLIENT SET PRENOM='TOTO52'\n WHERE EMAIL ='[email protected]'""",db2id='DB9G',spread='SPREAD')
for l in request:
print("l:",l.replace('\t',';'))
#x= ftp.get_text("EROL.COBOL.DEV(ARHTEST1)")
#print(x)
#x= ftp.get_binary("EROL.LOAD.DEV(ARHTEST1)")
#print(type(x),x)
#options=cobol_compile_options(x)
#print(options)
##
## j0 = ('//TOTO0 JOB (ACCT),SH,CLASS=A,MSGCLASS=H')
## j1 = ('//TOTO1 JOB (ACCT),SH,CLASS=A,MSGCLASS=H\n'
## '//STEP00 EXEC PGM=IEFBR14')
## j2 = ('//TOTO1 JOB (ACCT),SH,CLASS=A,MSGCLASS=H\n'
## '//STEP00 EXEC PGM=WIN32DLL')
## j3 = ('//TOTO0 JOB (ACCT),SH,CLASS=A,MSGCLASS=H')
## j4 = ('//TOTO0 JOB (ACCT),SH,CLASS=A,MSGCLASS=H'
## '________________________________________')
## # submit job: JCL ERROR
## job = ftp.submit_wait_job(j0, 'TOTO0', purge=True)
## print ("job0:",job["rc"])
##
## job = ftp.submit_wait_job(j1, 'TOTO1', purge=True)
## print ("job1:",job["rc"])
##
## job = ftp.submit_wait_job(j2, 'TOTO1', purge=True)
## print ("job2:",job["rc"])
##
## job = ftp.submit_wait_job(j3, 'TOTO0', purge=True)
## print ("job3:",job["rc"])
##
## job = ftp.submit_wait_job(j4, 'TOTO0', purge=True)
## print ("job4:",job["rc"])
## | zosftplib | /zosftplib-2.0.tar.gz/zosftplib-2.0/zosftplib.py | zosftplib.py |
=========
zosftplib
=========
**An FTP subclass which adds some mainframe z/OS features such as job submission, execution of SQL/DB2 queries, and more.**
Usage
=====
.. sourcecode :: python
import zosftplib
Myzftp = zosftplib.Zftp(mvshost, mvsuser, passwd,
timeout=500.0, sbdataconn='(ibm-1147,iso8859-1)')
Features
========
submitting sql/DB2 queries and retrieving their outputs
-------------------------------------------------------
.. sourcecode :: python
with open('/tmp/systables.csv', 'w') as outfile:
for line in Myzftp.exec_sql("SELECT * FROM SYSIBM.SYSTABLES WITH UR"):
outfile.write(';'.join(line.split()) + '\n')
submitting batch jobs, pending their outputs
--------------------------------------------
.. sourcecode :: python
# easy job for zos:
job = Myzftp.submit_wait_job('//IBMUSERX JOB MSGLEVEL(1,1)\n'
'//STEP001 EXEC PGM=IEFBR14',
purge=True)
print "rc:", job["rc"], "Jes status:", job["status"]
for line in job["output"]:
print line
This produces the following output::
rc: RC=0000 Jes status: OUTPUT (job purged)
1 J E S 2 J O B L O G -- S Y S T E M S Y S 1 -- N O D E N 1
0
17.49.35 JOB03914 ---- WEDNESDAY, 27 NOV 2013 ----
17.49.35 JOB03914 IRR010I USERID IBMUSER IS ASSIGNED TO THIS JOB.
17.49.35 JOB03914 ICH70001I IBMUSER LAST ACCESS AT 17:47:56 ON WEDNESDAY, NOVEMBER 27, 2013
17.49.35 JOB03914 $HASP373 IBMUSERX STARTED - INIT 1 - CLASS A - SYS SYS1
17.49.35 JOB03914 IEF403I IBMUSERX - STARTED - TIME=17.49.35
17.49.35 JOB03914 IEF404I IBMUSERX - ENDED - TIME=17.49.35
17.49.35 JOB03914 $HASP395 IBMUSERX ENDED
0------ JES2 JOB STATISTICS ------
- 27 NOV 2013 JOB EXECUTION DATE
- 2 CARDS READ
- 24 SYSOUT PRINT RECORDS
- 0 SYSOUT PUNCH RECORDS
- 1 SYSOUT SPOOL KBYTES
- 0.00 MINUTES EXECUTION TIME
END OF JES SPOOL FILE
1 //IBMUSERX JOB MSGLEVEL(1,1) JOB03914
2 //STEP001 EXEC PGM=IEFBR14
END OF JES SPOOL FILE
ICH70001I IBMUSER LAST ACCESS AT 17:47:56 ON WEDNESDAY, NOVEMBER 27, 2013
IEF142I IBMUSERX STEP001 - STEP WAS EXECUTED - COND CODE 0000
IEF373I STEP/STEP001 /START 2013331.1749
IEF374I STEP/STEP001 /STOP 2013331.1749 CPU 0MIN 00.01SEC SRB 0MIN 00.00SEC VIRT 4K SYS 232K EXT 0K SYS 10780K
IEF375I JOB/IBMUSERX/START 2013331.1749
IEF376I JOB/IBMUSERX/STOP 2013331.1749 CPU 0MIN 00.01SEC SRB 0MIN 00.00SEC
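
If you only need to place a job on the JES internal reader without
waiting for its output, ``submit_job`` can be used instead. A minimal
sketch (the JCL and job name are placeholders):

.. sourcecode :: python

    job = Myzftp.submit_job('//IBMUSERX JOB MSGLEVEL(1,1)\n'
                            '//STEP001 EXEC PGM=IEFBR14',
                            jobname='IBMUSERX', retry_info=5)
    print("jobid:", job["jobid"], "rc:", job["rc"])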
z/OS Catalog and JES spool information
---------------------------------------
.. sourcecode :: python
    for x in Myzftp.list_catalog('SYS1.*'):
        print(x["Dsname"], x["Dsorg"], x["Used"], "tracks")

    # print all "ACTIVE" jobs:
    for job in Myzftp.list_jes_spool('', '', 'ACTIVE'):
        print(job)
This produces the following output::
JOBNAME JOBID OWNER STATUS CLASS
BPXAS STC04218 START2 ACTIVE STC
PORTMAP STC04182 START2 ACTIVE STC
BPXAS STC04179 START2 ACTIVE STC
NFSC STC04171 START2 ACTIVE STC
CICSA STC04170 START2 ACTIVE STC
TCPIP STC04162 TCPIP ACTIVE STC
TN3270 STC04163 START2 ACTIVE STC
SDSF STC04160 START2 ACTIVE STC 1 spool files
TSO STC04158 START1 ACTIVE STC 1 spool files
INIT STC04157 START2 ACTIVE STC
TCPIP STC04162 TCPIP ACTIVE STC
VTAM STC04147 START1 ACTIVE STC
RACF STC04164 START2 ACTIVE STC
...
Retrieve thousands of members
-----------------------------
.. sourcecode :: python
    Myzftp.get_members('SYS1.PARMLIB', '/tmp/parmlib/')
    Myzftp.get_members('SYS1.LINKLIB', '/tmp/linklib/',
                       lmembers='*', retr='binary', ftp_threads=10)
Get/put sequential text/binary z/OS file
----------------------------------------
.. sourcecode :: python
Myzftp.download_binary('SYS1.MAN1', '/tmp/smf.bin')
Myzftp.upload_text('/tmp/bigdata.txt', 'IBMUSER.BIGDATA',
sitecmd='lrecl=1024 cyl pri=500 sec=100')
Installation
============
The package is available as a Pip package:
``$ sudo pip install zosftplib``
Or using easy_install:
``$ sudo easy_install zosftplib``
Changelog
=========
2.0 - (2019-01-15)
1.0 - (2013-11-25)
Initial release.
| zosftplib | /zosftplib-2.0.tar.gz/zosftplib-2.0/zosftplib.rst | zosftplib.rst |
=======
zoslogs
=======
.. image:: https://img.shields.io/pypi/v/zoslogs.svg
:target: https://pypi.python.org/pypi/zoslogs
:alt: Pypi
.. image:: https://github.com/Tam-Lin/zoslogs/actions/workflows/build.yml/badge.svg
:target: https://github.com/Tam-Lin/zoslogs/actions/workflows/build.yml
:alt: Build Status
.. image:: https://readthedocs.org/projects/zoslogs/badge/?version=latest
:target: https://zoslogs.readthedocs.io/en/latest/?version=latest
:alt: Documentation Status
Library for parsing z/OS log files (syslog, operlog) into individual messages. Because logs can be messy, and
authorized programs can write whatever they want to the log, by default it will discard anything that doesn't match
what a log entry should look like, and return whatever it can make sense of.
Please note that this was written to solve a problem I was having; it's by no means perfect, but it may solve a problem
you have, too, and I do plan on continuing to improve it as I have time. Pull requests and bug reports will certainly
be appreciated.
* Free software: Apache Software License 2.0
* Documentation: https://zoslogs.readthedocs.io.
Features
--------
* Handle compressed files
* Filtering messages
Credits
-------
Created by Kevin McKenzie
[email protected]
This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template.
.. _Cookiecutter: https://github.com/audreyr/cookiecutter
.. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
| zoslogs | /zoslogs-0.2.0.tar.gz/zoslogs-0.2.0/README.rst | README.rst |
.. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every little bit
helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/Tam-Lin/zoslogs/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug" and "help
wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
zoslogs could always use more documentation, whether as part of the
official zoslogs docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/Tam-Lin/zoslogs/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `zoslogs` for local development.
1. Fork the `zoslogs` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/zoslogs.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv zoslogs
$ cd zoslogs/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the
tests, including testing other Python versions with tox::
$ flake8 zoslogs tests
$ python setup.py test or pytest
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 3.6, 3.7, 3.8, 3.9 and 3.10. Check
https://github.com/Tam-Lin/zoslogs/workflows/check_pr.yaml
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ pytest tests.test_zoslogs
Deploying
---------
A reminder for the maintainers on how to deploy.
Make sure all your changes are committed (including an entry in HISTORY.rst).
Then run::
$ bump2version patch # possible: major / minor / patch
$ git push
$ git push --tags
| zoslogs | /zoslogs-0.2.0.tar.gz/zoslogs-0.2.0/CONTRIBUTING.rst | CONTRIBUTING.rst |
.. highlight:: shell
============
Installation
============
Stable release
--------------
To install zoslogs, run this command in your terminal:
.. code-block:: console
$ pip install zoslogs
This is the preferred method to install zoslogs, as it will always install the most recent stable release.
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for zoslogs can be downloaded from the `Github repo`_.
You can either clone the public repository:
.. code-block:: console
$ git clone git://github.com/Tam-Lin/zoslogs
Or download the `tarball`_:
.. code-block:: console
$ curl -OJL https://github.com/Tam-Lin/zoslogs/tarball/master
Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
.. _Github repo: https://github.com/Tam-Lin/zoslogs
.. _tarball: https://github.com/Tam-Lin/zoslogs/tarball/master
| zoslogs | /zoslogs-0.2.0.tar.gz/zoslogs-0.2.0/docs/installation.rst | installation.rst |
# ZOSPy
## About
Wrapper around the [Ansys Zemax OpticStudio](https://www.zemax.com/pages/opticstudio) API that provides a more pythonic and
intuitive way to interact with the [ZOS-API](https://www.zemax.com/blogs/free-tutorials/getting-started-with-zos-api)
through python using a .NET connection. It thereby allows you to do more
optics modelling with less coding.
In addition to full access to all the OpticStudio functions through the ZOS-API, ZOSPy provides the following features:
- Wrapper functions for several OpticStudio analyses in `zospy.analyses`;
- Easy access to solvers in `zospy.solvers`;
- Easy access to all API constants in `zospy.constants`;
- Autocomplete for all ZOS-API endpoints and constants.
## Warranty and liability
The code is provided as is, without any warranty. It is solely intended for research purposes. No warranty is given and
no rights can be derived from it, as is also stated in the [GNU General Public License Version 3](LICENSE.txt).
## Installing
ZOSPy is available on PyPi
```
pip install zospy
```
## Dependencies
ZOSPy officially supports Python 3.9 - 3.11. It may work with older Python versions, but support is not provided for
these versions.
### Python packages
- [Python for .NET](http://pythonnet.github.io/) 3.0.1
- [pandas](https://pandas.pydata.org/)
- [NumPy](https://numpy.org/)
### Software
- [Ansys Zemax OpticStudio](https://www.zemax.com/pages/opticstudio)
### Compatibility
> :warning: Version 1.0.0 introduced some breaking changes. See
> the [release notes](https://github.com/MREYE-LUMC/ZOSPy/releases/tag/v1.0.0) for more information.
ZOSPy is tested with the following versions of Python and Ansys Zemax OpticStudio:
| Zemax | 20.3.2 | 23.0.1 |
|-------------|--------|--------|
| Python 3.9 | ⚠ | ✔ |
| Python 3.10 | ⚠ | ✔ |
| Python 3.11 | ⚠ | ✔ |
✔: This version works without problems.
⚠: This version works, but the output of analyses can differ slightly from the used reference version (currently **OpticStudio 23 R1.01**).
## Referencing
When publishing results obtained with this package, please cite the paper in which the package was first used:<br>
van Vught L, Que I, Luyten GPM and Beenakker JWM.
_Effect of anatomical differences and intraocular lens design on Negative Dysphotopsia._
JCRS: Sep 06, 2022.
[doi: [10.1097/j.jcrs.0000000000001054](https://doi.org/10.1097/j.jcrs.0000000000001054) ] [[JCRS](https://journals.lww.com/jcrs/Abstract/9900/Effect_of_anatomical_differences_and_intraocular.107.aspx)]
If a direct reference of the package is also required, reference it using the following DOI:<br>
[](https://zenodo.org/badge/latestdoi/403590410)
## Contributing
Please read our [contribution guidelines](CONTRIBUTING.md) prior to opening a Pull Request.
## Basic usage
### Initiating connection
The connection as extension to running software OpticStudio is initiated as:
```python
import zospy as zp
zos = zp.ZOS()
zos.wakeup()
zos.connect_as_extension()
oss = zos.get_primary_system()
```
Make sure that the OpticStudio software is set up to be connected to as extension through the API. Alternatively, a
standalone OpticStudio application can be launched by changing the last two lines to:
```python
zos.create_new_application()
oss = zos.get_primary_system()
```
### Using solvers
Solvers for the Lens Data Editor are available through `zp.solvers`. Every solver requires a surface as its first
parameter.
#### Examples
```python
import zospy.solvers as solvers
surface = oss.LDE.GetSurfaceAt(2)
solvers.position(surface.ThicknessCell, from_surface=1, length=10)
```
### Performing analyses
Implemented analyses are available though `zp.analyses`. The available analyses are grouped in files that correspond to
the analysis groups in OpticStudio (e.g. `zp.analyses.mtf`and `zp.analyses.wavefront`). Every analysis requires the
OpticStudioSystem `oss` as first parameter.
#### Examples
```python
from zospy.analyses.mtf import fft_through_focus_mtf
mtf = fft_through_focus_mtf(oss, sampling='64x64', deltafocus=0.1, oncomplete='Close')
```
```python
from zospy.analyses.reports import cardinal_points
cp = cardinal_points(oss, surf1=3, surf2=4, oncomplete='Release')
```
A full description of the available function parameters is provided in the docstrings.
### Constants
After initiating the connection, all api constants are available through `zp.constants` (
e.g. `zp.constants.Editors.LDE.SurfaceType`). Note that these are only available after `zos.wakeup()` has been called,
as explained under **Initiating connection**.
### Convenience functions
Some convenience functions are available through `zp.functions`, e.g. to change a surface to a standard surface:
```python
newsurf = oss.LDE.InsertNewSurfaceAt(0)
zp.functions.lde.surface_change_type(newsurf, 'Standard')
```
### Full example
This example creates a simple optical system consisting of a single lens.
```python
# Create a new, empty system
oss.new()
# Set aperture and wavelength
oss.SystemData.Aperture.ApertureType = zp.constants.SystemData.ZemaxApertureType.FloatByStopSize
oss.SystemData.Wavelengths.GetWavelength(1).Wavelength = 0.543 # in μm
# Set the object at infinity
surface_object = oss.LDE.GetSurfaceAt(0)
surface_object.Thickness = float("inf")
# Use a very small stop size, so the system is approximately paraxial
surface_stop = oss.LDE.GetSurfaceAt(1)
surface_stop.SemiDiameter = 0.1
# Add a lens with n = 1.5
lens_front = oss.LDE.InsertNewSurfaceAt(2)
lens_front.Comment = "lens front"
lens_front.Radius = 20
lens_front.Thickness = 1
zp.solvers.material_model(lens_front.MaterialCell, refractive_index=1.5)
lens_back = oss.LDE.InsertNewSurfaceAt(3)
lens_back.Comment = "lens back"
lens_back.Radius = -20
lens_back.Thickness = 19.792 # System is in focus
```
### Logging
Some basic logging is implemented through the
standard [python logging module](https://docs.python.org/3/library/logging.html) (but still under development). The
following implementation examples assume that `import logging` has been executed.
1. To enable logging output from all ZOSPy and other modules using logging.basicConfig:
```python
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
```
2. To enable logging output from all ZOSPy and other modules using a root logger:
```python
fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
sh = logging.StreamHandler()
sh.setFormatter(fmt)
sh.setLevel(logging.DEBUG)
logger = logging.getLogger()
logger.addHandler(sh)
```
3. To enable logging output from only ZOSPy
```python
logging.getLogger('zospy').addHandler(logging.StreamHandler())
logging.getLogger('zospy').setLevel(logging.INFO)
```
## Contact
Feel free to contact us for any inquiries:
- L. van Vught ([email](mailto:[email protected]))
- J.W.M. Beenakker ([email](mailto:[email protected]))
- C. Haasjes ([email](mailto:[email protected]))
| zospy | /zospy-1.1.0.tar.gz/zospy-1.1.0/README.md | README.md |
import sys
from pdfminer.pdfparser import PDFDocument, PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf
from pdfminer.pdfdevice import PDFDevice, TagExtractor
from pdfminer.converter import XMLConverter, HTMLConverter, TextConverter
from pdfminer.cmapdb import CMapDB
from pdfminer.layout import LAParams
# main
def main(argv):
import getopt
def usage():
print ('usage: %s [-d] [-p pagenos] [-m maxpages] [-P password] [-o output] [-C] '
'[-n] [-A] [-V] [-M char_margin] [-L line_margin] [-W word_margin] [-F boxes_flow] '
'[-Y layout_mode] [-O output_dir] [-t text|html|xml|tag] [-c codec] [-s scale] file ...' % argv[0])
return 100
try:
(opts, args) = getopt.getopt(argv[1:], 'dp:m:P:o:CnAVM:L:W:F:Y:O:t:c:s:')
except getopt.GetoptError:
return usage()
if not args: return usage()
# debug option
debug = 0
# input option
password = ''
pagenos = set()
maxpages = 0
# output option
outfile = None
outtype = None
outdir = None
layoutmode = 'normal'
codec = 'utf-8'
pageno = 1
scale = 1
caching = True
showpageno = True
laparams = LAParams()
for (k, v) in opts:
if k == '-d': debug += 1
elif k == '-p': pagenos.update( int(x)-1 for x in v.split(',') )
elif k == '-m': maxpages = int(v)
elif k == '-P': password = v
elif k == '-o': outfile = v
elif k == '-C': caching = False
elif k == '-n': laparams = None
elif k == '-A': laparams.all_texts = True
elif k == '-V': laparams.detect_vertical = True
elif k == '-M': laparams.char_margin = float(v)
elif k == '-L': laparams.line_margin = float(v)
elif k == '-W': laparams.word_margin = float(v)
elif k == '-F': laparams.boxes_flow = float(v)
elif k == '-Y': layoutmode = v
elif k == '-O': outdir = v
elif k == '-t': outtype = v
elif k == '-c': codec = v
elif k == '-s': scale = float(v)
#
PDFDocument.debug = debug
PDFParser.debug = debug
CMapDB.debug = debug
PDFResourceManager.debug = debug
PDFPageInterpreter.debug = debug
PDFDevice.debug = debug
#
rsrcmgr = PDFResourceManager(caching=caching)
if not outtype:
outtype = 'text'
if outfile:
if outfile.endswith('.htm') or outfile.endswith('.html'):
outtype = 'html'
elif outfile.endswith('.xml'):
outtype = 'xml'
elif outfile.endswith('.tag'):
outtype = 'tag'
if outfile:
outfp = file(outfile, 'w')
else:
outfp = sys.stdout
if outtype == 'text':
device = TextConverter(rsrcmgr, outfp, codec=codec, laparams=laparams)
elif outtype == 'xml':
device = XMLConverter(rsrcmgr, outfp, codec=codec, laparams=laparams, outdir=outdir)
elif outtype == 'html':
device = HTMLConverter(rsrcmgr, outfp, codec=codec, scale=scale,
layoutmode=layoutmode, laparams=laparams, outdir=outdir)
elif outtype == 'tag':
device = TagExtractor(rsrcmgr, outfp, codec=codec)
else:
return usage()
for fname in args:
fp = file(fname, 'rb')
process_pdf(rsrcmgr, device, fp, pagenos, maxpages=maxpages, password=password,
caching=caching, check_extractable=True)
fp.close()
device.close()
outfp.close()
return
if __name__ == '__main__': sys.exit(main(sys.argv)) | zot | /zot-0.1.3.tar.gz/zot-0.1.3/pdf2txt.py | pdf2txt.py |
import os
import pdf2txt
import re
def format_author(author):
return '%s, %s' % author
class Item:
def __init__(self, item_dict={}):
self.__dict__ = item_dict
def __repr__(self):
return repr(self.__dict__)
def match(self, **kwargs):
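        # Score this item against the search terms: each regex match in a
        # searched field adds to the score; 0 means no match.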
        if 'keywords' not in kwargs: kwargs['keywords'] = []
fields_to_search = {'title': 'title',
'pub': 'publicationTitle',
'author': 'author_str',
'year': 'year',
'collection': 'collections',
}
result = []
match_score = 0
for field, to_search in fields_to_search.items():
if not hasattr(self, to_search): continue
if field in kwargs: search_terms = kwargs['keywords'] + kwargs[field]
else: search_terms = kwargs['keywords']
text_to_search = getattr(self, to_search)
if isinstance(text_to_search, set): text_to_search = ','.join(text_to_search)
for search_term in search_terms:
match_score += len(re.findall(search_term, text_to_search))
return match_score
def author_string(self):
if not hasattr(self, 'authors'): return ''
authors = getattr(self, 'authors')
if len(authors) == 1: return format_author(authors[0])
elif len(authors) == 2: return ' and '.join([format_author(a) for a in authors])
else:
return ', '.join([format_author(authors[n]) if n < len(authors)-1 else 'and %s' % format_author(authors[n]) for n in range(len(authors))])
author_str = property(author_string)
def bibliography(self):
bib = self.author_string()
if hasattr(self, 'year'): bib += ' %s.' % self.year
if hasattr(self, 'title'): bib += ' %s.' % self.title
if hasattr(self, 'publicationTitle'): bib += ' %s' % self.publicationTitle
v = []
if hasattr(self, 'volume'): v += ['Vol. %s' % self.volume]
if hasattr(self, 'issue'): v += ['Issue %s' % self.issue]
if hasattr(self, 'pages'): v += ['Pages %s' % self.pages]
if v: bib += ' ' + (', '.join(v))
if v or hasattr(self, 'publicationTitle'): bib += '.'
if hasattr(self, 'doi'): bib += ' doi:%s' % self.doi
return bib
def citation(self):
citation = ''
if hasattr(self, 'authors'):
citation = (self.authors[0][0])
if len(self.authors) > 1: citation += ' et al.'
else: citation = self.title
if hasattr(self, 'year'): citation += ' %s.' % self.year
return citation
def format_filename(self, name, storage_dir):
return name.replace('storage:', os.path.join(storage_dir, self.key + '/'))
def get_full_text(self, storage_dir):
if hasattr(self, 'attachments'):
for attachment in self.attachments:
# TODO: read text from pdf
pdf2txt.main(['pdf2txt', self.format_filename(attachment, storage_dir)])
else:
return "No PDF attachments." | zot | /zot-0.1.3.tar.gz/zot-0.1.3/item.py | item.py |
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import sqlalchemy as sql
from item import Item
class Zotero:
def __init__(self, zotero_dir):
self.zotero_dir = zotero_dir
self.zotero_db_path = os.path.abspath(os.path.join(zotero_dir, 'zotero.sqlite'))
self.zotero_storage_path = os.path.join(zotero_dir, 'storage/')
if not os.path.exists(self.zotero_db_path):
raise Exception('Zotero db not found at %s.' % self.zotero_db_path)
self.db = sql.create_engine('sqlite:///' + self.zotero_db_path)
self.db.echo = False
self.metadata = sql.MetaData(self.db)
# tables
self.items = sql.Table('items', self.metadata, autoload=True)
self.fields = sql.Table('fields', self.metadata, autoload=True)
self.item_data = sql.Table('itemData', self.metadata, autoload=True)
self.item_data_values = sql.Table('itemDataValues', self.metadata, autoload=True)
self.item_attachments = sql.Table('itemAttachments', self.metadata, autoload=True)
self.collections = sql.Table('collections', self.metadata, autoload=True)
self.collection_items = sql.Table('collectionItems', self.metadata, autoload=True)
self.item_creators = sql.Table('itemCreators', self.metadata, autoload=True)
self.creators = sql.Table('creators', self.metadata, autoload=True)
self.creator_data = sql.Table('creatorData', self.metadata, autoload=True)
self.item_notes = sql.Table('itemNotes', self.metadata, autoload=True)
self.get_items()
def get_items(self):
# get all items and associated field names, and store in items dictionary
query = sql.select([self.items.c.key, self.fields.c.fieldName, self.item_data_values.c.value],
(self.items.c.itemID == self.item_data.c.itemID) &
(self.item_data.c.fieldID == self.fields.c.fieldID) &
(self.item_data.c.valueID == self.item_data_values.c.valueID)
)
result = query.execute()
items = {}
for key, field_name, value in result:
if not key in items: items[key] = {'key': key}
if field_name == 'date':
items[key]['year'] = value[:4]
else: items[key][field_name] = value
# get authors for these items
query = sql.select([self.items.c.key, self.creator_data.c.lastName, self.creator_data.c.firstName],
(self.items.c.itemID == self.item_creators.c.itemID) &
(self.creators.c.creatorID == self.item_creators.c.creatorID) &
(self.creators.c.creatorDataID == self.creator_data.c.creatorDataID)
)
result = query.execute()
for key, last, first in result:
if not key in items: items[key] = {'key': key}
if not 'authors' in items[key]: items[key]['authors'] = []
items[key]['authors'].append((last, first))
# get all PDF attachments for these items
query = sql.select([self.items.c.key, self.item_attachments.c.path],
(self.items.c.itemID == self.item_attachments.c.itemID) &
(self.item_attachments.c.mimeType == 'application/pdf')
)
result = query.execute()
for key, path in result:
if not key in items: items[key] = {'key': key}
if not 'attachments' in items[key]: items[key]['attachments'] = []
items[key]['attachments'].append(path)
# get all notes for these items
query = sql.select([self.items.c.key, self.item_notes.c.note],
(self.items.c.itemID == self.item_notes.c.itemID)
)
result = query.execute()
for key, note in result:
if not key in items: items[key] = {'key': key}
if not 'notes' in items[key]: items[key]['notes'] = []
items[key]['notes'].append(note)
# get all collections
query = sql.select([self.collections.c.collectionName, self.items.c.key],
(self.collections.c.collectionID == self.collection_items.c.collectionID) &
(self.collection_items.c.itemID == self.items.c.itemID)
)
result = query.execute()
collections = {}
for collection, key in result:
if not collection in collections: collections[collection] = set()
collections[collection].add(key)
if not key in items: items[key] = {'key': key}
if not 'collections' in items[key]: items[key]['collections'] = set()
items[key]['collections'].add(collection)
self.all_items = {k: Item(v) for k, v in items.items()}
def search(self, best=False, **kwargs):
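        # Return all items with a positive match score; with best=True,
        # return only the single highest-scoring item.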
matches = [(item, item.match(**kwargs)) for item in self.all_items.values()]
if not matches: return []
if best: return [sorted(matches, key = lambda m: m[1], reverse=True)[0][0]]
return [m[0] for m in filter(lambda m: m[1] > 0, matches)]
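    # Placeholder methods, not implemented yet; the equivalent behaviour
    # currently lives in the command handlers of main() below.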
def read(self, keys):
pass
def notes(self, keys):
pass
def add_note(self, key, note_txt):
pass
def bib(self, keys):
pass
def help_msg():
print '''Usage: zot (command) (args)
Commands:
search [keywords]: return keys of all matching items
best [keywords]: return key of best search match
bib [keys]: view bibliography for one or more items
cit [keys]: view citations for one or more items
files [keys]: view all attached files for one or more items
read [keys]: view text content of attached PDFs for one or more items
notes [keys]: view all notes for one or more items
path (zotero_dir): set the path to your Zotero directory
help'''
def main():
if len(sys.argv) < 2:
help_msg()
return
command = sys.argv[1].lower()
if command == 'help':
help_msg()
return
elif command == 'path':
arg = sys.argv[2]
from settings import write_zotero_dir
write_zotero_dir(arg)
sys.exit()
from settings import get_zotero_dir
zotero_dir = get_zotero_dir()
z = Zotero(zotero_dir)
if len(sys.argv) > 2: args = sys.argv[2:]
else:
args = []
for line in sys.__stdin__:
args.append(line.strip())
if command in ('search', 'best'):
search_args = {}
n = 0
set_arg = 'keywords'
while n < len(args):
if not set_arg in search_args: search_args[set_arg] = []
arg = args[n]
if arg.startswith('--'): set_arg = arg[2:]
else: search_args[set_arg].append(arg)
n += 1
result = z.search(best=command=='best', **search_args)
for i in result:
print getattr(i, 'key')
elif command in ('bib', 'bibliography'):
for result in [z.all_items[key].bibliography() for key in args]:
print result
elif command in ('cit', 'cite', 'citation'):
for result in [z.all_items[key].citation() for key in args]:
print result
elif command == 'files':
for result in [z.all_items[key] for key in args]:
if hasattr(result, 'attachments'):
for attachment in result.attachments:
print result.format_filename(attachment, z.zotero_storage_path)
elif command == 'read':
for key in args:
z.all_items[key].get_full_text(z.zotero_storage_path)
elif command == 'notes':
for result in [z.all_items[key] for key in args]:
if hasattr(result, 'notes'):
for note in result.notes: print note
elif command == 'debug':
for i in [item.__dict__ for item in z.all_items.values()]: print i
else:
help_msg()
return
if __name__ == '__main__':
main() | zot | /zot-0.1.3.tar.gz/zot-0.1.3/zot.py | zot.py |
=================================================
zot4rst: Zotero for reStructuredText (docutils)
=================================================
Background
~~~~~~~~~~
Zotero_ is a useful tool for managing citations.
zot4rst is an extension to the Python docutils_ package for including
citations in reStructuredText_ documents.
zot4rst is developed under Linux, has been tested on Windows, and
should run under Mac OS.
Installation
~~~~~~~~~~~~
1. Install Zotero_.
2. Download and install zotxt:
https://bitbucket.org/egh/zotxt/downloads/zotxt.xpi
3. Install zot4rst::
sudo python setup.py install
Quickstart
~~~~~~~~~~
See ``example/example.rst``, and the generated ``example/example.pdf``
and ``example/example.html``. Citation syntax is identical to pandoc.
zot4rst automatically maps citation keys (e.g., @DoeTitle2010) to
entries in the zotero database. The key should be of the form
@AuthorTitleDate. So, for the item:
John Doe, “Article,” Journal of Generic Studies, 2006.
You could use: @DoeArticle2006. This should be easy to use, but the
reference needs to be unambiguous, which might be a problem if there
are multiple items with the same author, title, and year. I am looking
into ways to handle this better.
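
For example, to cite the item above with a page locator (assuming the
key resolves to a unique item in your library)::

    The article :xcite:`[@DoeArticle2006 p. 12]` argues otherwise.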
To include Zotero_ citations in a reStructuredText_ document, you must
use the bundled ``zrst2*`` scripts, which have been modified to
include support for ``zotero`` directives. These executables are
installed using ``setup.py`` above. Currently, they are:
- ``zrst2html``
- ``zrst2odt``
- ``zrst2pdf``
- ``zrst2pseudoxml``
- ``zrst2rst``
Sphinx
~~~~~~
To use in sphinx, simply add the ``zot4rst.sphinx`` extension to your
``conf.py`` file::
extensions = ['zot4rst.sphinx']
Pelican
~~~~~~~
To use in pelican_ (version 3.1 or later), add the following to your
``pelicanconf.py`` file:
PLUGINS = ['zot4rst.pelican_plugin',]
Details
~~~~~~~
Some details, in no particular order.
Note that ``zrst2rst`` will transform your citations into plain
reStructuredText files without the Zotero extension. For example::
A citation group :xcite:`[see @item1 p. 34-35; also @item3 chap. 3]`.
will become::
A citation group (see Doe 2005, p. 34–35; also Doe and Roe 2007,
chap. 3).
and the bibliography will be fully expanded. This can be used to
create RST files that will work without zot4rst.
If you use a footnote citation format, zot4rst will insert footnotes
for you.
However, if you also use regular autonumbered footnotes in the same
section or paragraph, the ordering will be wrong. So if you want to do
this, you will need to put your citations in a footnote
explicitly. For example::
Water is wet. [#]_ But there are those who dispute it. [#]_
.. [#] :xcite:`[See @item3]`.
.. [#] These people are wrong.
.. _Zotero: http://www.zotero.org/
.. _`org-mode`: http://orgmode.org/
.. _reStructuredText: http://docutils.sourceforge.net/rst.html
.. _docutils: http://docutils.sourceforge.net/
.. _`docutils snapshot`: http://docutils.sourceforge.net/docutils-snapshot.tgz
.. _`sphinx bibtex`: http://sphinxcontrib-bibtex.readthedocs.org/
.. _pelican: https://github.com/getpelican/pelican/
| zot4rst | /zot4rst-0.3.0.tar.gz/zot4rst-0.3.0/README.rst | README.rst |
import BeautifulSoup
import re
import xciterst
from docutils import nodes
def html2rst (html):
"""
Transform html to reStructuredText internal representation.
reStructuredText inline markup cannot be nested. The CSL processor
does produce nested markup, so we ask the processor to deliver HTML,
and use this function to convert it to the internal representation.
It depends on Beautiful Soup.
Note that the function supports small-caps, with the smallcaps
node name. The Translator instance used by the Writer that consumes
the output must be extended to support this node type.
"""
def cleanString(str):
"""
Replace HTML entities with character equivalents.
Only these four characters are encoded as entities by the CSL
processor when running in HTML mode.
"""
str = str.replace("&", "&")
str = str.replace("<", "<")
str = str.replace(" ", ">")
str = str.replace(" ", u"\u00A0")
return str
def is_empty_paragraph(node):
if isinstance(node, nodes.paragraph):
t = node.astext()
return t == ' ' or t == ''
else:
return False
def wrap_text(node_list):
# in rst text must be wrapped in a paragraph, I believe
# at least rst2pdf disappears the text if it is not - EGH
retval = []
last_was_text = False
# group text nodes in paragraphs
for node in node_list:
if isinstance(node, nodes.Inline) or isinstance(node, nodes.Text):
if last_was_text:
retval[-1] += node
else:
retval.append(nodes.paragraph("","", node))
last_was_text = True
else:
retval.append(node)
last_was_text = False
return [ n for n in retval if not(is_empty_paragraph(n)) ]
def compact(lst):
return [ x for x in lst if (x is not None) ]
def walk(html_node):
"""
Walk the tree, building a reStructuredText object as we go.
"""
if html_node is None:
return None
        elif (type(html_node) == BeautifulSoup.NavigableString or
              type(html_node) == str or type(html_node) == unicode):
# Terminal nodes
text = cleanString(unicode(html_node))
# whitespace is significant in reST, so normalize empties to a single space
if re.match("^\s+$", text):
return nodes.Text(" ")
else:
return nodes.Text(text)
else:
# Nesting nodes.
            if (html_node.name == 'span'):
if (html_node.has_key('style') and (html_node['style'] == "font-style:italic;")):
children = compact([walk(c) for c in html_node.contents])
return nodes.emphasis("", "", *children)
elif (html_node.has_key('style') and (html_node['style'] == "font-variant:small-caps;")):
children = compact([walk(c) for c in html_node.contents])
return xciterst.smallcaps("", "", *children)
elif (html_node.has_key('style') and (html_node['style'] == "font-style:normal;")):
children = compact([walk(c) for c in html_node.contents])
return nodes.emphasis("", "", *children)
else:
children = compact(walk("".join([ str(c) for c in html_node.contents ])))
return nodes.generated("", "", *children)
if (html_node.name == 'i'):
children = compact([walk(c) for c in html_node.contents])
return nodes.emphasis("", "", *children)
elif (html_node.name == 'b'):
children = compact([walk(c) for c in html_node.contents ])
return nodes.strong("", "", *children)
elif (html_node.name == 'p'):
children = compact([ walk(c) for c in html_node.contents ])
return nodes.paragraph("", "", *children)
elif (html_node.name == 'a'):
children = compact([ walk(c) for c in html_node.contents ])
                return nodes.reference("", "", *children, refuri=html_node['href'])
elif (html_node.name == 'div'):
children = compact([ walk(c) for c in html_node.contents ])
classes = re.split(" ", html_node.get('class', ""))
return nodes.container("", *wrap_text(children), classes=classes)
doc = BeautifulSoup.BeautifulSoup(html)
ret = compact([ walk(c) for c in doc.contents ])
return ret | zot4rst | /zot4rst-0.3.0.tar.gz/zot4rst-0.3.0/xciterst/util.py | util.py |
import docutils
import random
import string
import xciterst
from xciterst.parser import CiteParser
from xciterst.directives import CitationTransform
def handle_cite_cluster(inliner, cite_cluster):
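    """Register a citation cluster and return a node for it: a pending
    node for in-text styles (or when already inside a footnote), otherwise
    a reference wrapped in a new auto-numbered footnote."""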
document = inliner.document
xciterst.cluster_tracker.track(cite_cluster)
if xciterst.citeproc.in_text_style or \
(type(inliner.parent) == docutils.nodes.footnote):
# already in a footnote, or in-text style: just add a pending
pending = docutils.nodes.pending(CitationTransform)
pending.details['cite_cluster'] = cite_cluster
document.note_pending(pending)
return pending
else:
# not in a footnote & this is a footnote style; insert a
# reference & add a footnote to the end
label = "".join(random.choice(string.digits) for x in range(20))
# Set up reference
refnode = docutils.nodes.footnote_reference('[%s]_' % label)
refnode['auto'] = 1
refnode['refname'] = label
document.note_footnote_ref(refnode)
document.note_autofootnote_ref(refnode)
# Set up footnote
footnote = docutils.nodes.footnote("")
footnote['auto'] = 1
footnote['names'].append(label)
pending = docutils.nodes.pending(CitationTransform)
pending.details['cite_cluster'] = cite_cluster
paragraph = docutils.nodes.paragraph()
paragraph.setup_child(pending)
paragraph += pending
footnote.setup_child(paragraph)
footnote += paragraph
document.note_pending(pending)
document.note_autofootnote(footnote)
# Temporarily stash footnote as a child of the refnode
refnode.setup_child(footnote)
refnode += footnote
return refnode
def cite_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
"""Text role for citations."""
xciterst.check_citeproc()
[first_cluster, second_cluster] = CiteParser().parse(text)
nodeset = []
if first_cluster is not None:
nodeset.append(handle_cite_cluster(inliner, first_cluster))
nodeset.append(docutils.nodes.Text(" ", rawsource=" "))
nodeset.append(handle_cite_cluster(inliner, second_cluster))
return nodeset, []
docutils.parsers.rst.roles.register_canonical_role('xcite', cite_role) | zot4rst | /zot4rst-0.3.0.tar.gz/zot4rst-0.3.0/xciterst/roles.py | roles.py |
import docutils
import docutils.transforms
import xciterst
class BibliographyDirective(docutils.parsers.rst.Directive):
"""Directive for bibliographies."""
## This could be extended to support selection of
## included bibliography entries. The processor has
## an API to support this, although it hasn't yet been
## implemented in any products that I know of.
required_arguments = 0
optional_arguments = 1
has_content = False
def run(self):
pending = docutils.nodes.pending(BibliographyTransform)
pending.details.update(self.options)
self.state_machine.document.note_pending(pending)
return [pending]
class BibliographyTransform(docutils.transforms.Transform):
"""Transform which generates a bibliography. Wait for all items to
be registered, then we generate a bibliography."""
default_priority = 700
def apply(self):
self.startnode.replace_self(xciterst.citeproc.generate_rest_bibliography())
class FootnoteSortTransform(docutils.transforms.Transform):
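    """Renumber citation footnotes and ordinary auto-numbered footnotes as
    a single set, in order of appearance of their references, and move the
    footnote bodies to a sensible location."""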
default_priority = 641
def apply(self):
# Footnotes inserted via xcite are numbered before
# normal reST auto-numbered footnotes, so we renumber
# them as a single set, according to order of appearance
# of the refs in text, taking care to keep the
# ref and footnote numbering lined up.
footnotemap = {}
footnotes = self.document.autofootnotes
for i in range(0, len(self.document.autofootnotes), 1):
footnotemap[footnotes[i]['ids'][0]] = i
newlist = []
refs = self.document.autofootnote_refs
for i in range(0, len(refs), 1):
newlist.append(footnotes[footnotemap[refs[i]['refid']]])
self.document.autofootnotes = newlist
# The lists are now congruent and in document order, but the
# footnote numbers are screwed up, and the notes themselves
# may be in the wrong position.
# Reassign numbers to the footnotes
for i in range(0, len(self.document.autofootnotes), 1):
label = self.document.autofootnotes[i].children[0]
oldnum = label.children[0]
newnum = docutils.nodes.Text(str(i + 1))
label.replace(oldnum, newnum)
# Move the footnotes themselves to a more sensible location
# get the footnote label
for i in range(0, len(self.document.autofootnotes), 1):
footnote_node = self.document.autofootnotes[i]
ref_node = self.document.autofootnote_refs[i]
footnote_node.parent.remove(footnote_node)
footnotes_at_end = getattr(self.document.settings, 'footnotes_at_end', 1)
if footnotes_at_end:
self.document += footnote_node
self.document.setup_child(footnote_node)
else:
ref_parent = ref_node.parent
ref_and_note = docutils.nodes.generated()
ref_and_note += ref_node
ref_and_note.setup_child(ref_node)
ref_and_note += footnote_node
ref_and_note.setup_child(footnote_node)
ref_parent.replace(ref_node, ref_and_note)
ref_parent.setup_child(ref_and_note)
# Reassign numbers to the refs
# (we don't touch these until now because they may contain
# trojan footnotes)
for i in range(0, len(self.document.autofootnote_refs), 1):
ref = self.document.autofootnote_refs[i]
if len(ref.children) == 2:
ref.children.pop(0)
oldnum = ref.children[0]
newnum = docutils.nodes.Text(str(i + 1))
ref.replace(oldnum, newnum)
        for i in range(len(self.document.autofootnotes)):
            footnote = self.document.autofootnotes[i]
for child in footnote.children:
for grandchild in child.children:
if isinstance(grandchild, docutils.nodes.pending):
cluster = grandchild.details['cite_cluster']
cluster.note_index = i
empty = docutils.nodes.generated()
self.startnode.replace_self(empty)
class CitationTransform(docutils.transforms.Transform):
#
# Before Footnote
#
default_priority = 538
def apply(self):
cite_cluster = self.startnode.details['cite_cluster']
next_pending = docutils.nodes.pending(CitationSecondTransform)
next_pending.details['cite_cluster'] = cite_cluster
self.document.note_pending(next_pending)
self.startnode.replace_self(next_pending)
class CitationSecondTransform(docutils.transforms.Transform):
"""Second pass transform for a citation. We use two passes because
we want to generate all the citations in a batch, and we need to
get the note indexes first."""
#
# After Footnote (to pick up the note number)
#
default_priority = 650
def apply(self):
cite_cluster = self.startnode.details['cite_cluster']
footnote_node = self.startnode.parent.parent
        if isinstance(footnote_node, docutils.nodes.footnote):
            cite_cluster.note_index = int(str(footnote_node.children[0].children[0]))
newnode = xciterst.citeproc.get_citation(cite_cluster)
self.startnode.replace_self(newnode)
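# Relative order of the transforms above, by default_priority (lower runs
# earlier): CitationTransform (538) runs before docutils footnote
# processing, FootnoteSortTransform (641) renumbers and repositions the
# footnotes, CitationSecondTransform (650) then picks up the final note
# numbers, and BibliographyTransform (700) renders the bibliography last.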
docutils.parsers.rst.directives.register_directive('bibliography', BibliographyDirective) | zot4rst | /zot4rst-0.3.0.tar.gz/zot4rst-0.3.0/xciterst/directives.py | directives.py |
import re
class CitationInfo(object):
"""Class to hold information about a citation for passing to
citeproc."""
def __init__(self, citekey, label=None, locator=None, suppress_author=False, prefix=None, suffix=None, author_only=False, theid=None):
self.citekey = citekey
self.label = label
self.locator = locator
self.suppress_author = suppress_author
self.prefix = prefix
if self.prefix:
self.prefix = re.sub(r'\s+,', ',', self.prefix)
self.suffix = suffix
if self.suffix:
self.suffix = re.sub(r'\s+,', ',', self.suffix)
self.author_only = author_only
self.id = theid
def __str__(self):
if self.suppress_author: suppress_str = "-"
else: suppress_str = ""
return "%s %s%s(%s) %s"%(self.prefix, suppress_str, self.citekey, self.locator, self.suffix)
def __repr__(self):
return "CitationInfo(%s)"%(repr({
"citekey" : self.citekey,
"label" : self.label,
"locator" : self.locator,
"suppress_author" : self.suppress_author,
"prefix" : self.prefix,
"suffix" : self.suffix,
"author_only" : self.author_only,
"id" : self.id}))
def __eq__(self, other):
return (isinstance(other, CitationInfo) and
(self.citekey == other.citekey) and
(self.label == other.label) and
(self.locator == other.locator) and
(self.suppress_author == other.suppress_author) and
(self.prefix == other.prefix) and
(self.suffix == other.suffix) and
(self.author_only == other.author_only))
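# Illustrative example (hypothetical citekey and values):
#
#   CitationInfo(citekey="doe2006", locator="p. 33", prefix="see",
#                suppress_author=True)
#
# corresponds roughly to the source text "[see -@doe2006, p. 33]".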
class CitationCluster(object):
"""Class to hold a cluster of citations, with information about
them suitable for submission to citeproc."""
def __init__(self, citations):
self.citations = citations
self.note_index = 0
self.index = 0
def __eq__(self, other):
return (isinstance(other, CitationCluster) and
(self.citations == other.citations) and
(self.note_index == other.note_index) and
(self.index == other.index))
def __repr__(self):
return "CitationCluster(%s)"%(repr(self.citations)) | zot4rst | /zot4rst-0.3.0.tar.gz/zot4rst-0.3.0/xciterst/citations.py | citations.py |
import json
import os.path
import re

import spidermonkey

import xciterst
class Citeproc(xciterst.CiteprocWrapper):
def js_path(self, p):
return os.path.join(os.path.dirname(__file__), 'js', p)
def js_exec(self, p):
return self.context.execute(open(self.js_path(p)).read())
def __init__(self):
rt = spidermonkey.Runtime()
self.context = rt.new_context()
self.js_exec('xmle4x.js')
self.js_exec('citeproc.js')
locale = open("../citeproc-js/locale/locales-en-US.xml").read()
        localeJSON = json.dumps(locale, ensure_ascii=False)
self.context.execute('locale_en = %s;' % localeJSON)
self.js_exec('sys.js')
self.js_exec("abbreviations.js")
# Unneeded in this context
#self.context.execute('styleName = \"%s\";' % name)
# Pull in csl through format declaration
#csl = open('../mlz-styles/mlz-%s.csl' % name).read()
#cslJSON = json.dumps(csl,ensure_ascii=False)
#self.context.execute('csl = %s;' % cslJSON)
# Instantiate engine through set format declaration
#self.context.execute('sys = new MySys();')
#self.context.execute('citeproc = new CSL.Engine(sys,csl);')
# Use explicit bibliography loading
#itemsJSON = open('./json/items.json' % name).read()
#self.context.execute('citeproc.sys._cache = %s;' % itemsJSON)
cite = [""];
self.context.add_global("cite", cite)
monitor = [""];
self.context.add_global("monitor", monitor)
self.is_in_text_style = self.context.execute("('in-text' === citeproc.opt.xclass);");
def citeproc_update_items(self, ids):
"""Call updateItems in citeproc."""
return self.context.execute("citeproc.updateItems(%s)" % json.dumps(ids))
def citeproc_make_bibliography(self):
"""Call makeBibliography in citeproc. Should return an HTML string."""
pass
def citeproc_append_citation_cluster_batch(self, clusters):
"""Call appendCitationCluster for a batch of citations."""
pass
    def instantiateCiteProc(self, format):
        # Accept either a bare style name or a path such as ".../mlz-<name>.csl".
        m = re.match(r".*/(?:mlz-)?(.*?)(?:\.csl)?$", format)
        if m:
            format = m.group(1)
        csl = open('../mlz-styles/mlz-%s.csl' % format).read()
        cslJSON = json.dumps(csl, ensure_ascii=False)
        self.context.execute('csl = %s;' % cslJSON)
        self.context.execute('sys = new MySys();')
        self.context.execute('citeproc = new CSL.Engine(sys,csl);')
        support = open('./js/citeproc-support.js').read()
        self.context.execute(support)
        # The engine now exists, so we can ask whether the style is in-text.
        self.is_in_text_style = self.context.execute("('in-text' === citeproc.opt.xclass);")
        # For debugging -- allows print() to be used in citeproc.js
        def printme(txt):
            print(txt)
        self.context.add_global("print", printme)
import re

from pyparsing import OneOrMore, Optional, Regex, ZeroOrMore
from xciterst.citations import CitationInfo, CitationCluster
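# CiteParser accepts a Pandoc-like citation syntax. Illustrative examples
# (the citekeys are hypothetical):
#
#   @doe2006                    a short, author-in-text cite
#   -@doe2006                   the same, with the author suppressed
#   [see @doe2006, p. 33]       a bracketed cite with prefix and locator
#   [@doe2006; @smith2011]      several cites in a single cluster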
class CiteParser(object):
    class Base(object):
def __init__(self, name, content):
self.content = content
self.name = name
def __str__(self):
            if isinstance(self.content, list):
return "%s(%s)"%(self.name, ", ".join([ str(c) for c in self.content]))
else:
return "%s(%s)"%(self.name, self.content)
class Locator(Base):
def __init__(self, content):
CiteParser.Base.__init__(self, "Locator", content)
class Suffix(Base):
def __init__(self, content):
CiteParser.Base.__init__(self, "Suffix", content)
class Prefix(Base):
def __init__(self, content):
CiteParser.Base.__init__(self, "Prefix", content)
class CiteKey(Base):
def __init__(self, toks):
self.suppress_author = False
if len(toks) == 3:
self.suppress_author = True
self.citekey = toks[-1]
CiteParser.Base.__init__(self, "CiteKey", self.citekey)
class FullCite(Base):
def __init__(self, toks):
CiteParser.Base.__init__(self, "FullCite", toks.asList())
class ShortCite(Base):
def __init__(self, toks):
self.suppress_author = False
if len(toks) == 3:
self.suppress_author = True
self.citekey = toks[-1]
CiteParser.Base.__init__(self, "ShortCite", self.citekey)
class ShortCiteExtra(Base):
def __init__(self, toks):
CiteParser.Base.__init__(self, "ShortCiteExtra", toks.asList())
def _results2cites(self, pieces, cites=None, current_cite=None):
if cites is None: cites = [None, CitationCluster([])]
prefix = None
for piece in pieces:
if isinstance(piece, CiteParser.ShortCite):
# actually 2 cites, first author-only, then suppress-author
first = CitationInfo(citekey=piece.citekey,
author_only=True)
current_cite = CitationInfo(citekey=piece.citekey,
suppress_author=True)
cites[0] = CitationCluster([first])
cites[1].citations.append(current_cite)
elif isinstance(piece, CiteParser.CiteKey):
current_cite = CitationInfo(citekey=piece.citekey,
suppress_author=piece.suppress_author,
prefix=prefix)
cites[1].citations.append(current_cite)
elif isinstance(piece, CiteParser.Prefix):
prefix = piece.content
elif isinstance(piece, CiteParser.Locator):
current_cite.locator = piece.content
elif isinstance(piece, CiteParser.Suffix):
current_cite.suffix = piece.content
elif isinstance(piece, CiteParser.ShortCiteExtra):
self._results2cites(piece.content, cites, current_cite)
elif isinstance(piece, CiteParser.FullCite):
self._results2cites(piece.content, cites)
return cites
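    # _results2cites returns a two-element list: the first element is an
    # author-only cluster (for in-text styles) or None, and the second is
    # the main cluster. For example, a bare "@doe2006" (hypothetical key)
    # produces an author-only cite in the first cluster and a matching
    # suppress-author cite in the second.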
def parse(self, what):
WORD_CHAR_RE = r'[\w.,\'\"\(\)</>-]'
greedyToken = Regex(r'%s+'%(WORD_CHAR_RE))
wordWithDigits = Regex(r'%s*[0-9]%s*'%(WORD_CHAR_RE, WORD_CHAR_RE))
# translate embedded emph & strong RST to HTML
emText = '*' + OneOrMore(greedyToken) + '*'
emText.setParseAction(lambda s,l,t:
"<i>%s</i>"%(" ".join(t[1:-1])))
strongText = '**' + OneOrMore(greedyToken) + '**'
strongText.setParseAction(lambda s,l,t:
"<b>%s</b>"%(" ".join(t[1:-1])))
text = strongText | emText | greedyToken
locator = (Optional(',') + OneOrMore(wordWithDigits)) ^ (Optional(',') + Optional(greedyToken) + OneOrMore(wordWithDigits))
def locator_parse_action(s, l, t):
raw = " ".join(t)
# strip leading comma
            return CiteParser.Locator(re.sub(r'^,\s+', '', raw))
locator.setParseAction(locator_parse_action)
citeKey = Optional('-') + '@' + Regex(r'[\w-]+')
citeKey.setParseAction(lambda s,l,t: CiteParser.CiteKey(t))
# suffix comes after a cite
suffix = OneOrMore(text)
suffix.setParseAction(lambda s,l,t: CiteParser.Suffix(" ".join(t)))
# prefix comes before a cite
prefix = OneOrMore(text)
prefix.setParseAction(lambda s,l,t: CiteParser.Prefix(" ".join(t)))
# a short cite, author + (date)
shortCite = Optional('-') + '@' + Regex(r'[\w-]+')
shortCite.setParseAction(lambda s,l,t: CiteParser.ShortCite(t))
# a full & complete cite (for use in brackets)
fullCite = (citeKey | (prefix + citeKey)) + Optional(locator) + Optional(suffix)
fullCite.setParseAction(lambda s,l,t: CiteParser.FullCite(t))
restCite = ';' + fullCite
bracketedCite = ('[' + fullCite + ZeroOrMore(restCite) + ']')
shortCiteExtra = ('[' + locator + Optional(suffix) + ZeroOrMore(restCite) + ']')
shortCiteExtra.setParseAction(lambda s,l,t: CiteParser.ShortCiteExtra(t))
topCite = bracketedCite ^ shortCite + shortCiteExtra ^ shortCite + bracketedCite ^ shortCite
raw = topCite.parseString(what, True)
return self._results2cites(list(raw)) | zot4rst | /zot4rst-0.3.0.tar.gz/zot4rst-0.3.0/xciterst/parser.py | parser.py |
import sys

import docutils
import docutils.nodes
import docutils.utils
from docutils.parsers.rst import roles

import xciterst
from xciterst.parser import CiteParser
from xciterst.util import html2rst
def check_citeproc():
if not xciterst.citeproc:
        ## A kludge, but makes a big noise about the extension syntax for clarity.
        sys.stderr.write("#####\n")
        sys.stderr.write("##\n")
        sys.stderr.write("## Must set up a citeproc directive before the xcite role is used.\n")
        sys.stderr.write("##\n")
        sys.stderr.write("#####\n")
        raise docutils.utils.ExtensionOptionError("must set up a citeproc directive before the xcite role is used.")
class ClusterTracker(object):
"""Class used to track citation clusters."""
def __init__(self):
self.clusters = []
def get(self):
return self.clusters
def track(self, cluster):
self.clusters.append(cluster)
index = len(self.clusters) - 1
cluster.index = index
# tracker for clusters
cluster_tracker = ClusterTracker()
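# Illustrative use of the tracker (a sketch; CitationCluster is defined in
# xciterst.citations):
#
#   cluster = CitationCluster([])
#   cluster_tracker.track(cluster)   # assigns cluster.index to its position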
class CiteprocWrapper(object):
"""Class which represents a citeproc instance."""
def __init__(self):
self.citations = None
def generate_rest_bibliography(self):
"""Generate a bibliography of reST nodes."""
clusters = xciterst.cluster_tracker.get()
bibdata = self.citeproc_process(clusters)[1]
        if not bibdata:
            return html2rst("")
        else:
            return html2rst("%s%s%s" % (bibdata[0]["bibstart"], "".join(bibdata[1]), bibdata[0]["bibend"]))
def cache_citations(self):
        if self.citations is None:
clusters = xciterst.cluster_tracker.get()
html = self.citeproc_process(clusters)[0]
self.citations = [ html2rst(n) for n in html ]
def get_citation(self, cluster):
self.cache_citations()
return self.citations[cluster.index]
# override in subclass
    def citeproc_process(self, citations):
        """Process the citation clusters and return a pair
        (rendered citation HTML strings, bibliography data)."""
        pass
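    # A minimal subclass sketch (names assumed for illustration only --
    # render() below is hypothetical, not part of this package):
    #
    #   class MyWrapper(CiteprocWrapper):
    #       def citeproc_process(self, clusters):
    #           html_citations = [render(c) for c in clusters]
    #           bibdata = None  # or the output of makeBibliography
    #           return (html_citations, bibdata)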
# placeholder for citeproc instance
citeproc = None
citekeymap = None
class smallcaps(docutils.nodes.Inline, docutils.nodes.TextElement): pass
roles.register_local_role("smallcaps", smallcaps) | zot4rst | /zot4rst-0.3.0.tar.gz/zot4rst-0.3.0/xciterst/__init__.py | __init__.py |