Dataset columns: repo_name (string, lengths 5-92), path (string, lengths 4-232), copies (string, 19 classes), size (string, lengths 4-7), content (string, lengths 721-1.04M), license (string, 15 classes), hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B), line_mean (float64, 6.51-99.9), line_max (int64, 15-997), alpha_frac (float64, 0.25-0.97), autogenerated (bool, 1 class)
repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
kickapoo/prometheus | prometheus/__init__.py | 1 | 1867 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_moment import Moment
from flask_bootstrap import Bootstrap
import pyowm
app = Flask(__name__, instance_relative_config=True)
# config.default
app.config.from_object('config.default')
# config.prometheus-settings.py
app.config.from_object('config.prometheus-settings')
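# Note: keys loaded from config.prometheus-settings override any overlapping
# keys already loaded from config.default (standard Flask config behaviour).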
db = SQLAlchemy(app)
login_manager = LoginManager(app)
moment = Moment(app)
bootstrap = Bootstrap(app)
owm = pyowm.OWM(app.config['OWM_KEY'])
# Landing_page holds a single view
from blueprints.landing_page import landing_page as landing_page_blueprint
app.register_blueprint(landing_page_blueprint, url_prefix='/')
# Auth holds all login/logout/registration actions.
# Auth uses 'User' model with NO relation mapping to app.models.core
# Authentication is made with the help of Flask-Login
from blueprints.auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix='/auth')
# Coordinator holds an 'admin' panel for coordinators
# A coordinator can create/edit/delete (CRUD)
# Team/Volunteer/Needs records in the database
from blueprints.coordinators import coordinator as coordinator_blueprint
app.register_blueprint(coordinator_blueprint, url_prefix='/coordinator')
# Volunteer holds a single page that lets a potential volunteer select his/her
# daily contribution in Needs.
from blueprints.volunteers import volunteer as volunteer_blueprint
app.register_blueprint(volunteer_blueprint, url_prefix='/volunteer')
# Flatpages holds terms-of-use, policy etc
from blueprints.flatpages import flatpages as flatpages_blueprint
app.register_blueprint(flatpages_blueprint, url_prefix='/flatpages')
# Prometheus API using Basic Auth authentication
from blueprints.api import api as api_blueprint
app.register_blueprint(api_blueprint, url_prefix='/api/v1')
| gpl-3.0 | 4,680,024,369,101,816,000 | 38.723404 | 77 | 0.803964 | false |
shuitian/pokemon_rpg | Assets/server/sqldata.py | 1 | 2027 | #-*- coding:utf-8 –*-
import sqlite3,os,time,json
class sql(object):
"""处理数据库的类"""
def __init__(self):
"""获取数据库连接"""
super(sql, self).__init__()
db = 'monsters.db'
self.conn = sqlite3.connect(db)
print "Open",db,"Success"
def __del__(self):
"""关闭数据库连接"""
self.conn.close()
def show_table(self, table_name):
"""显示表中所有数据"""
if table_name == None:
table_name = "None"
table = self.execute("SELECT * from " + table_name)
if table != None:
print table.fetchall()
def execute(self, seq):
"""执行数据库语句"""
# print seq
return self.conn.execute(seq)
def get_monster(self, id):
"""在MONSTER表中查询数据"""
table = self.execute("SELECT * from MONSTER where id =" + str(id))
if table != None:
return table.fetchone()
def get_item(self, id):
"""在ITEM表中查询数据"""
table = self.execute("SELECT * from item where id =" + str(id))
if table != None:
return table.fetchone()
keys = ["id",'name','hp','attack','defence','gold']
def get_item_json(self, id):
d = {"type":"item"}
l = self.get_item(id)
if(l != None):
d['body'] = dict(zip(self.keys,l))
return json.dumps(d)
def get_monster_json(self, id):
d = {"type":"monster"}
l = self.get_monster(id)
if(l != None):
d['body'] = dict(zip(self.keys,l))
return json.dumps(d)
def get_json_from_message(connection, string):
d = json.loads(string)
if d["type"] == 'item':
return connection.get_item_json(d['body']['id'])
elif d["type"] == 'monster':
return connection.get_monster_json(d['body']['id'])
if __name__ == '__main__':
"""创建怪物表"""
s = sql()
# s.get_monster(1)
# s.get_item(1)
# dict1 = {}
# dict1['type'] = "monster"
# table = s.execute("SELECT * from MONSTER where id =" + str(1))
# dict1['body'] = dict(zip(["id",'name','hp','attack','defence','gold'],table.fetchone()))
# print json.dumps(dict1)
print s.get_item_json(1),
print get_json_from_message(s, s.get_item_json(1))
| mit | 71,420,953,494,727,550 | 23.551282 | 91 | 0.602089 | false |
jsonchin/nba_stats_scraper_db_storage | nba_ss_db/scrape/query_param_values.py | 1 | 2097 | """
Contains functions to retrieve fillable values for
API request jobs.
"""
import datetime
from .. import db, CONFIG
from ..scrape.utils import get_date_before, format_date_for_api_request, PROPER_DATE_FORMAT
QUERY_PARAM_VALUES = {}
def get_possible_query_param_values(query_param, is_daily):
"""
Valid query parameters are:
- {SEASON}
- {PLAYER_POSITION}
- {GAME_ID}
- {PLAYER_ID}
- {GAME_DATE}
- {DATE_TO}
The last four return a dictionary mapping season to possible values.
All other query parameters return a list of values to iterate through.
"""
if query_param not in QUERY_PARAM_VALUES:
if query_param == '{SEASON}':
values = CONFIG['SEASONS']
elif query_param == '{PLAYER_ID}':
values = db.retrieve.fetch_player_ids()
elif query_param == '{GAME_DATE}':
values = db.retrieve.fetch_game_dates()
elif query_param == '{DATE_TO}':
values = db.retrieve.fetch_game_dates()
for season in values:
for i in range(len(values[season])):
game_date = values[season][i]
date_before = get_date_before(game_date)
values[season][i] = format_date_for_api_request(date_before)
elif query_param == '{GAME_ID}':
values = db.retrieve.fetch_game_ids()
elif query_param == '{PLAYER_POSITION}':
values = ['G', 'F', 'C']
else:
raise ValueError(
'Unsupported fillable type: {}'.format(query_param))
QUERY_PARAM_VALUES[query_param] = values
if is_daily:
if query_param == '{SEASON}':
return [CONFIG['CURRENT_SEASON']]
elif query_param == '{DATE_TO}':
today_date = datetime.datetime.today().strftime(PROPER_DATE_FORMAT)
prev_dates = QUERY_PARAM_VALUES[query_param][CONFIG['CURRENT_SEASON']]
return {CONFIG['CURRENT_SEASON']: prev_dates + [format_date_for_api_request(get_date_before(today_date))]}
return QUERY_PARAM_VALUES[query_param]
| apache-2.0 | -6,235,656,609,852,025,000 | 35.155172 | 118 | 0.592752 | false |
wheeler-microfluidics/open-drop | pavement.py | 1 | 1896 | from collections import OrderedDict
import sys
from importlib import import_module
from paver.easy import task, needs, path, sh, cmdopts, options
from paver.setuputils import setup, find_package_data, install_distutils_tasks
try:
from base_node_rpc.pavement_base import *
except ImportError:
pass
sys.path.insert(0, '.')
import version
install_distutils_tasks()
DEFAULT_ARDUINO_BOARDS = ['uno']
PROJECT_PREFIX = [d for d in path('.').dirs()
if d.joinpath('Arduino').isdir()
and d.name not in ('build', )][0].name
name = PROJECT_PREFIX.replace('_', '-')
package_name = name
rpc_module = import_module(PROJECT_PREFIX)
VERSION = version.getVersion()
URL='http://github.com/wheeler-microfluidics/%s.git' % name
PROPERTIES = OrderedDict([('name', PROJECT_PREFIX),
('manufacturer', 'GaudiLabs'),
('software_version', VERSION),
('url', URL)])
options(
rpc_module=rpc_module,
PROPERTIES=PROPERTIES,
base_classes=['BaseNodeSerialHandler',
'BaseNodeEeprom',
'BaseNodeI2c',
'BaseNodeI2cHandler<Handler>',
'BaseNodeConfig<ConfigMessage, Address>',
'BaseNodeState<StateMessage>'],
rpc_classes=['open_drop::Node'],
DEFAULT_ARDUINO_BOARDS=DEFAULT_ARDUINO_BOARDS,
setup=dict(name=PROJECT_PREFIX.replace('_', '-'),
version=VERSION,
description='Arduino RPC node packaged as Python package.',
author='Christian Fobel',
author_email='[email protected]',
url=URL,
license='GPLv2',
install_requires=['base-node-rpc>=0.11.post21',
'arduino-helpers>=0.3.post10'],
include_package_data=True, packages=[str(PROJECT_PREFIX)]))
| gpl-3.0 | -3,173,363,006,376,368,600 | 36.92 | 78 | 0.594937 | false |
DiegoCorrea/ouvidoMusical | apps/similarities/Cosine/algorithm/views.py | 1 | 2656 | from .algorithm import CosineSimilarity
from apps.data.songs.models import Song, SongSimilarity
from apps.similarities.Cosine.benchmark.models import BenchCosine_SongTitle
from django.db import transaction
from django.utils import timezone
from multiprocessing.dummy import Pool as ThreadPool
from apps.CONSTANTS import MAX_THREAD
from random import sample
import numpy as np
import logging
logger = logging.getLogger(__name__)
songInterator = {}
similarityMatrix = []
def saveTitleSimilarity(sBase):
global similarityMatrix
global songInterator
logger.info("++ Song Psition: " + str(songInterator[sBase]['pos']))
for sComp in songInterator:
if songInterator[sBase]['pos'] >= songInterator[sComp]['pos']:
continue
try:
SongSimilarity.objects.create(
songBase=songInterator[sBase]['obj'],
songCompare=songInterator[sComp]['obj'],
similarity=similarityMatrix[songInterator[sBase]['pos']][songInterator[sComp]['pos']]
)
except Exception as e:
logger.error(str(e))
continue
def TitleSimilarity():
logger.info("[Start Title Similarity]")
global similarityMatrix
global songInterator
allSongs = Song.objects.all()
line = 0
similarityMatrix = CosineSimilarity([song.title for song in allSongs])
for song in allSongs:
songInterator.setdefault(song.id, {
'obj': song,
'pos': line
}
)
line += 1
    # Persist title similarity
    logger.info("Start to persist title similarity")
pool = ThreadPool(MAX_THREAD)
with transaction.atomic():
pool.map(saveTitleSimilarity, songInterator)
pool.close()
pool.join()
logger.info("[Finish Title Similarity]")
def TitleSimilarityWithObserver(setSize):
logger.info("[Start Title Similarity]")
allSongs = sample(set(Song.objects.all()), setSize)
line = 0
similarityVale = []
startedAt = timezone.now()
similarityMatrix = CosineSimilarity([song.title for song in allSongs])
finishedAt = timezone.now()
for i in range(len(allSongs)):
for j in range(i, len(allSongs)):
if j == i:
continue
line += 1
similarityVale.append(similarityMatrix[i][j])
BenchCosine_SongTitle.objects.create(
setSize=setSize,
similarity=np.mean(similarityVale),
started_at=startedAt,
finished_at=finishedAt
)
logger.info(
"Benchmark: Start at - "
+ str(startedAt)
+ " || Finished at -"
+ str(finishedAt)
)
| mit | 6,042,651,033,651,817,000 | 30.247059 | 101 | 0.643825 | false |
blackmatrix7/apizen | app/config.py | 1 | 3877 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/5/28 20:50
# @Author : BlackMatrix
# @Site :
# @File : config.py
# @Software : PyCharm
import os
__author__ = 'blackmatrix'
class BaseConfig:
DEBUG = True
TESTING = False
HOST = '0.0.0.0'
PORT = 8080
WORKS = 5
SITE_NAME = 'ApiZen'
LOGGER_NAME = 'Api Zen'
    # Database configuration
MARIADB_HOST = os.environ.get('MARIADB_HOST', '127.0.0.1')
MARIADB_PORT = os.environ.get('MARIADB_PORT', 3306)
MARIADB_USER = os.environ.get('MARIADB_USER', 'apizen')
MARIADB_PASS = os.environ.get('MARIADB_PASS', 'apizen')
MARIADB_DB = os.environ.get('MARIADB_DB', 'apizen')
# SQLAlchemy
SQLALCHEMY_BINDS = {}
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://{0}:{1}@{2}:{3}/{4}?charset=utf8'.format(
MARIADB_USER,
MARIADB_PASS,
MARIADB_HOST,
MARIADB_PORT,
MARIADB_DB
)
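    # With the defaults above this evaluates to
    # mysql+pymysql://apizen:apizen@127.0.0.1:3306/apizen?charset=utf8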
# API ZEN
APIZEN_ROUTE = ('/api/router/rest', '/api/router/json')
APIZEN_VERSIONS = ('app.demo.methods', 'app.webapi.methods')
APIZEN_DATE_FMT = '%Y/%m/%d'
APIZEN_DATETIME_FMT = '%Y/%m/%d %H:%M:%S'
APIZEN_RESP_FMT = '{"meta": {"code": {code}, "message": {message}}, "response": {response}}'
# Celery
CELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL')
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
CELERY_ACCEPT_CONTENT = ['json']
CELERY_REDIRECT_STDOUTS_LEVEL = 'INFO'
CELERY_IMPORTS = ('app.tasks', )
    # Number of concurrent celery worker processes
    CELERYD_CONCURRENCY = 3
    # Default queue
CELERY_DEFAULT_QUEUE = '[email protected]'
# Flask Mail
ADMIN_EMAIL = os.environ.get('ADMIN_EMAIL', '').split(',')
MAIL_SERVER = os.environ.get('MAIL_SERVER')
MAIL_PORT = 25
MAIL_USE_TLS = False
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
MAIL_DEFAULT_SENDER = os.environ.get('MAIL_USERNAME')
SUBJECT_PREFIX = '[ApiZen]'
class DevConfig(BaseConfig):
DEBUG = True
TESTING = True
    # Port number
    PORT = 8080
    # Database configuration
MARIADB_HOST = os.environ.get('MARIADB_HOST', '127.0.0.1')
MARIADB_PORT = os.environ.get('MARIADB_PORT', 3306)
MARIADB_USER = os.environ.get('MARIADB_USER', 'apizen')
MARIADB_PASS = os.environ.get('MARIADB_PASS', 'apizen')
MARIADB_DB = os.environ.get('MARIADB_DB', 'apizen')
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://{0}:{1}@{2}:{3}/{4}?charset=utf8'.format(
MARIADB_USER,
MARIADB_PASS,
MARIADB_HOST,
MARIADB_PORT,
MARIADB_DB
)
# Celery
    # Default queue
CELERY_DEFAULT_QUEUE = '[email protected]'
class TestConfig(BaseConfig):
DEBUG = False
TESTING = False
    # Port number
    PORT = 8080
    # Database configuration
MARIADB_HOST = os.environ.get('MARIADB_HOST', '127.0.0.1')
MARIADB_PORT = os.environ.get('MARIADB_PORT', 3306)
MARIADB_USER = os.environ.get('MARIADB_USER', 'apizen')
MARIADB_PASS = os.environ.get('MARIADB_PASS', 'apizen')
MARIADB_DB = os.environ.get('MARIADB_DB', 'apizen')
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://{0}:{1}@{2}:{3}/{4}?charset=utf8'.format(
MARIADB_USER,
MARIADB_PASS,
MARIADB_HOST,
MARIADB_PORT,
MARIADB_DB
)
# Celery
    # Default queue
CELERY_DEFAULT_QUEUE = '[email protected]'
class ProdConfig(BaseConfig):
DEBUG = False
TESTING = False
    # Port number
PORT = 8080
WORKS = 5
SQLALCHEMY_TRACK_MODIFICATIONS = False
# Celery
    # Default queue
CELERY_DEFAULT_QUEUE = '[email protected]'
devcfg = DevConfig
testcfg = TestConfig
prodcfg = ProdConfig
default = DevConfig
configs = {
'devcfg': devcfg,
'testcfg': testcfg,
'prodcfg': prodcfg,
'default': default
}
if __name__ == '__main__':
pass
| apache-2.0 | -9,111,898,448,475,030,000 | 23.940789 | 96 | 0.608547 | false |
NMisko/monkalot | bot/utilities/startgame.py | 1 | 1791 | """Contains functions to control games."""
import time
from bot.utilities.permission import Permission
from bot.utilities.tools import replace_vars
def start_game(bot, user, msg, cmd):
"""Return whether a user can start a game.
    Takes off points if a non-moderator wants to start a game.
Also makes sure only one game is running at a time.
"""
responses = bot.config.responses["startGame"]
if bot.game_running:
return False
elif (
bot.get_permission(user) in [Permission.User, Permission.Subscriber]
and msg == cmd
):
"""Check if pleb_gametimer is not on cooldown."""
if (time.time() - bot.last_plebgame) > bot.config.pleb_gametimer:
# The calling user is not a mod, so we subtract 5 points.
if bot.ranking.get_points(user) > bot.config.config["points"]["game_start"]:
bot.last_plebgame = time.time() # Set pleb_gametimer
bot.ranking.increment_points(
user, -int(bot.config.config["points"]["game_start"]), bot
)
bot.game_running = True
return True
else:
var = {"<AMOUNT>": int(bot.config.config["points"]["game_start"])}
bot.write(replace_vars(responses["points_needed"]["msg"], var))
return False
else:
t = bot.config.pleb_gametimer - time.time() + bot.last_plebgame
next_plebgame = "%8.0f" % t
var = {"<COOLDOWN>": next_plebgame}
bot.write(replace_vars(responses["plebgames_on_cooldown"]["msg"], var))
else: # The calling user is a mod, so we only check if the command is correct
if msg == cmd:
bot.game_running = True
return msg == cmd
| mit | -8,835,198,294,969,289,000 | 38.8 | 88 | 0.58459 | false |
Acimaz/Google_Apple_Financial_Reporter | Reporter.py | 1 | 5765 | # Reporting tool for querying Sales- and Financial Reports from iTunes Connect and Google Developer Console
#
# This tool can be used to download financial reports from both Google and Apple
# for the app of your choice (of course i assume that you are the owner of this app)
#
# Copyright (c) 2017 Ayhan Sakarya, Kaasa health GmbH <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import getopt
import csv
import os
from Utility import ReportDate
from GoogleReports import GoogleReporter
from AppleReporter import ApplePythonReport
currentDate = None
googleReporter = None
appleReporter = None
def UpdateMainReportFile(date):
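    # Descriptive note: read the existing CSV, check whether rows for `date`
    # already exist, trim the oldest rows once the file exceeds 118 data rows
    # (2 rows per day, Apple and Google), then rewrite the file with the rows
    # for `date` either replaced in place or appended.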
global googleReporter
global appleReporter
fileExists = os.path.isfile('financialReport.csv')
with open('financialReport.csv', 'r') as csvFileRead:
print 'Updating financialReport.csv..'
dateExists = False
deleteFirstRows = False
headers = ['Date', 'Platform', 'newSubscriptions', 'cancelledSubscriptions', 'activeSubscriptions']
reader = csv.DictReader(csvFileRead, delimiter=',')
#print 'Length: ' + len(list(reader)).__str__()
readerList = list(reader)
csvFileRead.seek(0)
listLength = 0
for line in reader:
listLength += 1
if date == line['Date']:
dateExists = True
if listLength > 118: #118 because we want to have the data of the past 60 days and we have 2 rows for each day (google, apple)
deleteFirstRows = True
csvFileRead.seek(0)
with open('financialReport.csv', 'w') as csvFileWriter:
writer = csv.DictWriter(csvFileWriter, delimiter=',', lineterminator='\n', fieldnames=headers)
writer.writeheader()
replaced = False
startIndex = 2 if deleteFirstRows else 0
for line in readerList[startIndex:]:
if date == line['Date']:
if line['Platform'] == 'Apple':
writer.writerow(
{'Date': date, 'Platform': 'Apple', 'newSubscriptions': appleReporter.subscribers,
'cancelledSubscriptions': appleReporter.cancellations,
'activeSubscriptions': appleReporter.activeSubscribers})
if line['Platform'] == 'Google':
writer.writerow(
{'Date': date, 'Platform': 'Google', 'newSubscriptions': googleReporter.subscribers,
'cancelledSubscriptions': googleReporter.cancellations,
'activeSubscriptions': googleReporter.activeSubscribers})
replaced = True
else:
writer.writerow(line)
if not replaced:
writer.writerow(
{'Date': date, 'Platform': 'Apple', 'newSubscriptions': appleReporter.subscribers,
'cancelledSubscriptions': appleReporter.cancellations,
'activeSubscriptions': appleReporter.activeSubscribers})
writer.writerow(
{'Date': date, 'Platform': 'Google', 'newSubscriptions': googleReporter.subscribers,
'cancelledSubscriptions': googleReporter.cancellations,
'activeSubscriptions': googleReporter.activeSubscribers})
def main(argv):
global currentDate
global googleReporter
global appleReporter
try:
opts, args = getopt.getopt(argv, "d:", ["days="])
except getopt.GetoptError:
print 'Reporter.py -d <daysBefore>'
sys.exit(2)
for opt, arg in opts:
if opt in ("-d", "--days"):
currentDate = ReportDate(int(arg))
print 'Downloading financial reports for ' + currentDate.ToString() + "..."
googleReporter = GoogleReporter(
currentDate.year.__str__() + currentDate.month.__str__() + currentDate.day.__str__())
appleReporter = ApplePythonReport(currentDate.year.__str__() + currentDate.month.__str__() + currentDate.day.__str__())
# print '\nGoogle\nSubscribers: ' + googleReporter.subscribers.__str__() + ' Cancellations: ' + googleReporter.cancellations.__str__() + ' Active Users: ' + googleReporter.activeSubscribers.__str__()
# print 'Apple\nSubscribers: ' + appleReporter.subscribers.__str__() + ' Cancellations: ' + appleReporter.cancellations.__str__() + ' Active Users: ' + appleReporter.activeSubscribers.__str__()
UpdateMainReportFile(currentDate.year.__str__() + '-' + currentDate.month.__str__() + '-' + currentDate.day.__str__())
print 'Financial Reports are now up to date!\n'
if __name__ == "__main__":
main(sys.argv[1:])
| mit | 5,068,535,535,105,347,000 | 48.273504 | 203 | 0.646487 | false |
openstack/swift | swift/common/middleware/recon.py | 1 | 17726 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import json
import os
import time
from resource import getpagesize
from swift import __version__ as swiftver
from swift import gettext_ as _
from swift.common.constraints import check_mount
from swift.common.storage_policy import POLICIES
from swift.common.swob import Request, Response
from swift.common.utils import get_logger, SWIFT_CONF_FILE, md5_hash_for_file
from swift.common.recon import RECON_OBJECT_FILE, RECON_CONTAINER_FILE, \
RECON_ACCOUNT_FILE, RECON_DRIVE_FILE, RECON_RELINKER_FILE, \
DEFAULT_RECON_CACHE_PATH
class ReconMiddleware(object):
"""
Recon middleware used for monitoring.
/recon/load|mem|async... will return various system metrics.
Needs to be added to the pipeline and requires a filter
declaration in the [account|container|object]-server conf file:
[filter:recon]
use = egg:swift#recon
recon_cache_path = /var/cache/swift
"""
def __init__(self, app, conf, *args, **kwargs):
self.app = app
self.devices = conf.get('devices', '/srv/node')
swift_dir = conf.get('swift_dir', '/etc/swift')
self.logger = get_logger(conf, log_route='recon')
self.recon_cache_path = conf.get('recon_cache_path',
DEFAULT_RECON_CACHE_PATH)
self.object_recon_cache = os.path.join(self.recon_cache_path,
RECON_OBJECT_FILE)
self.container_recon_cache = os.path.join(self.recon_cache_path,
RECON_CONTAINER_FILE)
self.account_recon_cache = os.path.join(self.recon_cache_path,
RECON_ACCOUNT_FILE)
self.drive_recon_cache = os.path.join(self.recon_cache_path,
RECON_DRIVE_FILE)
self.relink_recon_cache = os.path.join(self.recon_cache_path,
RECON_RELINKER_FILE)
self.account_ring_path = os.path.join(swift_dir, 'account.ring.gz')
self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz')
self.rings = [self.account_ring_path, self.container_ring_path]
# include all object ring files (for all policies)
for policy in POLICIES:
self.rings.append(os.path.join(swift_dir,
policy.ring_name + '.ring.gz'))
def _from_recon_cache(self, cache_keys, cache_file, openr=open,
ignore_missing=False):
"""retrieve values from a recon cache file
:params cache_keys: list of cache items to retrieve
:params cache_file: cache file to retrieve items from.
:params openr: open to use [for unittests]
:params ignore_missing: Some recon stats are very temporary, in this
case it would be better to not log if things are missing.
:return: dict of cache items and their values or none if not found
"""
try:
with openr(cache_file, 'r') as f:
recondata = json.load(f)
return {key: recondata.get(key) for key in cache_keys}
except IOError as err:
if err.errno == errno.ENOENT and ignore_missing:
pass
else:
self.logger.exception(_('Error reading recon cache file'))
except ValueError:
self.logger.exception(_('Error parsing recon cache file'))
except Exception:
self.logger.exception(_('Error retrieving recon data'))
return dict((key, None) for key in cache_keys)
def get_version(self):
"""get swift version"""
verinfo = {'version': swiftver}
return verinfo
def get_mounted(self, openr=open):
"""get ALL mounted fs from /proc/mounts"""
mounts = []
with openr('/proc/mounts', 'r') as procmounts:
for line in procmounts:
mount = {}
mount['device'], mount['path'], opt1, opt2, opt3, \
opt4 = line.rstrip().split()
mounts.append(mount)
return mounts
def get_load(self, openr=open):
"""get info from /proc/loadavg"""
loadavg = {}
with openr('/proc/loadavg', 'r') as f:
onemin, fivemin, ftmin, tasks, procs = f.read().rstrip().split()
loadavg['1m'] = float(onemin)
loadavg['5m'] = float(fivemin)
loadavg['15m'] = float(ftmin)
loadavg['tasks'] = tasks
loadavg['processes'] = int(procs)
return loadavg
def get_mem(self, openr=open):
"""get info from /proc/meminfo"""
meminfo = {}
with openr('/proc/meminfo', 'r') as memlines:
for i in memlines:
entry = i.rstrip().split(":")
meminfo[entry[0]] = entry[1].strip()
return meminfo
def get_async_info(self):
"""get # of async pendings"""
return self._from_recon_cache(['async_pending', 'async_pending_last'],
self.object_recon_cache)
def get_driveaudit_error(self):
"""get # of drive audit errors"""
return self._from_recon_cache(['drive_audit_errors'],
self.drive_recon_cache)
def get_sharding_info(self):
"""get sharding info"""
return self._from_recon_cache(["sharding_stats",
"sharding_time",
"sharding_last"],
self.container_recon_cache)
def get_replication_info(self, recon_type):
"""get replication info"""
replication_list = ['replication_time',
'replication_stats',
'replication_last']
if recon_type == 'account':
return self._from_recon_cache(replication_list,
self.account_recon_cache)
elif recon_type == 'container':
return self._from_recon_cache(replication_list,
self.container_recon_cache)
elif recon_type == 'object':
replication_list += ['object_replication_time',
'object_replication_last']
return self._from_recon_cache(replication_list,
self.object_recon_cache)
else:
return None
def get_device_info(self):
"""get devices"""
try:
return {self.devices: os.listdir(self.devices)}
except Exception:
self.logger.exception(_('Error listing devices'))
return {self.devices: None}
def get_updater_info(self, recon_type):
"""get updater info"""
if recon_type == 'container':
return self._from_recon_cache(['container_updater_sweep'],
self.container_recon_cache)
elif recon_type == 'object':
return self._from_recon_cache(['object_updater_sweep'],
self.object_recon_cache)
else:
return None
def get_expirer_info(self, recon_type):
"""get expirer info"""
if recon_type == 'object':
return self._from_recon_cache(['object_expiration_pass',
'expired_last_pass'],
self.object_recon_cache)
def get_auditor_info(self, recon_type):
"""get auditor info"""
if recon_type == 'account':
return self._from_recon_cache(['account_audits_passed',
'account_auditor_pass_completed',
'account_audits_since',
'account_audits_failed'],
self.account_recon_cache)
elif recon_type == 'container':
return self._from_recon_cache(['container_audits_passed',
'container_auditor_pass_completed',
'container_audits_since',
'container_audits_failed'],
self.container_recon_cache)
elif recon_type == 'object':
return self._from_recon_cache(['object_auditor_stats_ALL',
'object_auditor_stats_ZBF'],
self.object_recon_cache)
else:
return None
def get_unmounted(self):
"""list unmounted (failed?) devices"""
mountlist = []
for entry in os.listdir(self.devices):
if not os.path.isdir(os.path.join(self.devices, entry)):
continue
try:
check_mount(self.devices, entry)
except OSError as err:
mounted = str(err)
except ValueError:
mounted = False
else:
continue
mountlist.append({'device': entry, 'mounted': mounted})
return mountlist
def get_diskusage(self):
"""get disk utilization statistics"""
devices = []
for entry in os.listdir(self.devices):
if not os.path.isdir(os.path.join(self.devices, entry)):
continue
try:
check_mount(self.devices, entry)
except OSError as err:
devices.append({'device': entry, 'mounted': str(err),
'size': '', 'used': '', 'avail': ''})
except ValueError:
devices.append({'device': entry, 'mounted': False,
'size': '', 'used': '', 'avail': ''})
else:
path = os.path.join(self.devices, entry)
disk = os.statvfs(path)
capacity = disk.f_bsize * disk.f_blocks
available = disk.f_bsize * disk.f_bavail
used = disk.f_bsize * (disk.f_blocks - disk.f_bavail)
devices.append({'device': entry, 'mounted': True,
'size': capacity, 'used': used,
'avail': available})
return devices
def get_ring_md5(self):
"""get all ring md5sum's"""
sums = {}
for ringfile in self.rings:
if os.path.exists(ringfile):
try:
sums[ringfile] = md5_hash_for_file(ringfile)
except IOError as err:
sums[ringfile] = None
if err.errno != errno.ENOENT:
self.logger.exception(_('Error reading ringfile'))
return sums
def get_swift_conf_md5(self):
"""get md5 of swift.conf"""
hexsum = None
try:
hexsum = md5_hash_for_file(SWIFT_CONF_FILE)
except IOError as err:
if err.errno != errno.ENOENT:
self.logger.exception(_('Error reading swift.conf'))
return {SWIFT_CONF_FILE: hexsum}
def get_quarantine_count(self):
"""get obj/container/account quarantine counts"""
qcounts = {"objects": 0, "containers": 0, "accounts": 0,
"policies": {}}
qdir = "quarantined"
for device in os.listdir(self.devices):
qpath = os.path.join(self.devices, device, qdir)
if os.path.exists(qpath):
for qtype in os.listdir(qpath):
qtgt = os.path.join(qpath, qtype)
linkcount = os.lstat(qtgt).st_nlink
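                    # st_nlink for a directory is 2 ('.' plus its own entry in
                    # the parent) + one per subdirectory, so linkcount - 2
                    # below is the number of quarantined items under qtgt.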
if linkcount > 2:
if qtype.startswith('objects'):
if '-' in qtype:
pkey = qtype.split('-', 1)[1]
else:
pkey = '0'
qcounts['policies'].setdefault(pkey,
{'objects': 0})
qcounts['policies'][pkey]['objects'] \
+= linkcount - 2
qcounts['objects'] += linkcount - 2
else:
qcounts[qtype] += linkcount - 2
return qcounts
def get_socket_info(self, openr=open):
"""
get info from /proc/net/sockstat and sockstat6
Note: The mem value is actually kernel pages, but we return bytes
        allocated based on the system's page size.
"""
sockstat = {}
try:
with openr('/proc/net/sockstat', 'r') as proc_sockstat:
for entry in proc_sockstat:
if entry.startswith("TCP: inuse"):
tcpstats = entry.split()
sockstat['tcp_in_use'] = int(tcpstats[2])
sockstat['orphan'] = int(tcpstats[4])
sockstat['time_wait'] = int(tcpstats[6])
sockstat['tcp_mem_allocated_bytes'] = \
int(tcpstats[10]) * getpagesize()
except IOError as e:
if e.errno != errno.ENOENT:
raise
try:
with openr('/proc/net/sockstat6', 'r') as proc_sockstat6:
for entry in proc_sockstat6:
if entry.startswith("TCP6: inuse"):
sockstat['tcp6_in_use'] = int(entry.split()[2])
except IOError as e:
if e.errno != errno.ENOENT:
raise
return sockstat
def get_time(self):
"""get current time"""
return time.time()
def get_relinker_info(self):
"""get relinker info, if any"""
stat_keys = ['devices', 'workers']
return self._from_recon_cache(stat_keys,
self.relink_recon_cache,
ignore_missing=True)
def GET(self, req):
root, rcheck, rtype = req.split_path(1, 3, True)
all_rtypes = ['account', 'container', 'object']
if rcheck == "mem":
content = self.get_mem()
elif rcheck == "load":
content = self.get_load()
elif rcheck == "async":
content = self.get_async_info()
elif rcheck == 'replication' and rtype in all_rtypes:
content = self.get_replication_info(rtype)
elif rcheck == 'replication' and rtype is None:
# handle old style object replication requests
content = self.get_replication_info('object')
elif rcheck == "devices":
content = self.get_device_info()
elif rcheck == "updater" and rtype in ['container', 'object']:
content = self.get_updater_info(rtype)
elif rcheck == "auditor" and rtype in all_rtypes:
content = self.get_auditor_info(rtype)
elif rcheck == "expirer" and rtype == 'object':
content = self.get_expirer_info(rtype)
elif rcheck == "mounted":
content = self.get_mounted()
elif rcheck == "unmounted":
content = self.get_unmounted()
elif rcheck == "diskusage":
content = self.get_diskusage()
elif rcheck == "ringmd5":
content = self.get_ring_md5()
elif rcheck == "swiftconfmd5":
content = self.get_swift_conf_md5()
elif rcheck == "quarantined":
content = self.get_quarantine_count()
elif rcheck == "sockstat":
content = self.get_socket_info()
elif rcheck == "version":
content = self.get_version()
elif rcheck == "driveaudit":
content = self.get_driveaudit_error()
elif rcheck == "time":
content = self.get_time()
elif rcheck == "sharding":
content = self.get_sharding_info()
elif rcheck == "relinker":
content = self.get_relinker_info()
else:
content = "Invalid path: %s" % req.path
return Response(request=req, status="404 Not Found",
body=content, content_type="text/plain")
if content is not None:
return Response(request=req, body=json.dumps(content),
content_type="application/json")
else:
return Response(request=req, status="500 Server Error",
body="Internal server error.",
content_type="text/plain")
def __call__(self, env, start_response):
req = Request(env)
if req.path.startswith('/recon/'):
return self.GET(req)(env, start_response)
else:
return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
def recon_filter(app):
return ReconMiddleware(app, conf)
return recon_filter
| apache-2.0 | -7,486,379,650,210,433,000 | 40.415888 | 79 | 0.518278 | false |
eragonruan/text-detection-ctpn | utils/text_connector/detectors.py | 1 | 2097 | # coding:utf-8
import numpy as np
from utils.bbox.nms import nms
from .text_connect_cfg import Config as TextLineCfg
from .text_proposal_connector import TextProposalConnector
from .text_proposal_connector_oriented import TextProposalConnector as TextProposalConnectorOriented
class TextDetector:
def __init__(self, DETECT_MODE="H"):
self.mode = DETECT_MODE
if self.mode == "H":
self.text_proposal_connector = TextProposalConnector()
elif self.mode == "O":
self.text_proposal_connector = TextProposalConnectorOriented()
def detect(self, text_proposals, scores, size):
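        # Assumed inputs (not asserted by the original code): text_proposals is
        # an (N, 4) array of boxes, scores an (N, 1) array of confidences, and
        # size the (height, width) of the image.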
        # Drop proposals whose score is below the minimum threshold
keep_inds = np.where(scores > TextLineCfg.TEXT_PROPOSALS_MIN_SCORE)[0]
text_proposals, scores = text_proposals[keep_inds], scores[keep_inds]
        # Sort proposals by score in descending order
sorted_indices = np.argsort(scores.ravel())[::-1]
text_proposals, scores = text_proposals[sorted_indices], scores[sorted_indices]
        # Apply non-maximum suppression (NMS) to the proposals
keep_inds = nms(np.hstack((text_proposals, scores)), TextLineCfg.TEXT_PROPOSALS_NMS_THRESH)
text_proposals, scores = text_proposals[keep_inds], scores[keep_inds]
        # Build the text-line detection results
text_recs = self.text_proposal_connector.get_text_lines(text_proposals, scores, size)
keep_inds = self.filter_boxes(text_recs)
return text_recs[keep_inds]
def filter_boxes(self, boxes):
heights = np.zeros((len(boxes), 1), np.float)
widths = np.zeros((len(boxes), 1), np.float)
scores = np.zeros((len(boxes), 1), np.float)
index = 0
for box in boxes:
heights[index] = (abs(box[5] - box[1]) + abs(box[7] - box[3])) / 2.0 + 1
widths[index] = (abs(box[2] - box[0]) + abs(box[6] - box[4])) / 2.0 + 1
scores[index] = box[8]
index += 1
return np.where((widths / heights > TextLineCfg.MIN_RATIO) & (scores > TextLineCfg.LINE_MIN_SCORE) &
(widths > (TextLineCfg.TEXT_PROPOSALS_WIDTH * TextLineCfg.MIN_NUM_PROPOSALS)))[0]
| mit | -1,105,246,956,184,877,400 | 41.854167 | 108 | 0.633933 | false |
ashleyjsands/machine-learning | benchmark.py | 1 | 1576 | """
This script is used to benchmark neural network performance to determine which optimisations are useful.
"""
from neural_network import *
from data import *
from neural_network import get_index_of_maximum_value
import time
def print_intro():
print "Benchmarking neural network implementation"
def get_neural_network():
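    # Network shape used here: 784 inputs (28x28 pixels), one hidden layer of
    # 128 units, and 10 outputs (one per digit class).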
number_of_inputs = 28 * 28
    number_of_outputs = 10
    sizes = [128, number_of_outputs]
return create_random_neural_network(number_of_inputs, sizes)
def benchmark_neural_network(neural_network, training_set, validation_set, test_set):
runs = 10
batch_size = 10
learning_rate = 3.0
durations = []
for i in range(runs):
random.shuffle(training_set)
batch = training_set[0:batch_size]
start = time.clock()
error = neural_network.train_batch(batch, learning_rate)
end = time.clock()
durations.append(end - start)
return sum(durations) / len(durations)
def main():
"""
Benchmark a specific type of neural network a number of times and print out the average duration.
"""
print_intro()
neural_network = get_neural_network()
training_set, validation_set, test_set = load_digit_data()
average_duration = benchmark_neural_network(neural_network, convert_data_set_into_data_points_and_labels(training_set), convert_data_set_into_data_points_and_labels(validation_set), convert_data_set_into_data_points_and_labels(test_set))
print "The benchmark took an average %s seconds per run." % average_duration
if __name__ == "__main__":
main()
| mit | 7,466,109,914,126,193,000 | 34.818182 | 241 | 0.695431 | false |
amanmehara/programming-app-data | Python/ScrappingHackerNewsWebsite/ScrappingHackerNewsWebsite.py | 1 | 3141 | '''
Copyright [2020] [Arun Kumar G]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Scrapes the first 2 pages of the Hacker News website, which lists lots of tech news
(as articles) with upvotes above 100. The user can just click on a story link to see the article.
'''
'''
The program uses the requests module to fetch web data from the URLs and the
BeautifulSoup module to parse that data as HTML using the html parser.
Install the requests and BeautifulSoup modules before executing!
'''
import requests
from bs4 import BeautifulSoup
import pprint  # pretty-prints the final output; part of the Python standard library
response1 = requests.get("https://news.ycombinator.com/news") #Storing response of first page of website
response2 = requests.get("https://news.ycombinator.com/news?p=2") # Storing response of Second page of website
response1_html_parser = BeautifulSoup(response1.text,'html.parser') #parsing the received web data by html parser
response2_html_parser = BeautifulSoup(response2.text,'html.parser')
linksInPage1 = response1_html_parser.select('.storylink') #All links of tech news are included in class "Storylink"
linksInPage2 = response2_html_parser.select('.storylink')
votesInPage1 = response1_html_parser.select('.subtext') #All votes are stored inside subclass "score" of class "subtext"
votesInPage2 = response2_html_parser.select('.subtext')
mega_link = linksInPage1 + linksInPage2 # Combining links of both pages
#print(mega_link)
mega_votes = votesInPage1 + votesInPage2
def sorted_stories_list(hackerNewsList):
"""Sorting the list in decreasing order
with respect to votes"""
return sorted(hackerNewsList,key=lambda x:x['votes'],reverse=True)
def create_custom_hackernews(mega_link,mega_votes):
hackerNews =[]
for index,item in enumerate(mega_link):
        title = mega_link[index].getText()  # Title of the story (news)
        href = mega_link[index].get('href', None)  # Link of the story (news); defaults to None if no link is present
        vote = mega_votes[index].select('.score')  # Points are stored inside class "score" of class "subtext"; if no points/votes are available, the class "score" won't be present
if len(vote): #To check if class "score" exists or not
points = int(vote[0].getText().replace(' points', ''))
if points > 100: # To get votes/points more than 100
hackerNews.append({'title': title, 'link': href,'votes': points})
return sorted_stories_list(hackerNews)
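# Example of one returned entry (values are hypothetical):
# {'title': 'Some story', 'link': 'https://example.com/story', 'votes': 312}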
if __name__ == '__main__':
# Prints story link, story title and its votes in a pretty manner
pprint.pprint(create_custom_hackernews(mega_link,mega_votes))
| apache-2.0 | -5,395,644,967,868,740,000 | 45.191176 | 172 | 0.737663 | false |
Bezoar/surrender-rides | bp_content/themes/default/handlers/forms.py | 1 | 5264 | # *-* coding: UTF-8 *-*
"""
Created on June 10, 2012
@author: peta15
"""
__author__ = 'coto'
from datetime import datetime
from wtforms import fields
from wtforms import Form
from wtforms import validators, ValidationError
from webapp2_extras.i18n import lazy_gettext as _
from webapp2_extras.i18n import ngettext, gettext
from bp_includes.lib import utils
from bp_includes.forms import BaseForm, PasswordConfirmMixin, UsernameMixin
FIELD_MAXLENGTH = 80 # intended to stop maliciously long input
class FormTranslations(object):
def gettext(self, string):
return gettext(string)
def ngettext(self, singular, plural, n):
return ngettext(singular, plural, n)
class EmailMixin(BaseForm):
email = fields.TextField(_('Email'), [validators.Required(),
validators.Length(min=8, max=FIELD_MAXLENGTH, message=_(
"Field must be between %(min)d and %(max)d characters long.")),
validators.regexp(utils.EMAIL_REGEXP, message=_('Invalid email address.'))])
pass
# ==== Forms ====
class DeleteAccountForm(BaseForm):
password = fields.TextField(_('Password'), [validators.Required(),
validators.Length(max=FIELD_MAXLENGTH, message=_(
"Field cannot be longer than %(max)d characters."))],
id='l_password')
pass
class ContactForm(EmailMixin):
name = fields.TextField(_('Name'), [validators.Required(),
validators.Length(max=FIELD_MAXLENGTH, message=_(
"Field cannot be longer than %(max)d characters.")),
validators.regexp(utils.NAME_LASTNAME_REGEXP, message=_(
"Name invalid. Use only letters and numbers."))])
message = fields.TextAreaField(_('Message'), [validators.Required(), validators.Length(max=65536)])
pass
def inbound_date_range_check(form, field):
if (None not in (form.inbound_departure_dt.data, form.inbound_arrival_dt.data)
and (form.inbound_departure_dt.data > form.inbound_arrival_dt.data)):
raise ValidationError("Inbound departure time, if provided, must be before your planned arrival at Surrender.")
def outbound_date_range_check(form, field):
if (None not in (form.outbound_departure_dt.data, form.outbound_arrival_dt.data)
and (form.outbound_departure_dt.data > form.outbound_arrival_dt.data)):
raise ValidationError("Outbound arrival time, if provided, must be after your planned departure from Surrender.")
class RequiredNameMixin(BaseForm):
NAME_LASTNAME_REGEXP = "^[0-9a-zA-ZàáâäãåąćęèéêëìíîïłńòóôöõøùúûüÿýżźñçčšžÀÁÂÄÃÅĄĆĘÈÉÊËÌÍÎÏŁŃÒÓÔÖÕØÙÚÛÜŸÝŻŹÑßÇŒÆČŠŽ∂ð ,.'-]*$"
FIELD_MAXLENGTH = 80
name = fields.TextField(_('First name'), [validators.Required(),
validators.Length(max=FIELD_MAXLENGTH, message=_("Field cannot be longer than %(max)d characters.")),
validators.regexp(NAME_LASTNAME_REGEXP, message=_(
"First name invalid. Use only letters and numbers."))])
last_name = fields.TextField(_('Last name'), [validators.Required(),
validators.Length(max=FIELD_MAXLENGTH, message=_("Field cannot be longer than %(max)d characters.")),
validators.regexp(NAME_LASTNAME_REGEXP, message=_(
"Last name invalid. Use only letters and numbers."))])
pass
class RequiredCityStateMixin(BaseForm):
city = fields.TextField(_('City'), [validators.Required()])
state = fields.TextField(_('State/Province'), [validators.Required()])
pass
class SurrenderRegisterForm(PasswordConfirmMixin, RequiredCityStateMixin,
UsernameMixin, RequiredNameMixin, EmailMixin):
country = fields.SelectField(_('Country'), choices=[])
tz = fields.SelectField(_('Timezone'), choices=[])
pass
class EditProfileForm(UsernameMixin, RequiredCityStateMixin, RequiredNameMixin):
DT_FORMAT = '%m/%d/%Y %I:%M %p' # for use with jquery-ui
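    # e.g. "06/10/2012 02:30 PM" parses with this format.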
country = fields.SelectField(_('Country'), choices=[])
tz = fields.SelectField(_('Timezone'), choices=[])
inbound_departure_dt = fields.DateTimeField(_('Estimated departure for Surrender'), [validators.optional(), inbound_date_range_check], format=DT_FORMAT)
inbound_arrival_dt = fields.DateTimeField(_('Estimated arrival at Surrender'), [validators.optional()], format=DT_FORMAT)
outbound_departure_dt = fields.DateTimeField(_('Estimated departure from Surrender'), [validators.optional()], format=DT_FORMAT)
outbound_arrival_dt = fields.DateTimeField(_('Estimated arrival at home'), [validators.optional(), outbound_date_range_check], format=DT_FORMAT)
needs = fields.TextAreaField(_('Needs'))
needs_met = fields.BooleanField(_('Needs met'))
offers = fields.TextAreaField(_('Offers'))
offers_taken = fields.BooleanField(_('Offers taken'))
notes = fields.TextAreaField(_('Notes'))
# No methods, just field definitions
pass
| mit | 4,451,003,861,496,569,000 | 48.352381 | 156 | 0.650328 | false |
shengqh/ngsperl | lib/GATK/mergeMutect.py | 1 | 4103 | import argparse
import sys
import logging
import os
import errno
import gzip
from asyncore import read
from Mutect import MutectItem, MutectResult
def check_file_exists(file):
if not os.path.exists(file):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), file)
def readFileMap(fileName):
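    # Expected input format (from how each line is parsed below):
    # "<file path>\t<sample name>" per line.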
check_file_exists(fileName)
result = {}
with open(fileName) as fh:
for line in fh:
filepath, name = line.strip().split('\t', 1)
result[name] = filepath.strip()
return(result)
def checkFileMap(fileMap):
for sname in fileMap.keys():
sfile = fileMap[sname]
check_file_exists(sfile)
def mergeMutect(logger, listFile, outputFile):
fileMap = readFileMap(listFile)
checkFileMap(fileMap)
fileValueMap = {}
chroms = []
comments = []
fileNames = sorted(fileMap.keys())
for fileName in fileNames:
filePath = fileMap[fileName]
logger.info("Reading %s ..." % filePath)
mutect = MutectResult()
mutect.readFromFile(logger, fileName, filePath)
fileValueMap[fileName] = mutect
if len(chroms) == 0:
chroms = mutect.findChromosomeFromComments()
comments = mutect.Comments
has_normal = any(v.NormalSampleName != None for v in fileValueMap.values())
logger.info("Output result to %s ..." % outputFile)
with open(outputFile, "wt") as fout:
for comment in comments:
if comment.startswith("##INFO=<ID=LOD"):
if has_normal:
fout.write('##FORMAT=<ID=ND,Number=1,Type=Integer,Description="Approximate normal sample read depth (reads with MQ=255 or with bad mates are filtered)">\n')
fout.write("%s\n" % comment.replace("##INFO=", "##FORMAT="))
else:
fout.write("%s\n" % comment)
fout.write("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\n" % "\t".join(fileNames))
for chrom in chroms:
items = []
for mutect in fileValueMap.values():
if chrom in mutect.ChromosomeItemMap:
items.extend(mutect.ChromosomeItemMap[chrom])
posMap = {}
for item in items:
posMap.setdefault(item.POS, {}).setdefault(item.LocusKey, {})[item.SampleName] = item
for pos in sorted(posMap.keys()):
locusMap = posMap[pos]
for locus in sorted(locusMap.keys()):
sampleMap = locusMap[locus]
item = [v for v in sampleMap.values()][0]
if has_normal:
fout.write("%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s:ND:LOD" % (item.CHROM, item.POS, item.ID, item.REF, item.ALT, item.QUAL, item.FILTER, item.INFO, item.FORMAT))
else:
fout.write("%s\t%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s:LOD" % (item.CHROM, item.POS, item.ID, item.REF, item.ALT, item.QUAL, item.FILTER, item.INFO, item.FORMAT))
for sampleName in fileNames:
if sampleName in sampleMap:
item = sampleMap[sampleName]
if has_normal:
fout.write("\t%s:%d:%s" % (item.TumorData, item.NormalDepth, item.LOD))
else:
fout.write("\t%s:%s" % (item.TumorData, item.LOD))
else:
fout.write("\t./.")
fout.write("\n")
def main():
DEBUG=False
NotDEBUG=not DEBUG
parser = argparse.ArgumentParser(description="merge mutect result and keep tumor sample only.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', action='store', nargs='?', help='Input vcf list file', required=NotDEBUG)
parser.add_argument('-o', '--output', action='store', nargs='?', help="Output vcf file", required=NotDEBUG)
args = parser.parse_args()
if DEBUG:
args.input = "H:/shengquanhu/projects/20190610_Ciombior_ExomeSeq/Ciombor_ExomeSeq__fileList1.list"
args.output = "H:/shengquanhu/projects/20190610_Ciombior_ExomeSeq/combined.tumor.vcf"
logger = logging.getLogger('mergeMutect')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
mergeMutect(logger, args.input, args.output)
logger.info("done.")
if __name__ == "__main__":
main()
| apache-2.0 | 5,800,809,459,799,280,000 | 33.771186 | 169 | 0.639288 | false |
gprMax/gprMax | setup.py | 1 | 7984 | # Copyright (C) 2015-2020: The University of Edinburgh
# Authors: Craig Warren and Antonis Giannopoulos
#
# This file is part of gprMax.
#
# gprMax is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gprMax is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
try:
import numpy as np
except ImportError:
raise ImportError('gprMax requires the NumPy package.')
import glob
import os
import pathlib
import re
import shutil
import sys
# Importing _version__.py before building can cause issues.
with open('gprMax/_version.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
# Parse package name from init file. Importing __init__.py / gprMax will break as gprMax depends on compiled .pyx files.
with open('gprMax/__init__.py', 'r') as fd:
packagename = re.search(r'^__name__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
packages = [packagename, 'tests', 'tools', 'user_libs']
# Parse long_description from README.rst file.
with open('README.rst','r') as fd:
long_description = fd.read()
# Python version
if sys.version_info[:2] < (3, 4):
sys.exit('\nExited: Requires Python 3.4 or newer!\n')
# Process 'build' command line argument
if 'build' in sys.argv:
print("Running 'build_ext --inplace'")
sys.argv.remove('build')
sys.argv.append('build_ext')
sys.argv.append('--inplace')
# Process '--no-cython' command line argument - either Cythonize or just compile the .c files
if '--no-cython' in sys.argv:
USE_CYTHON = False
sys.argv.remove('--no-cython')
else:
USE_CYTHON = True
# Build a list of all the files that need to be Cythonized looking in gprMax directory
cythonfiles = []
for root, dirs, files in os.walk(os.path.join(os.getcwd(), packagename), topdown=True):
for file in files:
if file.endswith('.pyx'):
cythonfiles.append(os.path.relpath(os.path.join(root, file)))
# Process 'cleanall' command line argument - cleanup Cython files
if 'cleanall' in sys.argv:
USE_CYTHON = False
for file in cythonfiles:
filebase = os.path.splitext(file)[0]
# Remove Cython C files
if os.path.isfile(filebase + '.c'):
try:
os.remove(filebase + '.c')
print('Removed: {}'.format(filebase + '.c'))
except OSError:
print('Could not remove: {}'.format(filebase + '.c'))
# Remove compiled Cython modules
libfile = glob.glob(os.path.join(os.getcwd(), os.path.splitext(file)[0]) + '*.pyd') + glob.glob(os.path.join(os.getcwd(), os.path.splitext(file)[0]) + '*.so')
if libfile:
libfile = libfile[0]
try:
os.remove(libfile)
print('Removed: {}'.format(os.path.abspath(libfile)))
except OSError:
print('Could not remove: {}'.format(os.path.abspath(libfile)))
# Remove build, dist, egg and __pycache__ directories
shutil.rmtree(os.path.join(os.getcwd(), 'build'), ignore_errors=True)
shutil.rmtree(os.path.join(os.getcwd(), 'dist'), ignore_errors=True)
shutil.rmtree(os.path.join(os.getcwd(), 'gprMax.egg-info'), ignore_errors=True)
for p in pathlib.Path(os.getcwd()).rglob('__pycache__'):
shutil.rmtree(p, ignore_errors=True)
print('Removed: {}'.format(p))
# Now do a normal clean
sys.argv[1] = 'clean' # this is what distutils understands
# Set compiler options
# Windows
if sys.platform == 'win32':
compile_args = ['/O2', '/openmp', '/w'] # No static linking as no static version of OpenMP library; /w disables warnings
linker_args = []
extra_objects = []
libraries=[]
# Mac OS X - needs gcc (usually via HomeBrew) because the default compiler LLVM (clang) does not support OpenMP
# - with gcc -fopenmp option implies -pthread
elif sys.platform == 'darwin':
gccpath = glob.glob('/usr/local/bin/gcc-[4-9]*')
gccpath += glob.glob('/usr/local/bin/gcc-[10-11]*')
if gccpath:
# Use newest gcc found
os.environ['CC'] = gccpath[-1].split(os.sep)[-1]
rpath = '/usr/local/opt/gcc/lib/gcc/' + gccpath[-1].split(os.sep)[-1][-1] + '/'
else:
        raise RuntimeError('Cannot find gcc 4-10 in /usr/local/bin. gprMax requires gcc to be installed - easily done through the Homebrew package manager (http://brew.sh). Note: gcc with OpenMP support is required.')
compile_args = ['-O3', '-w', '-fopenmp', '-march=native'] # Sometimes worth testing with '-fstrict-aliasing', '-fno-common'
linker_args = ['-fopenmp', '-Wl,-rpath,' + rpath]
libraries = ['iomp5', 'pthread']
extra_objects = []
# Linux
elif sys.platform == 'linux':
compile_args = ['-O3', '-w', '-fopenmp', '-march=native']
linker_args = ['-fopenmp']
extra_objects = []
libraries=[]
# Build a list of all the extensions
extensions = []
for file in cythonfiles:
tmp = os.path.splitext(file)
if USE_CYTHON:
fileext = tmp[1]
else:
fileext = '.c'
extension = Extension(tmp[0].replace(os.sep, '.'),
[tmp[0] + fileext],
language='c',
include_dirs=[np.get_include()],
libraries=libraries,
extra_compile_args=compile_args,
extra_link_args=linker_args,
extra_objects=extra_objects)
extensions.append(extension)
# Cythonize (build .c files)
if USE_CYTHON:
from Cython.Build import cythonize
extensions = cythonize(extensions,
compiler_directives={
'boundscheck': False,
'wraparound': False,
'initializedcheck': False,
'embedsignature': True,
'language_level': 3
},
annotate=False)
# SetupTools Required to make package
import setuptools
setup(name=packagename,
version=version,
author='Craig Warren and Antonis Giannopoulos',
url='http://www.gprmax.com',
description='Electromagnetic Modelling Software based on the Finite-Difference Time-Domain (FDTD) method',
long_description=long_description,
long_description_content_type="text/x-rst",
license='GPLv3+',
classifiers=[
'Environment :: Console',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Cython',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering'
],
#requirements
python_requires=">3.6",
install_requires=[
"colorama",
"cython",
"h5py",
"jupyter",
"matplotlib",
"numpy",
"psutil",
"scipy",
"terminaltables",
"tqdm",
],
ext_modules=extensions,
packages=packages,
include_package_data=True,
include_dirs=[np.get_include()],
zip_safe=False)
| gpl-3.0 | -5,500,311,628,205,997,000 | 37.200957 | 204 | 0.600952 | false |
lare-team/django-lare | django_lare/models.py | 1 | 2035 | from django_lare import VERSION
class Lare(object):
enabled = False
current_namespace = ""
previous_namespace = ""
version = VERSION
supported_version = "1.0.0"
def __init__(self, request):
super(Lare, self).__init__()
if 'HTTP_X_LARE' in request.META:
if 'HTTP_X_LARE_VERSION' in request.META:
frontend_version = request.META['HTTP_X_LARE_VERSION']
frontend_versions = frontend_version.split('.')
supported_versions = self.supported_version.split('.')
i = 0
while i < len(supported_versions):
if frontend_versions[i] < supported_versions[i]:
self.enabled = False
return
i += 1
self.enabled = True
self.previous_namespace = request.META['HTTP_X_LARE']
def set_current_namespace(self, namespace):
self.current_namespace = namespace
def get_current_namespace(self):
return self.current_namespace
def is_enabled(self):
return self.enabled
def get_matching_count(self, extension_namespace=None):
if not self.enabled:
return 0
if extension_namespace is None:
extension_namespace = self.current_namespace
matching_count = 0
previous_namespaces = self.previous_namespace.split('.')
extension_namespaces = extension_namespace.split('.')
while matching_count < len(previous_namespaces) and matching_count < len(extension_namespaces):
if previous_namespaces[matching_count] == extension_namespaces[matching_count]:
matching_count += 1
else:
break
return matching_count
def matches(self, extension_namespace=None):
if extension_namespace is None:
extension_namespace = self.current_namespace
return self.get_matching_count(extension_namespace) == len(extension_namespace.split('.'))
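    # Illustrative example: with previous_namespace "shop.cart.checkout" and
    # extension_namespace "shop.cart", get_matching_count() returns 2, so
    # matches("shop.cart") is True.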
| mit | 3,086,362,984,252,951,600 | 34.701754 | 103 | 0.594595 | false |
previtus/MGR-Project-Code | Downloader/PreprocessData/GenListOfUrls.py | 1 | 1499 | # GenListOfUrls.py
import sys
sys.path.append('..')
def GenListOfUrls(Segments, PIXELS_X, PIXELS_Y, PrependPath='', minimal_length=20, custom=False):
'''
Iterates over the segment list and returns a list of urls needed for download
    Outputs a list of tuples in [ (<url>, <filename>, <segment id>, <image index>), ... ]
'''
FilenameMap = []
verbose = False
num_of_segments_with_score = 0
num_of_image_urls_to_attempt_to_down = 0
for segment in Segments:
if verbose: segment.displaySegment()
if custom or not segment.hasUnknownScore():
# We only care about scored segments now...
num_of_segments_with_score += 1
if custom:
[urls, filenames] = segment.getGoogleViewUrls(PIXELS_X,PIXELS_Y)
else:
[urls, filenames] = segment.getGoogleViewUrls_whileUsingFractionsOfMinEdgeLen(PIXELS_X, PIXELS_Y, minimal_length)
#print len(urls), urls
num_of_image_urls_to_attempt_to_down += len(urls)
for i_nth_image in range(0, len(urls)):
if verbose: print urls, '\n', filenames, '\n'
#print filenames[i_nth_image]
FilenameMap.append((urls[i_nth_image], PrependPath+filenames[i_nth_image], segment.SegmentId, i_nth_image))
print "num_of_segments_with_score", num_of_segments_with_score
print "num_of_image_urls_to_attempt_to_down", num_of_image_urls_to_attempt_to_down
return FilenameMap
| mit | 9,217,931,892,103,464,000 | 36.475 | 129 | 0.623082 | false |
yeleman/snisi | snisi_core/migrations/0005_auto_20150205_1516.py | 1 | 1889 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('snisi_core', '0004_auto_20150114_1650'),
]
operations = [
migrations.CreateModel(
name='Accreditation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('location', models.ForeignKey(to='snisi_core.Entity')),
],
options={
'verbose_name': 'Accreditation',
'verbose_name_plural': 'Accreditations',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Privilege',
fields=[
('slug', models.SlugField(serialize=False, verbose_name='Slug', primary_key=True)),
('name', models.CharField(max_length=100, verbose_name='Name')),
],
options={
'verbose_name': 'Privilege',
'verbose_name_plural': 'Privileges',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='accreditation',
name='privilege',
field=models.ForeignKey(to='snisi_core.Privilege'),
preserve_default=True,
),
migrations.AddField(
model_name='accreditation',
name='provider',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='provider',
name='privileges',
field=models.ManyToManyField(to='snisi_core.Privilege', through='snisi_core.Accreditation'),
preserve_default=True,
),
]
| mit | -7,704,077,489,148,694,000 | 32.140351 | 114 | 0.535733 | false |
DomBennett/pG-lt | pglt/tools/setup_tools.py | 1 | 16996 | #! /bin/usr/env python
# D.J. Bennett
# 07/11/2014
"""
pglt setup tools
"""
# PACKAGES
import argparse
import sys
import os
import re
import pickle
import csv
import logging
import platform
from datetime import datetime
from reseter_tools import Reseter
from special_tools import clean
from special_tools import stats
from special_tools import getThreads
from pglt import _PTHREADS as pthreads
# GLOBALS
PARS = None # both set at init
GPARS = None
pglt_version = None # set at run_pglt.py
pglt_doc = None
pglt_year = None
description = """
----------------------------------------------------------------------
pG-lt version {0}, Copyright (C) {1} Bennett
----------------------------------------------------------------------
This program comes with ABSOLUTELY NO WARRANTY. This is free software,
and you are welcome to redistribute it under certain conditions.
For more details, type `run_pglt.py --details`.
----------------------------------------------------------------------
"""
# MESSAGES
nonamestxt_msg = '\nERROR: No folders containing \'names.txt\' files \
found! All taxonomic names should be placed in subdirectories and \
called: \'names.txt\''
ioerror_msg = "[{0}] file could not be opened in [{1}]. Check that \
it is not opened by another program"
priming_msg = '\nERROR: The program was unable to start due to a \
problem with the files and folders in the study directory. Check the \
parameters and gene parameters .csv for any potential conflicts.'
# PROGRESS DICT
progress = {'1': 'not run', '2': 'not run', '3': 'not run', '4': 'not run'}
# ERROR CLASSES
class PrimingError(Exception):
pass
# FUNCTIONS
def printHeader():
"""Print a nice program description header"""
print description.format(pglt_version, pglt_year)
def calcWorkers(threads, nfolders, min_threads_per_worker=2,
max_threads_per_worker=100):
"""Calculate the number of workers for parallel running of folders"""
# get available threads on machine
available_threads = getThreads()
if available_threads:
# make sure threads arg is not greater than those available
if threads > available_threads:
            sys.exit('More threads specified than available on machine')
if threads == -1:
threads = available_threads
# make sure threads is absolute
threads = abs(threads)
# calc min_threads_per_worker if it is greater than threads
if min_threads_per_worker > threads:
min_threads_per_worker = threads
# calc max_threads_per_worker if it is greater than threads
if max_threads_per_worker > threads:
max_threads_per_worker = threads
# calc nworkers and threads_per_worker
# increase workers before threads_per_worker
threads_per_worker = min_threads_per_worker
for i in range(nfolders):
if (float(i)*threads_per_worker) > threads:
nworkers = i-1
break
else:
nworkers = nfolders
for i in range(min_threads_per_worker, max_threads_per_worker):
if (float(nworkers)*i) > threads:
threads_per_worker = i-1
break
else:
threads_per_worker = max_threads_per_worker
spare_threads = int(threads - (float(nworkers)*threads_per_worker))
return nworkers, threads_per_worker, spare_threads
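# Worked example for the allocation above (assumes the machine reports at least
# 16 available threads): calcWorkers(threads=16, nfolders=5) returns (5, 3, 1),
# i.e. five workers with three threads each and one spare thread, because the
# number of workers is increased before threads_per_worker.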
def parseArguments(args=None):
"""Read command-line arguments"""
# TODO: too complex
stages_err_msg = 'Invalid stage argument. Use \'-s [from]-[to]\' for \
numbers 1 through 4.'
# get args
if not args:
args = createParser().parse_args()
if args.details:
print '\nThis is pG-lt version: ', pglt_version
print pglt_doc
sys.exit()
# check them
if args.stats:
stats()
sys.exit()
if args.clean:
clean()
sys.exit('Files and folders deleted')
if args.reset:
# first read default paradict and genedict
paradict = readInPars('')
genedict = readInGenePars('')
reseter = Reseter(paradict=paradict, genedict=genedict)
reseter.run()
if args.restart:
if args.retry:
print('Restarting and retrying folders that failed ....')
else:
print('Restarting ....')
return True, args.retry, None, None, None, None, None
if not args.email:
# stop if no email
sys.exit('An email address must be provided. Use \'-e\'.')
# extract stages
if not re.match('[1-4]-[1-4]', args.stages):
sys.exit(stages_err_msg)
startend = [int(e) for e in args.stages.split('-')]
stages = [str(e) for e in range(startend[0], startend[1]+1)]
if not stages:
sys.exit(stages_err_msg)
# check threads is a valid argument
if args.threads == 0 or args.threads < -1:
sys.exit('Invalid threads argument, must be -1 or >0.')
if pthreads and args.threads < 2:
sys.exit('pG-lt is set to use a parallelised version of RAxML, threads must be >= 2')
return False, False, args.email, args.threads, args.verbose, args.debug,\
stages
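# Stage parsing sketch (assumed invocations): '-s 2-3' passes the regex above and
# yields stages == ['2', '3'], while '-s 4-1' matches the regex but produces an
# empty range, so the program exits with stages_err_msg.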
def getFolders():
"""Return folders in directory with names.txt files"""
# list all folders
unchecked_dirs = [f for f in os.listdir('.') if not os.path.isfile(f)]
# remove hidden folders
unchecked_dirs = [d for d in unchecked_dirs if not re.match('^\.', d)]
# loop through each and check they contain a names.txt
checked_dirs = []
for each in unchecked_dirs:
path = os.path.join(os.getcwd(), each)
files = os.listdir(path)
if 'names.txt' in files:
checked_dirs.append(each)
# TODO: change this to have folders with any of the stage folders too
if len(checked_dirs) > 0:
return checked_dirs
else:
sys.exit(nonamestxt_msg)
def setUpLogging(verbose, debug, logname, directory=os.getcwd()):
"""Set up logging : direct and control log statements"""
# get logger
logger = logging.getLogger(logname)
if debug:
# log all statements above DEBUG level
logger.setLevel(logging.DEBUG)
else:
# log all statements above INFO level
# (which is higher than DEBUG)
logger.setLevel(logging.INFO)
# add file hander to root
logfile = os.path.join(directory, 'log.txt')
loghandler = logging.FileHandler(logfile, 'a')
# set statement format -- I only want the message
loghandler.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(loghandler)
if verbose:
# if verbose, copy all info statements to console
console = logging.StreamHandler()
console.setFormatter(logging.Formatter('%(message)s'))
logger.addHandler(console)
logger.propagate = False
return logger
def tearDownLogging(logname):
"""Remove a logger"""
# get logger
logger = logging.getLogger(logname)
# remove handlers
handlers = logger.handlers[:]
for h in handlers:
logger.removeHandler(h)
def createParser():
"""Create parser for command-line"""
parser = argparse.ArgumentParser()
parser.add_argument("-email", "-e", help="please provide email \
for NCBI")
parser.add_argument('--restart', help='restart pipeline if stopped',
action='store_true')
parser.add_argument('--retry', help='if restarting, retry failed stages \
and folders?', action='store_true')
parser.add_argument('--reset', help='open reset mode to change files and \
folders', action='store_true')
parser.add_argument('--stats', help='get stats on status of folders',
action='store_true')
parser.add_argument("-threads", "-t", help="number of threads, default\
\'-1\', will use all available on machine", default=-1, type=int)
parser.add_argument("-stages", "-s", help="stages to run, default \
\'1-4\'", default='1-4')
parser.add_argument("--verbose", help="increase output verbosity",
action="store_true")
parser.add_argument('--details', help='display information about the \
program', action='store_true')
parser.add_argument("--debug", help="log warnings (developer only)",
action="store_true")
parser.add_argument("--clean", help="remove all pG-lt files and \
folders (developer only)", action="store_true")
return parser
def logMessage(phase, logger, folders=None, stage=None, threads=None,
spare_threads=None, email=None, stages=None, counter=None,
retry=None):
# TODO: too complex
if phase == 'program-start':
logger.info(description.format(pglt_version, pglt_year))
logger.info('-' * 28 + ' Run details ' + '-' * 29)
logger.info('Running on [{0}] [{1}]'.format(platform.node(),
platform.platform()))
logger.info('Python [{0}]'.format(sys.version))
logger.info('Using [{0}] threads with [{1}] spare'.
format(threads, spare_threads))
logger.info('Using [{0}] as Entrez email'.format(email))
logger.info('Running stages {0}'.format(stages))
logger.info('Working with the following [{0}] folders:'.
format(len(folders)))
# convert folders to string
folder_string = ''
chars_counter = 0
for each in folders[:-1]:
chars_counter += len(each)
if chars_counter > 70:
# stop at 70 columns
folder_string += each + ',\n'
chars_counter = 0
else:
folder_string += each + ', '
folder_string += folders[-1]
logger.info('[{0}]'.format(folder_string))
logger.info('-' * 70 + '\n')
logger.info('-' * 31 + ' Start ' + '-' * 32)
elif phase == 'program-end':
logger.info('-' * 32 + ' End ' + '-' * 33)
elif phase == 'stage-start':
logger.info('Stage [{0}] started at [{1}]'.format(stage, timestamp()))
elif phase == 'stage-end':
logger.info('Stage [{0}] finished at [{1}] for [{2}] folders'.
format(stage, timestamp(), counter))
elif phase == 'program-restart':
if retry:
logger.info('{0}- Restarting and retrying [{1}] {0}'.
format('-' * 6, timestamp()))
else:
logger.info('{0}- Restarting [{1}] {0}'.
format('-' * 11, timestamp()))
else:
raise(ValueError('Unrecognised phase'))
def prime(directory, arguments, threads):
"""Write pickle files, print arguments"""
# Write pickle files
temp_dir = os.path.join(directory, 'tempfiles')
if not os.path.isdir(temp_dir):
os.mkdir(temp_dir)
with open(os.path.join(temp_dir, "genedict.p"), "wb") as file:
pickle.dump(arguments['genedict'], file)
with open(os.path.join(temp_dir, "paradict.p"), "wb") as file:
pickle.dump(arguments['paradict'], file)
with open(os.path.join(temp_dir, "terms.p"), "wb") as file:
pickle.dump(arguments['terms'], file)
with open(os.path.join(temp_dir, 'threads.p'), "wb") as file:
pickle.dump(threads, file)
with open(os.path.join(temp_dir, 'progress.p'), "wb") as file:
pickle.dump(progress, file)
# Print arguments and parameters to file
record = 'Working with [{0}] names\n'.format(len(arguments['terms']))
record += recordPars(arguments['paradict'])
record += recordGpars(arguments['genedict'])
with open(os.path.join(directory, 'info.txt'), 'w') as file:
file.write(record)
def timestamp():
timestamp = datetime.today().strftime("%A, %d %B %Y %I:%M%p")
return timestamp
def recordPars(paradict):
"""Return pglt parameters string"""
record = '\nUsing the following parameters:\n'
for key in paradict.keys():
record += ' [{0}] = [{1}]\n'.format(key, paradict[key])
return record
def recordGpars(genedict):
"""Return gene parameters string"""
record = '\nUsing the following genes and gene parameters:\n'
for gene in genedict.keys():
record += ' Gene: [{0}]\n'.format(gene)
for par in genedict[gene]:
record += ' [{0}] = [{1}]\n'.format(par, genedict[gene][par])
return record
def readInNames(directory):
"""Read names from text file in dir"""
terms = []
with open(os.path.join(directory, 'names.txt')) as names:
for name in names:
terms.append(name.strip())
terms = [term for term in terms if not term == '']
return terms
def readInGenePars(gpars_file):
"""Read gene_parameters.csv. Return list of dictionaries."""
# TODO: too complex, consider breaking up
def _read(gpars_file, template, genes=None):
# open csv file and replace parameters in template
# if they are None. If genes specified, only read
# rows for those genes.
with open(gpars_file, 'rb') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if genes:
if not row['gene'] in genes:
continue
temp = template.copy()
for key in temp.keys():
if row[key]:
if temp[key] is None:
if key == 'names':
# for names, split into a list of syns
temp[key] = row[key].split(':')
else:
temp[key] = row[key]
genedict[row['gene']] = temp
return genedict
# check if file exists, else use default
if not os.path.isfile(gpars_file):
return readInGenePars(GPARS)
# genedicts
genedict = {}
# template of dict in genedict
template = {'names': None, 'taxid': None, 'minlen': None, 'maxlen': None,
'maxgaps': None, 'minoverlap': None, 'maxfails': None,
'maxtrys': None, 'minseedsize': None, 'maxseedsize': None,
'maxseedtrys': None, 'partition': None, 'type': None}
# open file, read each row and fill in template
genedict = _read(gpars_file, template)
# if Nones, use defaults
nones = False
for gene in genedict.keys():
for par in genedict[gene].keys():
if genedict[gene][par] is None:
nones = True
break
if nones:
# run _read for defaults and limit to genes in genedict
genedict = _read(GPARS, template, genedict.keys())
return genedict
def readInPars(pars_file):
"""Read gene_parameters.csv. Return dictionary."""
def _read(pars_file, paradict):
# open csv, and replace all Nones
with open(pars_file, 'rb') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if paradict[row["Parameter"]] is None:
paradict[row["Parameter"]] = row["Value"]
return paradict
# check if file exists, else use default
if not os.path.isfile(pars_file):
return readInPars(PARS)
# template
paradict = {'nseqs': None, 'naligns': None, 'nphylos': None,
'thoroughness': None, 'maxtrys': None, 'rttstat': None,
'parentid': None, 'outgroupid': None, 'constraint': None,
'minspecies': None, 'minspecies_gene': None,
'minnseqs_gene': None, 'target_ngenes': None, 'maxpn': None,
'votesize': None, 'maxvotetrys': None, 'taxonomic_constraint':
None}
# open file, read each row, extract value
paradict = _read(pars_file, paradict)
# if Nones remain, use default
nones = False
for key in paradict.keys():
if paradict[key] is None:
nones = True
break
if nones:
paradict = _read(PARS, paradict)
return paradict
def sortArgs(directory, email, logger):
"""Search for relevant files in dir, return list of arguments"""
# find text file and read, raise error if fail
try:
terms = readInNames(directory)
except IOError:
logger.error(ioerror_msg.format('names.txt', directory))
raise PrimingError()
# find gene parameter file and read, raise error if fail
try:
genedict = readInGenePars(os.path.join(directory,
'gene_parameters.csv'))
except IOError:
logger.error(ioerror_msg.format('gene_parameters.csv', directory))
raise PrimingError()
# find parameter file and read, raise error if fail
try:
paradict = readInPars(os.path.join(directory,
'parameters.csv'))
except IOError:
logger.error(ioerror_msg.format('parameters.csv', directory))
raise PrimingError()
# add email to paradict
paradict['email'] = email
return {'terms': terms, 'genedict': genedict, 'paradict': paradict}
| gpl-2.0 | -6,247,521,024,659,388,000 | 36.853007 | 93 | 0.595552 | false |
mediafactory/yats | sites/web/web/models.py | 1 | 3394 | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.functional import lazy
from django.core.cache import cache
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from yats.models import tickets
from yats.models import base
import datetime
import base64
import httplib2
try:
import json
except ImportError:
from django.utils import simplejson as json
class ticket_component(base):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Meta:
verbose_name = _('module')
verbose_name_plural = _(u'modules')
ordering = ['name']
def getGibthubTags():
owner = settings.GITHUB_OWNER
repo = settings.GITHUB_REPO
user = settings.GITHUB_USER
password = settings.GITHUB_PASS
if not owner or not repo:
return ()
cache_name = 'yats.%s.%s.tags.github' % (owner, repo)
tags = cache.get(cache_name)
if tags:
return tuple(reversed(sorted(tags)))
# https://developer.github.com/v3/repos/#list-tags
result = []
headers = {
'Accept': 'application/vnd.github.v3+json',
'User-Agent': 'yats'
}
if user:
headers['Authorization'] = 'Basic %s' % base64.b64encode('%s:%s' % (user, password))
try:
h = httplib2.Http()
header, content = h.request('https://api.github.com/repos/%s/%s/tags' % (owner, repo), 'GET', headers=headers)
if header['status'] != '200':
print('ERROR fetching data from GitHub: %s' % content)
return ()
except Exception:
print('ERROR fetching data from GitHub')
return ()
tags = json.loads(content)
for tag in tags:
result.append((tag['name'], tag['name'],))
cache.set(cache_name, result, 60 * 10)
return tuple(reversed(sorted(result)))
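# Shape of the result (illustrative, assuming the repository carries tags v1.0,
# v2.0 and v2.1): (('v2.1', 'v2.1'), ('v2.0', 'v2.0'), ('v1.0', 'v1.0')) -- a
# reverse-sorted tuple of 2-tuples suitable for a Django "choices" argument,
# cached for 10 minutes to avoid hitting the GitHub API on every call.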
BILLING_TYPE_CHOICES = (
('service', 'service'),
('development', 'development'),
)
class test(tickets):
component = models.ForeignKey(ticket_component, on_delete=models.CASCADE, verbose_name=_('component'))
version = models.CharField(_('version'), max_length=255, choices=lazy(getGibthubTags, tuple)())
keywords = models.CharField(_('keywords'), max_length=255, blank=True)
reproduction = models.TextField(_('reproduction'), null=True)
billing_needed = models.NullBooleanField(_('billing needed'), default=True)
billing_done = models.NullBooleanField(_('billing done'), default=None)
billing_reason = models.TextField(_('billing reason'), null=True, blank=True)
billing_estimated_time = models.FloatField(_('billing estimated time'), null=True, blank=True)
    billing_time_taken = models.FloatField(_('billing time taken'), null=True, blank=True)
billing_type = models.CharField(_('billing type'), max_length=255, choices=BILLING_TYPE_CHOICES, null=True, blank=True)
solution = models.TextField(_('solution'), null=True, blank=True)
fixed_in_version = models.CharField(_('fixed in version'), max_length=255, choices=lazy(getGibthubTags, tuple)(), blank=True)
deadline = models.DateTimeField(_('deadline'), null=True, blank=True)
    def is_late(self):
        # deadline is a nullable DateTimeField, so guard against None and
        # compare dates to avoid mixing datetime and date objects
        if not self.deadline:
            return 0
        deadline_date = self.deadline.date()
        if deadline_date < datetime.date.today():
            return 2
        if deadline_date < datetime.date.today() + datetime.timedelta(days=7):
            return 1
        return 0
| mit | -4,443,041,059,347,932,700 | 33.632653 | 129 | 0.652033 | false |
hh-italian-group/h-tautau | Production/crab/split_dataset.py | 1 | 2726 | #!/usr/bin/env python
# Create json files to split dataset into several parts.
# This file is part of https://github.com/hh-italian-group/h-tautau.
import argparse
from FWCore.PythonUtilities.LumiList import LumiList
from dbs.apis.dbsClient import DbsApi
parser = argparse.ArgumentParser(description='Create json files to split dataset into several parts.',
formatter_class = lambda prog: argparse.HelpFormatter(prog,width=90))
parser.add_argument('--dataset', required=True, dest='dataset', type=str, help="Dataset name")
parser.add_argument('--output-prefix', required=True, dest='output_prefix', type=str,
help="Prefix for output splitted json files")
parser.add_argument('--output-suffix', required=False, dest='output_suffix', type=str, default='sub',
help="Prefix for output splitted json files")
parser.add_argument('--n-splits', required=True, dest='n_splits', type=int, help="Number of splits")
args = parser.parse_args()
if args.n_splits < 1:
raise RuntimeError('Number of splits should be >= 1.')
def FindMaxLumi(dbs, dataset):
blocks = dbs.listBlocks(dataset=dataset)
max_lumi = 0
for block_entry in blocks:
block_lumis = dbs.listFileLumis(block_name=block_entry['block_name'])
for file_entry in block_lumis:
file_lumis = file_entry['lumi_section_num']
max_file_lumi = max(file_lumis)
max_lumi = max(max_lumi, max_file_lumi)
return max_lumi
def GetRunList(dbs, dataset):
runs = dbs.listRuns(dataset=dataset)
run_list = []
for run in runs:
run_list.extend(run['run_num'])
    # use the built-in set to de-duplicate run numbers
    return list(set(run_list))
def SaveLumis(file_name, lumis):
lumi_file = open(file_name, 'w')
lumi_file.write(str(lumis))
lumi_file.close()
dbs = DbsApi('https://cmsweb.cern.ch/dbs/prod/global/DBSReader')
print("Loading runs...")
runs = GetRunList(dbs, args.dataset)
if len(runs) != 1:
raise RuntimeError('Only datasets with one run are currently supported.')
print("Loading lumis...")
max_lumi = FindMaxLumi(dbs, args.dataset)
splits = [ int(float(n + 1) / args.n_splits * max_lumi) for n in range(0, args.n_splits) ]
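# For example (assumed values): with max_lumi = 1000 and --n-splits 4 this gives
# splits = [250, 500, 750, 1000], so the loop below writes lumi ranges
# [1-250], [251-500], [501-750] and [751-1000] to four separate json files.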
print("Max lumi: {}".format(max_lumi))
print("Lumi splits: {}".format(splits))
last_lumi = 0
for split_number in range(0, len(splits)):
split = splits[split_number]
lumis = {}
lumis[runs[0]] = []
lumis[runs[0]].append([last_lumi + 1, split])
file_name = '{}_{}{}.json'.format(args.output_prefix, args.output_suffix, split_number + 1)
SaveLumis(file_name, LumiList(compactList=lumis))
last_lumi = split
print("Dataset lumis are split into {} parts.".format(args.n_splits))
| gpl-2.0 | -1,129,987,620,451,551,900 | 37.394366 | 102 | 0.676816 | false |
UPDDI/mps-database-server | assays/forms.py | 1 | 247404 | import datetime
from django import forms
from django.contrib.auth.models import Group
from django.forms.models import (
BaseInlineFormSet,
inlineformset_factory,
BaseModelFormSet,
modelformset_factory,
)
from cellsamples.models import Biosensor
from assays.models import (
AssayStudyConfiguration,
AssayStudy,
AssayStudySupportingData,
AssayStudyAssay,
AssayMatrix,
AssayCategory,
TEST_TYPE_CHOICES,
PhysicalUnits,
AssaySampleLocation,
AssaySetting,
AssaySetupCompound,
AssaySetupCell,
AssaySetupSetting,
AssayMatrixItem,
AssayStudyStakeholder,
AssayTarget,
AssayMethod,
AssayStudyModel,
AssayStudySet,
AssayReference,
AssayStudyReference,
AssayStudySetReference,
AssayTarget,
AssayMeasurementType,
AssayMethod,
AssaySetting,
AssaySupplier,
AssayCategory,
AssayPlateReaderMap,
AssayPlateReaderMapItem,
AssayPlateReaderMapItemValue,
AssayPlateReaderMapDataFile,
AssayPlateReaderMapDataFileBlock,
# ...
AssayGroup,
AssayGroupCell,
AssayGroupCompound,
AssayGroupSetting,
assay_plate_reader_time_unit_choices,
assay_plate_reader_main_well_use_choices,
assay_plate_reader_blank_well_use_choices,
assay_plate_reader_map_info_plate_size_choices,
assay_plate_reader_volume_unit_choices,
assay_plate_reader_file_delimiter_choices,
upload_file_location,
AssayOmicDataFileUpload,
AssayOmicDataPoint,
AssayOmicAnalysisTarget,
AssayOmicSampleMetadata,
# AssayOmicDataGroup,
AssayDataFileUpload,
assay_omic_data_type_choices,
)
from compounds.models import Compound, CompoundInstance, CompoundSupplier
from microdevices.models import (
MicrophysiologyCenter,
Microdevice,
OrganModel,
OrganModelProtocol,
OrganModelLocation,
)
from mps.forms import SignOffMixin, BootstrapForm, tracking
import string
from captcha.fields import CaptchaField
from .utils import (
# validate_file,
# get_chip_details,
# get_plate_details,
TIME_CONVERSIONS,
# EXCLUDED_DATA_POINT_CODE,
AssayFileProcessor,
get_user_accessible_studies,
plate_reader_data_file_process_data,
CALIBRATION_CURVE_MASTER_DICT,
calibration_choices,
omic_data_file_processing_data_main_for_all_data_types,
COLUMN_HEADERS,
omic_data_quality_clean_check_for_omic_file_upload,
omic_metadata_find_the_labels_needed_for_the_indy_metadata_table,
sck_general_convert_time_from_mintues_to_unit_given,
sck_general_convert_time_unit_given_to_minutes,
sck_general_given_pk_of_organ_model_make_dictionary_with_location_pk_and_location_name,
data_quality_clean_check_for_omic_metadata_empty_fields,
omic_process_the_omic_sample_metadata,
omic_data_quality_clean_check_for_omic_form_fields,
)
from mps.utils import (
get_split_times,
)
from django.utils import timezone
from mps.templatetags.custom_filters import is_group_admin, filter_groups, ADMIN_SUFFIX
from django.core.exceptions import NON_FIELD_ERRORS, ValidationError
from mps.settings import MEDIA_ROOT
import ujson as json
import os
import csv
import re
import operator
# TODO REFACTOR WHITTLING TO BE HERE IN LIEU OF VIEW
# TODO REFACTOR FK QUERYSETS TO AVOID N+1
# These are all of the tracking fields
# tracking = (
# 'created_by',
# 'created_on',
# 'modified_on',
# 'modified_by',
# 'signed_off_by',
# 'signed_off_date',
# 'locked',
# 'restricted'
# )
# Excluding restricted is likewise useful
restricted = ('restricted',)
# Group
group = ('group',)
# For flagging
flag_group = (
'flagged',
'reason_for_flag'
)
def get_dic_for_custom_choice_field(form, filters=None):
dic = {}
fields = form.custom_fields
parent = form.model
for field in fields:
model = parent._meta.get_field(field).related_model
if filters and filters.get(field, None):
dic.update({
field: {str(instance.id): instance for instance in model.objects.filter(**filters.get(field))}
})
else:
dic.update({
field: {str(instance.id): instance for instance in model.objects.all()}
})
return dic
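# Sketch of the returned structure (field and model names here are illustrative,
# not taken from this file): for a form with custom_fields = ('matrix', 'organ_model')
# this returns {'matrix': {'1': <AssayMatrix>, ...}, 'organ_model': {'7': <OrganModel>, ...}},
# i.e. one primary-key-keyed lookup dictionary per related model, which
# DicModelChoiceField below uses to validate choices without a query per form.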
class SetupFormsMixin(BootstrapForm):
### ADDING SETUP CELLS
cell_cell_sample = forms.IntegerField(required=False)
cell_biosensor = forms.ModelChoiceField(
queryset=Biosensor.objects.all().prefetch_related('supplier'),
required=False,
# Default is naive
initial=2
)
cell_density = forms.FloatField(required=False)
# TODO THIS IS TO BE HAMMERED OUT
cell_density_unit = forms.ModelChoiceField(
queryset=PhysicalUnits.objects.filter(
availability__contains='cell'
).order_by('unit'),
required=False
)
cell_passage = forms.CharField(required=False)
cell_addition_location = forms.ModelChoiceField(
# Avoid duplicate query
queryset=AssaySampleLocation.objects.all().order_by('name'),
# queryset=AssaySampleLocation.objects.none(),
required=False
)
### ?ADDING SETUP SETTINGS
setting_setting = forms.ModelChoiceField(
queryset=AssaySetting.objects.all().order_by('name'),
required=False
)
setting_unit = forms.ModelChoiceField(
queryset=PhysicalUnits.objects.all().order_by('base_unit','scale_factor'),
required=False
)
setting_value = forms.CharField(required=False)
setting_addition_location = forms.ModelChoiceField(
# Avoid duplicate query
queryset=AssaySampleLocation.objects.all().order_by('name'),
# queryset=AssaySampleLocation.objects.none(),
required=False
)
### ADDING COMPOUNDS
compound_compound = forms.ModelChoiceField(
queryset=Compound.objects.all().order_by('name'),
required=False
)
# Notice the special exception for %
compound_concentration_unit = forms.ModelChoiceField(
queryset=(PhysicalUnits.objects.filter(
unit_type__unit_type='Concentration'
).order_by(
'base_unit__unit',
'scale_factor'
) | PhysicalUnits.objects.filter(unit='%')),
required=False, initial=4
)
compound_concentration = forms.FloatField(required=False)
compound_addition_location = forms.ModelChoiceField(
# Avoid duplicate query
queryset=AssaySampleLocation.objects.all().order_by('name'),
# queryset=AssaySampleLocation.objects.none(),
required=False
)
# Text field (un-saved) for supplier
compound_supplier_text = forms.CharField(
required=False,
initial=''
)
# Text field (un-saved) for lot
compound_lot_text = forms.CharField(
required=False,
initial=''
)
# Receipt date
compound_receipt_date = forms.DateField(required=False)
# For MPS Models etc.
test_type = forms.ChoiceField(
initial='control',
choices=TEST_TYPE_CHOICES,
required=False
)
organ_model_full = forms.ModelChoiceField(
queryset=OrganModel.objects.all().order_by('name'),
required=False,
label='Matrix Item MPS Model'
)
organ_model_protocol_full = forms.ModelChoiceField(
queryset=OrganModelProtocol.objects.all().order_by('name'),
required=False,
label='Matrix Item MPS Model Version'
)
def __init__(self, *args, **kwargs):
super(SetupFormsMixin, self).__init__(*args, **kwargs)
sections_with_times = (
'compound',
'cell',
'setting'
)
for time_unit in list(TIME_CONVERSIONS.keys()):
for current_section in sections_with_times:
# Create fields for Days, Hours, Minutes
self.fields[current_section + '_addition_time_' + time_unit] = forms.FloatField(
initial=0,
required=False,
widget=forms.NumberInput(attrs={
'class': 'form-control required',
'style': 'width:75px;'
})
)
self.fields[current_section + '_duration_' + time_unit] = forms.FloatField(
initial=0,
required=False,
widget=forms.NumberInput(attrs={
'class': 'form-control required',
'style': 'width:75px;'
})
)
self.fields['cell_cell_sample'].widget.attrs['style'] = 'width:75px;'
self.fields['cell_passage'].widget.attrs['style'] = 'width:75px;'
        # DUMB, BAD (can't have them be "actually" required or they prevent submission)
add_required_to = [
'cell_cell_sample',
'cell_biosensor',
'cell_density',
'cell_density_unit',
'cell_addition_location',
'setting_setting',
'setting_unit',
'setting_value',
'setting_addition_location',
'compound_compound',
'compound_concentration_unit',
'compound_concentration',
'compound_addition_location',
]
for current_field in add_required_to:
self.fields[current_field].widget.attrs['class'] += ' required'
# Sloppy
if hasattr(self.fields[current_field], '_queryset'):
if hasattr(self.fields[current_field]._queryset, 'model'):
# Usually one would use a hyphen rather than an underscore
# self.fields[field].widget.attrs['data-app'] = self.fields[field]._queryset.model._meta.app_label
self.fields[current_field].widget.attrs['data_app'] = self.fields[current_field]._queryset.model._meta.app_label
# self.fields[field].widget.attrs['data-model'] = self.fields[field]._queryset.model._meta.object_name
self.fields[current_field].widget.attrs['data_model'] = self.fields[current_field]._queryset.model._meta.object_name
self.fields[current_field].widget.attrs['data_verbose_name'] = self.fields[current_field]._queryset.model._meta.verbose_name
# Possibly dumber
# In Bootstrap Form
# if hasattr(self.fields[current_field]._queryset.model, 'get_add_url_manager'):
# self.fields[current_field].widget.attrs['data_add_url'] = self.fields[current_field]._queryset.model.get_add_url_manager()
# Avoid duplicate queries for the sample locations
# sample_locations = AssaySampleLocation.objects.all().order_by('name')
# self.fields['cell_addition_location'].queryset = sample_locations
# self.fields['compound_addition_location'].queryset = sample_locations
# self.fields['setting_addition_location'].queryset = sample_locations
# CRUDE: MAKE SURE NO SELECTIZE INTERFERING
self.fields['organ_model_full'].widget.attrs['class'] = 'no-selectize'
self.fields['organ_model_protocol_full'].widget.attrs['class'] = 'no-selectize'
self.fields['test_type'].widget.attrs['class'] = 'no-selectize'
# DEPRECATED NO LONGER NEEDED AS CHARFIELDS NOW STRIP AUTOMATICALLY
class ModelFormStripWhiteSpace(BootstrapForm):
"""Strips the whitespace from char and text fields"""
def clean(self):
cd = self.cleaned_data
for field_name, field in list(self.fields.items()):
if isinstance(field, forms.CharField):
if self.fields[field_name].required and not cd.get(field_name, None):
self.add_error(field_name, "This is a required field.")
else:
cd[field_name] = cd[field_name].strip()
return super(ModelFormStripWhiteSpace, self).clean()
class ModelFormSplitTime(BootstrapForm):
def __init__(self, *args, **kwargs):
super(ModelFormSplitTime, self).__init__(*args, **kwargs)
for time_unit in list(TIME_CONVERSIONS.keys()):
if self.fields.get('addition_time', None):
# Create fields for Days, Hours, Minutes
self.fields['addition_time_' + time_unit] = forms.FloatField(
initial=0,
widget=forms.NumberInput(attrs={
'class': 'form-control',
'style': 'width:75px;'
})
)
# Set default
self.fields['addition_time_' + time_unit].widget.attrs['data-default'] = 0
if self.fields.get('duration', None):
self.fields['duration_' + time_unit] = forms.FloatField(
initial=0,
widget=forms.NumberInput(attrs={
'class': 'form-control',
'style': 'width:75px;'
})
)
# Set default
self.fields['duration_' + time_unit].widget.attrs['data-default'] = 0
# Fill additional time
if self.fields.get('addition_time', None):
addition_time_in_minutes_remaining = getattr(self.instance, 'addition_time', 0)
if not addition_time_in_minutes_remaining:
addition_time_in_minutes_remaining = 0
for time_unit, conversion in list(TIME_CONVERSIONS.items()):
initial_time_for_current_field = int(addition_time_in_minutes_remaining / conversion)
if initial_time_for_current_field:
self.fields['addition_time_' + time_unit].initial = initial_time_for_current_field
addition_time_in_minutes_remaining -= initial_time_for_current_field * conversion
# Add fractions of minutes if necessary
if addition_time_in_minutes_remaining:
self.fields['addition_time_minute'].initial += addition_time_in_minutes_remaining
# Fill duration
if self.fields.get('duration', None):
duration_in_minutes_remaining = getattr(self.instance, 'duration', 0)
if not duration_in_minutes_remaining:
duration_in_minutes_remaining = 0
for time_unit, conversion in list(TIME_CONVERSIONS.items()):
initial_time_for_current_field = int(duration_in_minutes_remaining / conversion)
if initial_time_for_current_field:
self.fields['duration_' + time_unit].initial = initial_time_for_current_field
duration_in_minutes_remaining -= initial_time_for_current_field * conversion
# Add fractions of minutes if necessary
if duration_in_minutes_remaining:
self.fields['duration_minute'].initial += duration_in_minutes_remaining
def clean(self):
cleaned_data = super(ModelFormSplitTime, self).clean()
if cleaned_data and not cleaned_data.get('DELETE', False):
cleaned_data.update({
'addition_time': 0,
'duration': 0
})
for time_unit, conversion in list(TIME_CONVERSIONS.items()):
cleaned_data.update({
'addition_time': cleaned_data.get('addition_time') + cleaned_data.get('addition_time_' + time_unit,
0) * conversion,
'duration': cleaned_data.get('duration') + cleaned_data.get('duration_' + time_unit, 0) * conversion
})
return cleaned_data
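# Behaviour sketch (assuming TIME_CONVERSIONS is an ordered mapping of
# 'day'/'hour'/'minute' to their lengths in minutes, largest unit first, as the
# 'addition_time_minute' fallback above suggests): an instance with duration = 90
# is rendered as duration_hour = 1 and duration_minute = 30, and clean()
# recombines the split fields back into a single 'duration' expressed in minutes.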
# TODO TODO TODO PLEASE, PLEASE GET RID OF THIS TRASH!
class BaseModelFormSetForcedUniqueness(BaseModelFormSet):
def clean(self):
self.validate_unique()
def validate_unique(self):
# Collect unique_checks and date_checks to run from all the forms.
all_unique_checks = set()
all_date_checks = set()
forms_to_delete = self.deleted_forms
valid_forms = [form for form in self.forms if form not in forms_to_delete and form.is_valid()]
for form in valid_forms:
# exclude = form._get_validation_exclusions()
# unique_checks, date_checks = form.instance._get_unique_checks(exclude=exclude)
unique_checks, date_checks = form.instance._get_unique_checks()
all_unique_checks = all_unique_checks.union(set(unique_checks))
all_date_checks = all_date_checks.union(set(date_checks))
errors = []
# Do each of the unique checks (unique and unique_together)
for uclass, unique_check in all_unique_checks:
seen_data = set()
for form in valid_forms:
# PLEASE NOTE: SPECIAL EXCEPTION FOR FORMS WITH NO ID TO AVOID TRIGGERING ID DUPLICATE
if unique_check == ('id',) and not form.cleaned_data.get('id', ''):
# IN POOR TASTE, BUT EXPEDIENT
continue
# get data for each field of each of unique_check
# PLEASE NOTE THAT THIS GETS ALL FIELDS, EVEN IF NOT IN THE FORM
row_data = (
form.cleaned_data[field] if field in form.cleaned_data else getattr(form.instance, field, None) for field in unique_check
)
# Reduce Model instances to their primary key values
row_data = tuple(d._get_pk_val() if hasattr(d, '_get_pk_val') else d for d in row_data)
# if row_data and None not in row_data:
# if we've already seen it then we have a uniqueness failure
if row_data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_unique_error_message(unique_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
for field in unique_check:
if field in form.cleaned_data:
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(row_data)
# iterate over each of the date checks now
for date_check in all_date_checks:
seen_data = set()
uclass, lookup, field, unique_for = date_check
for form in valid_forms:
# see if we have data for both fields
if (form.cleaned_data and form.cleaned_data[field] is not None and form.cleaned_data[unique_for] is not None):
# if it's a date lookup we need to get the data for all the fields
if lookup == 'date':
date = form.cleaned_data[unique_for]
date_data = (date.year, date.month, date.day)
# otherwise it's just the attribute on the date/datetime
# object
else:
date_data = (getattr(form.cleaned_data[unique_for], lookup),)
data = (form.cleaned_data[field],) + date_data
# if we've already seen it then we have a uniqueness failure
if data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_date_error_message(date_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(data)
if errors:
raise forms.ValidationError(errors)
# TODO TODO TODO WILL NEED TO CHANGE THIS WITH DJANGO VERSION NO DOUBT
class BaseInlineFormSetForcedUniqueness(BaseModelFormSetForcedUniqueness, BaseInlineFormSet):
def clean(self):
self.validate_unique()
class DicModelChoiceField(forms.Field):
"""Special field using dictionary instead of queryset as choices
This is to prevent ludicrous numbers of queries
"""
widget = forms.TextInput
def __init__(self, name, parent, dic, *args, **kwargs):
self.name = name
self.parent = parent
self.dic = dic
self.model = self.parent._meta.get_field(self.name).related_model
super(DicModelChoiceField, self).__init__(*args, **kwargs)
# Make sure required is set properly
self.required = self.widget.required = not (
self.parent._meta.get_field(self.name).null
and
self.parent._meta.get_field(self.name).blank
)
def to_python(self, value):
if value in self.empty_values:
return None
try:
value = self.dic.get(self.name).get(value)
except:
raise forms.ValidationError(self.error_messages['invalid_choice'], code='invalid_choice')
return value
def valid_value(self, value):
"""Check to see if the provided value is a valid choice"""
if str(value.id) in self.dic.get(self.name):
return True
return False
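# Minimal usage sketch (names are illustrative, not from this file): inside a
# formset form's __init__ one might write
#     dic = get_dic_for_custom_choice_field(self)
#     self.fields['matrix'] = DicModelChoiceField('matrix', AssayMatrixItem, dic)
# so that to_python() and valid_value() resolve choices via dictionary lookups
# instead of issuing one queryset evaluation per form.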
class AssayStudyConfigurationForm(SignOffMixin, BootstrapForm):
"""Frontend Form for Study Configurations"""
class Meta(object):
model = AssayStudyConfiguration
widgets = {
'name': forms.Textarea(attrs={'cols': 50, 'rows': 1}),
'media_composition': forms.Textarea(attrs={'cols': 50, 'rows': 3}),
'hardware_description': forms.Textarea(attrs={'cols': 50, 'rows': 3}),
}
exclude = tracking
class AssayStudyModelForm(BootstrapForm):
class Meta(object):
model = AssayStudyModel
exclude = ('' ,)
def __init__(self, *args, **kwargs):
super(AssayStudyModelForm, self).__init__(*args, **kwargs)
self.fields['label'].widget.attrs.update({
'size': '4',
'max_length': '2'
})
self.fields['sequence_number'].widget.attrs.update({
'size': '4',
'max_length': '2'
})
self.fields['output'].widget.attrs.update({
'size': '20',
'max_length': '20'
})
# FormSet for Study Models
AssayStudyModelFormSet = inlineformset_factory(
AssayStudyConfiguration,
AssayStudyModel,
extra=1,
form=AssayStudyModelForm,
widgets={
'label': forms.TextInput(attrs={'size': 2}),
'sequence_number': forms.TextInput(attrs={'size': 2})
}
)
def label_to_number(label):
"""Returns a numeric index from an alphabetical index"""
num = 0
for char in label:
if char in string.ascii_letters:
num = num * 26 + (ord(char.upper()) - ord('A')) + 1
return num
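# Illustrative values (computed from the logic above, not from the source):
#   label_to_number('A') == 1, label_to_number('Z') == 26,
#   label_to_number('AA') == 27, label_to_number('AB') == 28
# i.e. spreadsheet-style column labels are read as base-26 numbers.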
# Now uses unicode instead of string
def stringify_excel_value(value):
"""Given an excel value, return a unicode cast of it
This also converts floats to integers when possible
"""
# If the value is just a string literal, return it
    if isinstance(value, str):
return str(value)
else:
try:
# If the value can be an integer, make it into one
if int(value) == float(value):
return str(int(value))
else:
return str(float(value))
except:
return str(value)
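# Quick illustration of the casting rules above (assumed inputs):
#   stringify_excel_value(3.0)   -> '3'     (a float with no fractional part becomes an int)
#   stringify_excel_value(3.25)  -> '3.25'
#   stringify_excel_value('abc') -> 'abc'   (strings are returned unchanged)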
class AssayStudyAssayInlineFormSet(BaseInlineFormSet):
def __init__(self, *args, **kwargs):
"""Init APRA inline
Filters units so that only units marked 'readout' appear
"""
super(AssayStudyAssayInlineFormSet, self).__init__(*args, **kwargs)
target_queryset = AssayTarget.objects.all().order_by('name')
method_queryset = AssayMethod.objects.all().order_by('name')
# unit_queryset = PhysicalUnits.objects.filter(
# availability__icontains='readout'
# ).order_by('unit_type__unit_type', 'base_unit__unit', 'scale_factor')
unit_queryset = PhysicalUnits.objects.order_by('unit_type__unit_type', 'base_unit__unit', 'scale_factor')
category_queryset = AssayCategory.objects.all().order_by('name')
for form in self.forms:
form.fields['target'].queryset = target_queryset
form.fields['method'].queryset = method_queryset
form.fields['unit'].queryset = unit_queryset
form.fields['category'] = forms.ModelChoiceField(
queryset=category_queryset,
required=False,
empty_label='All'
)
class ReadyForSignOffForm(forms.Form):
captcha = CaptchaField()
    # presumably intended to be a form field rather than a bare widget instance
    message = forms.CharField(required=False, widget=forms.TextInput())
# TODO PLEASE REVIEW
class AssayStudyForm(SignOffMixin, BootstrapForm):
def __init__(self, *args, **kwargs):
"""Init the Study Form
Kwargs:
groups -- a queryset of groups (allows us to avoid N+1 problem)
"""
super(AssayStudyForm, self).__init__(*args, **kwargs)
self.fields['group'].queryset = filter_groups(self.user)
# Crudely force required class
for current_field in ['total_device_volume', 'flow_rate', 'number_of_relevant_cells']:
self.fields[current_field].widget.attrs['class'] += ' required'
class Meta(object):
model = AssayStudy
widgets = {
'assay_run_id': forms.Textarea(attrs={'rows': 1}),
'name': forms.Textarea(attrs={'rows': 1}),
'description': forms.Textarea(attrs={'rows': 5, 'cols': 100}),
}
exclude = tracking + restricted + ('access_groups', 'signed_off_notes', 'bulk_file', 'collaborator_groups')
def clean(self):
"""Checks for at least one study type"""
# clean the form data, before validation
data = super(AssayStudyForm, self).clean()
if not any([data['toxicity'], data['efficacy'], data['disease'], data['cell_characterization'], data['omics'], data['pbpk_steady_state'], data['pbpk_bolus']]):
raise forms.ValidationError('Please select at least one study type')
if data.get('pbpk_steady_state', '') and (not data.get('number_of_relevant_cells', '') or not data.get('flow_rate', '')):
raise forms.ValidationError('Continuous Infusion PBPK Requires Number of Cells Per MPS Model and Flow Rate')
if data.get('pbpk_bolus', '') and (not data.get('number_of_relevant_cells', '') or not data.get('total_device_volume', '')):
raise forms.ValidationError('Bolus PBPK Requires Number of Cells Per MPS Model and Total Device Volume')
return data
class AssayStudyDetailForm(SignOffMixin, BootstrapForm):
def __init__(self, *args, **kwargs):
super(AssayStudyDetailForm, self).__init__(*args, **kwargs)
# Get valid groups for the dropdown
self.fields['group'].queryset = filter_groups(self.user)
# Crudely force required class
for current_field in ['total_device_volume', 'flow_rate', 'number_of_relevant_cells']:
self.fields[current_field].widget.attrs['class'] += ' required'
class Meta(object):
model = AssayStudy
widgets = {
'name': forms.Textarea(attrs={'rows': 1}),
'description': forms.Textarea(attrs={'rows': 5, 'cols': 100}),
}
# Since we are splitting into multiple forms, includes are safer
fields = (
'group',
'toxicity',
'efficacy',
'disease',
'cell_characterization',
'omics',
'diseases',
'start_date',
'use_in_calculations',
'protocol',
'image',
'pbpk_steady_state',
'pbpk_bolus',
'number_of_relevant_cells',
'total_device_volume',
'flow_rate',
'name',
'description',
) + flag_group
def clean(self):
"""Checks for at least one study type"""
# clean the form data, before validation
data = super(AssayStudyDetailForm, self).clean()
if not any([data['toxicity'], data['efficacy'], data['disease'], data['cell_characterization'], data['omics'], data['pbpk_steady_state'], data['pbpk_bolus']]):
raise forms.ValidationError('Please select at least one study type')
if data.get('pbpk_steady_state', '') and (not data.get('number_of_relevant_cells', '') or not data.get('flow_rate', '')):
raise forms.ValidationError('Continuous Infusion PBPK Requires Number of Cells Per MPS Model and Flow Rate')
if data.get('pbpk_bolus', '') and (not data.get('number_of_relevant_cells', '') or not data.get('total_device_volume', '')):
raise forms.ValidationError('Bolus PBPK Requires Number of Cells Per MPS Model and Total Device Volume')
return data
# TODO: OBVIOUSLY, WE WOULD RATHER NOT USE THIS FOR DETAIL PAGES ETC.
# Instead we could hook up an AJAX request that does this? We would need to revise the difference table generation
class AssayStudyGroupForm(SetupFormsMixin, SignOffMixin, BootstrapForm):
# CONTRIVANCES
# test_type = forms.ChoiceField(
# initial='control',
# choices=TEST_TYPE_CHOICES,
# required=False
# )
# organ_model_full = forms.ModelChoiceField(
# queryset=OrganModel.objects.all().order_by('name'),
# required=False,
# label='Matrix Item MPS Model'
# )
# organ_model_protocol_full = forms.ModelChoiceField(
# queryset=OrganModelProtocol.objects.all().order_by('name'),
# required=False,
# label='Matrix Item MPS Model Version'
# )
# number_of_items = forms.CharField(
# initial='',
# required=False
# )
# group_name = forms.CharField(
# initial='',
# required=False
# )
# CONTRIVED!
series_data = forms.CharField(required=False)
# Contrivance
organ_model = forms.ModelChoiceField(
queryset=OrganModel.objects.all().order_by('name'),
required=False,
label='Matrix Item MPS Model'
)
update_group_fields = [
'name',
'test_type',
'organ_model_id',
'organ_model_protocol_id',
]
class Meta(object):
model = AssayStudy
# Since we are splitting into multiple forms, includes are safer
# Only temporary, will change when finished
fields = (
# TEMPORARY ->
'series_data',
# <- TEMPORARY
'test_type',
'organ_model',
'organ_model_full',
# 'group_name',
# TEMP!
'organ_model_protocol',
'organ_model_protocol_full',
'cell_cell_sample',
'cell_biosensor',
'cell_density',
'cell_density_unit',
'cell_passage',
'cell_addition_location',
'setting_setting',
'setting_unit',
'setting_value',
'setting_addition_location',
'compound_compound',
'compound_concentration_unit',
'compound_concentration',
'compound_addition_location',
'compound_supplier_text',
'compound_lot_text',
'compound_receipt_date',
) + flag_group
def __init__(self, *args, **kwargs):
# TODO TODO TODO REVISE REVISE REVISE
# WE PROBABLY WON'T NEED THIS KWARG AFTER WE FIX DIFFERENCE TABLE DISPLAY TO NO LONGER USE KLUDGE
special_filter = kwargs.pop('special_filter', '')
# LIKEWISE CONTRIVED
get_chips = kwargs.pop('get_chips', True)
super(AssayStudyGroupForm, self).__init__(*args, **kwargs)
# Contrivances
self.fields['test_type'].widget.attrs['class'] = 'no-selectize required form-control'
# Prepopulate series_data from thing
# PLEASE NOTE: The special_filter is contrived
self.fields['series_data'].initial = self.instance.get_group_data_string(get_chips=get_chips, special_filter=special_filter)
def clean(self):
"""Checks for at least one study type"""
# clean the form data, before validation
data = super(AssayStudyGroupForm, self).clean()
# SLOPPY NOT DRY
new_setup_data = {}
# This matrix is only for chips
# WARNING: THIS WILL BREAK IN STUDIES WITH MULTIPLE CHIP SETS
# IMPORTANT NOTE: WHEN BACK-FILLING, WE WILL NEED TO CONSOLIDATE CHIP MATRICES! Otherwise this flow will not work correctly...
current_matrix = AssayMatrix.objects.filter(
# The study must exist in order to visit this page, so getting the id this was is fine
study_id=self.instance.id,
representation='chips'
)
# Current group ids so that we can match for deletes and edits
current_groups = AssayGroup.objects.filter(
study_id=self.instance.id
)
current_group_ids = {
group.id: group for group in current_groups
}
# Exceedingly stupid: kludge for the "re-order" problem
special_group_handling_required = False
# Crude, but we need current groups
current_group_names = {group.name: group.id for group in current_group_ids.values()}
if current_matrix:
current_matrix = current_matrix[0].id
else:
current_matrix = None
# Ditto for items (chips, in this case as wells are managed elsewhere)
# We only care about chips, at the moment
current_items = AssayMatrixItem.objects.filter(
study_id=self.instance.id,
matrix_id=current_matrix
)
current_item_ids = {
item.id: item for item in current_items
}
# For auto-name assignation
current_item_names = {
item.name: True for item in current_items
}
# TODO TODO TODO CONTRIVED: CHECK MAX FOR COLUMN INDEX ASSIGNATION
current_column_max = max([int(column_index) for column_index in current_items.values_list('column_index', flat=True)], default=-1)
# Need to get the current groups (this could be an edit of groups)
new_groups = None
# Note that the instance is None for new adds, of course
current_groups = AssayGroup.objects.filter(study_id=self.instance.id)
new_items = None
# Likewise with chips, some may need to be edited or removed etc.
# DO NOT DUPLICATE QUERY
# current_items = AssayMatrixItem.objects.filter(matrix_id=current_matrix)
# This is supposed to contain data for cells, compounds, and settings (perhaps more later)
new_related = None
# Just have the errors be non-field errors for the moment
all_errors = {'series_data': [], '__all__': []}
current_errors = all_errors.get('series_data')
non_field_errors = all_errors.get('__all__')
# Am I sticking with the name 'series_data'?
if self.cleaned_data.get('series_data', None):
all_data = json.loads(self.cleaned_data.get('series_data', '[]'))
else:
# Contrived defaults
all_data = {
'series_data': [],
'chips': [],
'plates': {}
}
# The data for groups is currently stored in series_data
all_setup_data = all_data.get('series_data')
all_chip_data = all_data.get('chips')
# Catch technically empty setup data
setup_data_is_empty = True
for group_set in all_setup_data:
if group_set:
setup_data_is_empty = not any(group_set.values())
if setup_data_is_empty:
all_setup_data = []
# if commit and all_setup_data:
# SEE BASE MODELS FOR WHY COMMIT IS NOT HERE
if all_setup_data:
created_by = self.user
created_on = timezone.now()
# current_item_number = 1
# CRUDE: JUST MAKE ONE LARGE ROW?
number_of_items = 0
for setup_group in all_setup_data:
if setup_group.get('number_of_items'):
number_of_items += int(setup_group.get('number_of_items', '0'))
# Alternative for one row per group
# # Find max for number of columns
# number_of_columns = 0
# for setup_group in all_setup_data:
# if int(setup_group.get('number_of_items', '0')) > number_of_columns:
# number_of_columns = int(setup_group.get('number_of_items', '0'))
if not current_matrix:
new_matrix = AssayMatrix(
# Just name the chip matrix the same thing as the study?
# PLEASE NOTE: Study names can be huge for some reason
# FOR NOW: just make sure they can save
# We should edit max length of study name later
name=self.instance.name[:255],
# Does not work with plates at the moment
representation='chips',
study=self.instance,
# Doesn't matter for chips
device=None,
organ_model=None,
# Alternative that looks nicer, but these matrices probably won't be accessible anyway
# number_of_rows=len(all_setup_data),
# number_of_columns=number_of_columns,
number_of_rows=1,
number_of_columns=number_of_items,
created_by=created_by,
created_on=created_on,
modified_by=created_by,
modified_on=created_on,
)
try:
new_matrix.full_clean()
except forms.ValidationError as e:
non_field_errors.append(e)
else:
new_matrix = None
# COMPOUND STUFF BECAUSE COMPOUND SCHEMA IS MISERABLE
# Get all chip setup assay compound instances
assay_compound_instances = {}
# Get all Compound Instances
compound_instances = {
(
instance.compound.id,
instance.supplier.id,
instance.lot,
str(instance.receipt_date)
): instance for instance in CompoundInstance.objects.all().prefetch_related(
'compound',
'supplier'
)
}
# Get all suppliers
suppliers = {
supplier.name: supplier for supplier in CompoundSupplier.objects.all()
}
# SLOPPY TODO TODO TODO
# Would be much neater to have this in an object or something
new_groups = []
update_groups = []
deleted_groups = []
# CHECK UNIQUENESS
group_names = {}
new_items = []
update_items = []
deleted_items = []
# Superfluous?
new_item_to_group_name = {}
new_cells = []
update_cells = []
deleted_cells = []
new_compounds = []
update_compounds = []
deleted_compounds = []
new_settings = []
update_settings = []
deleted_settings = []
            # For now, chips are all in one row
for setup_row, setup_group in enumerate(all_setup_data):
                if setup_group.get('number_of_items') in (None, ''):
continue
items_in_group = int(setup_group.get('number_of_items', '0'))
test_type = setup_group.get('test_type', '')
# To break out to prevent repeat errors
group_has_error = False
# Make the group
# Add the group to the new_groups
# TODO DIFFERENTIATE NEW AND EXISTING GROUPS HERE
# We can identify and existing group by checking for an id
current_group = current_group_ids.get(int(setup_group.get('id', 0)), None)
if current_group:
# ???
# new_group = current_group
if setup_group.get('deleted', False):
deleted_groups.append(current_group.id)
else:
# TODO LOGIC FOR UPDATE HERE?
# We need to update name, test_type, organ_model, and organ_model_protocol
# IDEALLY ONLY IF THEY NEED TO BE UPDATED
group_needs_to_be_updated = False
for field in self.update_group_fields:
if getattr(current_group, field) != setup_group.get(field, None):
# Contrived: Replace empty string with None
                            if field.endswith('id') and setup_group.get(field, None) == '':
setattr(current_group, field, None)
else:
setattr(current_group, field, setup_group.get(field, None))
group_needs_to_be_updated = True
if group_needs_to_be_updated:
try:
# Interestingly, we must exclude name
                            # Please see the Chips form about the re-order-kludge
current_group.full_clean(
exclude=['name']
)
update_groups.append(current_group)
# MUST BE MODIFIED TO ADD TO CORRECT ROW (we could display all above too?)
except forms.ValidationError as e:
current_errors.append(
process_error_with_annotation(
'group',
setup_row,
0,
e
)
)
group_has_error = True
# Add to group names
# Check uniqueness
if setup_group.get('name', '') in group_names:
non_field_errors.append('The Group name "{}" is duplicated. The names of Groups must be unique.'.format(
setup_group.get('name', '')
))
else:
group_names.update({
setup_group.get('name', ''): True
})
# Note that we need special handling when there is a conflict in current group names
if setup_group.get('name', '') in current_group_names and setup_group.get('id', 0) != current_group_names.get(setup_group.get('name', '')):
special_group_handling_required = True
else:
# CRUDE
current_organ_model_id = setup_group.get('organ_model_id', None)
if current_organ_model_id:
current_organ_model_id = int(current_organ_model_id)
else:
current_organ_model_id = None
current_organ_model_protocol_id = setup_group.get('organ_model_protocol_id', None)
if current_organ_model_protocol_id:
current_organ_model_protocol_id = int(current_organ_model_protocol_id)
else:
current_organ_model_protocol_id = None
new_group = AssayGroup(
# Study should just be instance
study=self.instance,
name=setup_group.get('name', ''),
test_type=setup_group.get('test_type', ''),
organ_model_id=current_organ_model_id,
organ_model_protocol_id=current_organ_model_protocol_id,
)
# TODO Logic for first clean and adding to new_groups here
try:
# I think we are fine with no exclusions
new_group.full_clean(
exclude=['name']
)
new_groups.append(new_group)
# MUST BE MODIFIED TO ADD TO CORRECT ROW (we could display all above too?)
except forms.ValidationError as e:
current_errors.append(
process_error_with_annotation(
'group',
setup_row,
0,
e
)
)
group_has_error = True
# Add to group names
# Check uniqueness
if setup_group.get('name', '') in group_names:
non_field_errors.append('The Group name "{}" is duplicated. The names of Groups must be unique.'.format(
setup_group.get('name', '')
))
else:
group_names.update({
setup_group.get('name', ''): True
})
# Note that we need special handling when there is a conflict in current group names
if setup_group.get('name', '') in current_group_names and setup_group.get('id', 0) != current_group_names.get(setup_group.get('name', '')):
special_group_handling_required = True
# Always iterate for cells, compounds, and settings
# Keep in mind that to decrease sparsity related data is now tied to a group
for prefix, current_objects in setup_group.items():
# Related are tied to group, not item
# Groups are INDEX DEPENDENT, *NOT* by ID (group may or may not exist)
# If we don't want to wipe things every time, we CAN'T JUST DO THIS!
# Obviously, we would need to differentiate adds, updates, and deletes
# current_related_list = new_related.setdefault(
# str(setup_row), []
# )
if prefix in ['cell', 'compound', 'setting'] and setup_group[prefix]:
for setup_column, current_object in enumerate(current_objects):
# Just to filter out anything that isn't related data we need
# TODO: NOTE: The big problem here is that we do not differentiate updates and adds!
# That is, we would need to wipe all of the existing related data for this to work...
# That is *possible*, but unwise
# We could, alternatively, see if there is an entry at the INDEX (setup_column)
# This can get quite convoluted! AND ALSO NEEDS TO ACCOMMODATE DELETIONS!
# TODO TODO TODO NOTE: Basically to deal with deletions, we could see if number_of_deletions > number_of_new_columns
# Of course, we get number_of_new_columns from the passed data
# On the other hand, if we didn't really care about maximizing efficiency we could just kill anything marked for deletion
# The performance hit for adding a new entry instead of updating an existing would be negligible
# Why bother twisting oneself into a knot to do so?
# Besides, if we mark deletions rather than totally removing them, we would know for sure whether "they were in the right column" and thus whether they needed to be added
# Anyway, we would need a query that matched the data to "columns"
# The query for this, hopefully, shouldn't be too big!
# We only care about that which is associated with groups in THIS study, so it should be fine?
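                        # A rough sketch of what that differentiation could look like (illustrative
                        # only; 'existing_related_by_column' is a hypothetical dict keyed by
                        # (group name, prefix, column index) built from a prior query):
                        #     existing = existing_related_by_column.get(
                        #         (setup_group.get('name', ''), prefix, setup_column)
                        #     )
                        #     if existing and current_object.get('deleted'):
                        #         mark existing.id for deletion
                        #     elif existing:
                        #         compare fields and append to an update list
                        #     else:
                        #         fall through to the "always add" path used below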
# Skip if nothing
if not current_object:
continue
# Crudely convert to int
for current_field, current_value in current_object.items():
if current_field.endswith('_id'):
if current_value:
current_object.update({
current_field: int(current_value)
})
else:
current_object.update({
current_field: None
})
# NOTE TODO TODO TODO
# I am probably just going to blow up all of the old related data for the moment and always add
# This is much faster to write but more expensive than it needs to be
# On the bright side, it won't orphan any data because data is bound to a Group rather than the constituent pieces...
current_object.update({
# GROUP NOT ITEM
# 'group_id': new_group,
# SOMEWHAT TRICKY: START WITH NAME AND OVERWRITE
'group_id': setup_group.get('name', '')
})
# Breaks rule of 3
if prefix == 'cell':
new_cell = AssayGroupCell(**current_object)
try:
new_cell.full_clean(exclude=['group'])
# current_related_list.append(new_cell)
new_cells.append(new_cell)
except forms.ValidationError as e:
# May need to revise process_error
current_errors.append(
process_error_with_annotation(
prefix,
setup_row,
setup_column,
e
)
)
group_has_error = True
elif prefix == 'setting':
new_setting = AssayGroupSetting(**current_object)
try:
new_setting.full_clean(exclude=['group'])
# current_related_list.append(new_setting)
new_settings.append(new_setting)
except forms.ValidationError as e:
current_errors.append(
process_error_with_annotation(
prefix,
setup_row,
setup_column,
e
)
)
group_has_error = True
elif prefix == 'compound':
# CONFUSING NOT DRY BAD
try:
compound = int(current_object.get('compound_id', '0'))
except TypeError:
current_errors.append(
process_error_with_annotation(
prefix,
setup_row,
setup_column,
{
'compound': ['A compound is required']
}
)
)
# CRUDE! Break here to prevent further processing
break
supplier_text = current_object.get('supplier_text', 'N/A').strip()
lot_text = current_object.get('lot_text', 'N/A').strip()
receipt_date = current_object.get('receipt_date', '')
# NOTE THE DEFAULT, PLEASE DO THIS IN A WAY THAT IS MORE DRY
if not supplier_text:
supplier_text = 'N/A'
if not lot_text:
lot_text = 'N/A'
# Check if the supplier already exists
supplier = suppliers.get(supplier_text, '')
concentration = current_object.get('concentration', '0')
# Annoying, bad
if not concentration:
concentration = 0.0
else:
concentration = float(concentration)
concentration_unit_id = current_object.get('concentration_unit_id', '0')
if concentration_unit_id:
concentration_unit_id = int(concentration_unit_id)
else:
concentration_unit_id = None
addition_location_id = current_object.get('addition_location_id', '0')
if addition_location_id:
addition_location_id = int(addition_location_id)
else:
addition_location_id = None
addition_time = current_object.get('addition_time', '0')
duration = current_object.get('duration', '0')
if not addition_time:
addition_time = 0.0
else:
addition_time = float(addition_time)
if not duration:
duration = 0.0
else:
duration = float(duration)
# Otherwise create the supplier
if not supplier:
supplier = CompoundSupplier(
name=supplier_text,
created_by=created_by,
created_on=created_on,
modified_by=created_by,
modified_on=created_on,
)
try:
supplier.full_clean()
supplier.save()
except forms.ValidationError as e:
current_errors.append(
process_error_with_annotation(
prefix,
setup_row,
setup_column,
e
)
)
group_has_error = True
suppliers.update({
supplier_text: supplier
})
# FRUSTRATING EXCEPTION
if not receipt_date:
receipt_date = None
# Check if compound instance exists
compound_instance = compound_instances.get((compound, supplier.id, lot_text, str(receipt_date)), '')
if not compound_instance:
compound_instance = CompoundInstance(
compound_id=compound,
supplier=supplier,
lot=lot_text,
receipt_date=receipt_date,
created_by=created_by,
created_on=created_on,
modified_by=created_by,
modified_on=created_on,
)
try:
compound_instance.full_clean()
compound_instance.save()
except forms.ValidationError as e:
current_errors.append(
process_error_with_annotation(
prefix,
setup_row,
setup_column,
e
)
)
group_has_error = True
compound_instances.update({
(compound, supplier.id, lot_text, str(receipt_date)): compound_instance
})
# Save the AssayCompoundInstance
conflicting_assay_compound_instance = assay_compound_instances.get(
(
# NOPE! HAVE TO USE
# Hmmm... not sure what to use...
# We need something that is id agnostic
# But we can't use just the name!
# We need the study id and group name
# Should be enough!
self.instance.id,
setup_group.get('name', ''),
compound_instance.id,
concentration,
concentration_unit_id,
addition_time,
duration,
addition_location_id
), None
)
if not conflicting_assay_compound_instance:
new_compound = AssayGroupCompound(
# matrix_item_id=new_item.id,
compound_instance_id=compound_instance.id,
concentration=concentration,
concentration_unit_id=concentration_unit_id,
addition_time=addition_time,
duration=duration,
addition_location_id=addition_location_id,
# MANUALLY ADD THE GROUP NAME AS A CONTRIVED VALUE
group_id=setup_group.get('name', '')
)
try:
new_compound.full_clean(exclude=['group'])
# current_related_list.append(new_compound)
new_compounds.append(new_compound)
except forms.ValidationError as e:
current_errors.append(
process_error_with_annotation(
prefix,
setup_row,
setup_column,
e
)
)
group_has_error = True
assay_compound_instances.update({
(
self.instance.id,
setup_group.get('name', ''),
compound_instance.id,
concentration,
concentration_unit_id,
addition_time,
duration,
addition_location_id
): True
})
# We ought to process items separately for a number of reasons
# TODO
# Start of numbering for items
# We begin with the number of current_items + 1
# We assume, for the moment, that there will not be name collisions
# TODO: FOR TOTAL ASSURANCE, PREVENT NAME COLLISIONS
current_item_number = current_items.count() + 1
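        # For example (numbers hypothetical): with 4 existing chips the first new
        # chip added here is named '5'; the NEW- prefix loop below only kicks in if
        # an existing chip already happens to be called '5'.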
for current_chip in all_chip_data:
# Terminate early if no group
# BE CAREFUL, ZERO IS FALSY
if current_chip.get('group_index', None) is not None and len(all_setup_data) > current_chip.get('group_index'):
setup_group = all_setup_data[current_chip.get('group_index')]
else:
continue
# We know whether this is a current item if the id matches one in our list
current_item = current_item_ids.get(int(current_chip.get('id', 0)), None)
# TODO
if current_item:
# ??
# new_item = current_item
# TODO LOGIC FOR UPDATE HERE?
# It might be overkill, but user could change the organ model and protocol
# So just always pass the organ model, protocol, test_type, modified on and by
# NEVER BOTHER WITH THE GROUP
# WE KNOW WHAT THE GROUP IS BECAUSE YOU CANNOT CHANGE IT HERE
# WE AREN'T BOTHERING WITH UPDATING ITEMS HERE
# We will delete them, though
if current_chip.get('deleted', False):
deleted_items.append(current_item.id)
else:
# TODO NOTE: New chip names *theoretically* can conflict with existing chips
# For instance, someone can rename their 3 chips "3,4,5" and add three new chips, bad news!
# CRUDE
# We add "NEW-" until there is a valid name
new_item_name = str(current_item_number)
while new_item_name in current_item_names:
new_item_name = 'NEW-' + new_item_name
new_item = AssayMatrixItem(
# study=study,
# matrix=new_matrix,
name=new_item_name,
# JUST MAKE SETUP DATE THE STUDY DATE FOR NOW
setup_date=self.instance.start_date,
# Alternative row and column
# row_index=setup_row,
# column_index=iteration,
row_index=0,
column_index=current_column_max+1,
# Irrelevant (but required, unfortunately, maybe will remove later)
# device=study.organ_model.device,
organ_model_id=setup_group.get('organ_model_id', None),
# Some nuances here that we will gloss over
organ_model_protocol_id=setup_group.get('organ_model_protocol_id', None),
test_type=setup_group.get('test_type', ''),
created_by=created_by,
created_on=created_on,
modified_by=created_by,
modified_on=created_on,
study_id=self.instance.id,
# SOMEWHAT UNORTHODOX:
# We put the group name here
# THEN OVERRIDE IT WITH THE ID LATER
group_id=setup_group.get('name', ''),
)
current_item_names.update({
new_item_name: True
})
current_column_max += 1
try:
new_item.full_clean(exclude=[
# The matrix needs to be excluded because it might not exist yet
'matrix',
# DEFINITELY EXCLUDE GROUP
'group',
# Why exclude these?
'device',
# 'organ_model',
# 'organ_model_protocol',
])
new_items.append(new_item)
except forms.ValidationError as e:
non_field_errors.append(e)
group_has_error = True
# CAN CAUSE UNUSUAL BEHAVIOR DURING UPDATES!
current_item_number += 1
if current_errors or non_field_errors:
non_field_errors.append(['Please review the table below for errors.'])
raise forms.ValidationError(all_errors)
# Kind of odd at first blush, but we reverse to save in order
# new_items = list(reversed(new_items))
new_setup_data.update({
'new_matrix': new_matrix,
'new_items': new_items,
# NO LONGER HOW THINGS ARE HANDLED:
# 'new_related': new_related,
'new_compounds': new_compounds,
'new_cells': new_cells,
'new_settings': new_settings,
'new_groups': new_groups,
# TODO current_matrix?
# TODO updates?
# We PROBABLY don't need to modify the matrix
# I mean, if someone REALLY wanted to look at it, then it would be messed up if the number of chips changed
# 'update_matrix': update_matrix,
# Swapping groups etc (renaming is in a different interface)
# Maybe we ought to overkill update all in current items?
# Or, I suppose we can avoid superfluous updates by doing a comparison prior?
'update_groups': update_groups,
# NEEDED FOR KLUDGE:
'special_group_handling_required': special_group_handling_required,
# WE DON'T REALLY HAVE TO UPDATE ITEMS, BUT WE DO HAVE TO DELETE THEM
# Probably not needed here
'update_items': update_items,
# TODO TODO TODO
# First pass we are not going to bother with this
'update_compounds': update_compounds,
'update_cells': update_cells,
'update_settings': update_settings,
# WE NEED THE GROUP IDS!
'current_group_ids': current_group_ids,
# WE WOULD WANT TO KNOW IF THERE IS ALREADY A MATRIX!
'current_matrix': current_matrix,
# Probably not needed
# 'item_ids': item_ids,
'deleted_groups': deleted_groups,
'deleted_items': deleted_items,
# THESE GET TRICKY! IDEALLY WE WANT TO DELETE AS FEW RELATED AS POSSIBLE
# TODO TODO TODO
# First pass we are not going to bother with this
'deleted_compounds': deleted_compounds,
'deleted_cells': deleted_cells,
'deleted_settings': deleted_settings,
})
data.update({
'processed_setup_data': new_setup_data
})
return data
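    # Illustrative shape of cleaned_data['processed_setup_data'] consumed by
    # save() below (values are sketches only, not literal output):
    #     {
    #         'new_matrix': <unsaved AssayMatrix or None>,
    #         'new_groups': [<unsaved AssayGroup>, ...],
    #         'new_items': [<unsaved AssayMatrixItem>, ...],
    #         'new_compounds'/'new_cells'/'new_settings': [<unsaved related rows>, ...],
    #         'update_groups': [<existing AssayGroup with modified fields>, ...],
    #         'deleted_groups'/'deleted_items': [<ids slated for deletion>, ...],
    #         'current_group_ids': {<group id>: <AssayGroup>, ...},
    #         'current_matrix': <pre-existing chip matrix, if any>,
    #         'special_group_handling_required': <bool>,
    #     }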
# TODO: REVISE TO USE bulk_create
# TODO: REVISE TO PROPERLY DEAL WITH UPDATES WITH bulk_update
def save(self, commit=True):
all_setup_data = self.cleaned_data.get('processed_setup_data', None)
# Sloppy
study = self.instance
if all_setup_data and commit:
# VERY SLOPPY
created_by = self.user
created_on = timezone.now()
study.modified_by = created_by
study.modified_on = created_on
study.save()
# SLOPPY: REVISE
study_id = study.id
# TODO TODO TODO: STUPID, BUT ONE WAY TO DEAL WITH THE DEVICE ISSUE
# Otherwise I would need to cut it out and immediately revise every place it was called...
# Light query anyway (relative to the others) I guess
organ_model_id_to_device_id = {
organ_model.id: organ_model.device_id for organ_model in OrganModel.objects.all()
}
new_matrix = all_setup_data.get('new_matrix', None)
new_groups = all_setup_data.get('new_groups', None)
update_groups = all_setup_data.get('update_groups', None)
current_group_ids = all_setup_data.get('current_group_ids', None)
new_items = all_setup_data.get('new_items', None)
update_items = all_setup_data.get('update_items', None)
# Why?
new_compounds = all_setup_data.get('new_compounds', None)
new_cells = all_setup_data.get('new_cells', None)
new_settings = all_setup_data.get('new_settings', None)
update_compounds = all_setup_data.get('update_compounds', None)
update_cells = all_setup_data.get('update_cells', None)
update_settings = all_setup_data.get('update_settings', None)
current_matrix = all_setup_data.get('current_matrix', None)
deleted_groups = all_setup_data.get('deleted_groups', None)
deleted_items = all_setup_data.get('deleted_items', None)
deleted_compounds = all_setup_data.get('deleted_compounds', None)
deleted_cells = all_setup_data.get('deleted_cells', None)
deleted_settings = all_setup_data.get('deleted_settings', None)
if new_matrix:
new_matrix.study_id = study_id
new_matrix.save()
new_matrix_id = new_matrix.id
current_matrix_id = new_matrix_id
# Uh... why use an elif here?
# elif not new_matrix and (new_items or update_items):
else:
# TODO TODO TODO
# Have some way to get the "current matrix" if there isn't a new matrix
# TODO Be careful, if there are no chips, don't bother!
current_matrix_id = all_setup_data.get('current_matrix')
# UPDATE CURRENT GROUPS IF NECESSARY
# Bulk update should be relatively fast
if update_groups:
if not all_setup_data.get('special_group_handling_required'):
AssayGroup.objects.bulk_update(update_groups, self.update_group_fields)
else:
# Stupid, each of these is a query
# (Quite expensive, hopefully this edge case is rare)
for index, group in enumerate(update_groups):
# Store the desired name
desired_name = group.name
# Just make a dumb temp name to get rid of conflicts
# If a user is seriously using this...
temp_name = 'WALLA1029XYZABCTEMPTODEALWITHCONFLICTS-{}'.format(index)
# We may need more temp names (hopefully not...)
group.name = temp_name
group.save()
group.name = desired_name
# NOW try performing the bulk save
AssayGroup.objects.bulk_update(update_groups, self.update_group_fields)
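                    # Worked example of the conflict this sidesteps (names hypothetical): if
                    # existing groups "A" and "B" are renamed to "B" and "A" in one submission,
                    # a straight bulk_update would transiently have two groups named "B" and
                    # trip the uniqueness constraint; saving each group under a throwaway temp
                    # name first and only then bulk-updating to the desired names avoids that.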
# These dicts map names (which are specific to this study! They are not from queries!) to ids
# We want to avoid superfluous queries, and we might as well take advantage of the uniqueness constraints
new_group_ids = {}
# Combine current_group ids into new_group_ids
# Way more verbose than it needs to be
for current_group_id, current_group in current_group_ids.items():
new_group_ids.update({
current_group.name: current_group_id
})
# new_item_ids = {}
# TODO
# for new_group in new_groups:
# We don't really need to tie to anything?
# Be careful with conditionals
if new_groups:
# Bulk create the groups BE SURE ONLY NEW GROUPS IN THIS LIST
AssayGroup.objects.bulk_create(new_groups)
# TODO NOTE: WE NEED ALL GROUPS, NOT JUST THESE
for new_group in new_groups:
new_group_ids.update({
new_group.name: new_group.id
})
# NOTE: WE MAY HAVE NEW ITEMS WITHOUT A NEW MATRIX
for new_item in new_items:
# ADD MATRIX and tracking
# TODO TODO TODO TODO DO NOT USE new_matrix_id HERE
new_item.matrix_id = current_matrix_id
# IDEALLY WE WILL JUST CUT THESE ANYWAY??
new_item.device_id = organ_model_id_to_device_id.get(new_item.organ_model_id)
# TODO TODO TODO
# We perform a little bit of sleight of hand here!
current_group_name = new_item.group_id
# Assign the correct group
# "new_group_ids" is a misnomer!
new_group_id = new_group_ids.get(current_group_name, None)
new_item.group_id = new_group_id
# To get the group id we have to use a dict TODO TODO
# Ideally we won't save this way! We want to use bulk_create
# new_item.save()
# Ideally we would do this after the bulk_create
# new_item_ids.update({
# new_item.name: new_item.id
# })
if new_items:
AssayMatrixItem.objects.bulk_create(new_items)
# We shouldn't actually need these for anything
# for new_item in new_items:
# new_item_ids.update({
# new_item.name: new_item.id
# })
# WE WILL WANT TO SAVE EACH COMPONENT SEPARATELY FOR BULK CREATE/UPDATE (otherwise gets odd)
# Additionally, this data is no longer tied to an individual item
# for current_item_name, new_related_data_set in new_related.items():
# new_item_id = new_item_ids.get(current_item_name, None)
# if new_item_id:
# for new_related_data in new_related_data_set:
# # ADD MATRIX ITEM
# new_related_data.matrix_item_id = new_item_id
# new_related_data.save()
# Can't use ids due to the fact that some ids are new!
# Be sure you can access ALL groups for this!
# (EXCLUDING DELETED GROUPS, WHICH OUGHT NOT TO BE ACCESSED)
# Barbaric, just obscene in its grotesque cruelty
# How could one do such a thing and not be deemed heartless?
# KILL ALL COMPOUNDS, CELL, AND SETTINGS:
AssayGroupCompound.objects.filter(group_id__in=new_group_ids.values()).delete()
AssayGroupCell.objects.filter(group_id__in=new_group_ids.values()).delete()
AssayGroupSetting.objects.filter(group_id__in=new_group_ids.values()).delete()
# Breaks rule of three
# Stupid
for new_compound in new_compounds:
# We perform a little bit of sleight of hand here!
current_group_name = new_compound.group_id
# Assign the correct group
# "new_group_ids" is a misnomer!
new_group_id = new_group_ids.get(current_group_name, None)
new_compound.group_id = new_group_id
if new_compounds:
AssayGroupCompound.objects.bulk_create(reversed(new_compounds))
for new_cell in new_cells:
# We perform a little bit of sleight of hand here!
current_group_name = new_cell.group_id
# Assign the correct group
# "new_group_ids" is a misnomer!
new_group_id = new_group_ids.get(current_group_name, None)
new_cell.group_id = new_group_id
if new_cells:
AssayGroupCell.objects.bulk_create(reversed(new_cells))
for new_setting in new_settings:
# We perform a little bit of sleight of hand here!
current_group_name = new_setting.group_id
# Assign the correct group
# "new_group_ids" is a misnomer!
new_group_id = new_group_ids.get(current_group_name, None)
new_setting.group_id = new_group_id
if new_settings:
AssayGroupSetting.objects.bulk_create(reversed(new_settings))
# Perform deletions
if deleted_items:
AssayMatrixItem.objects.filter(id__in=deleted_items, matrix_id=current_matrix_id).delete()
if deleted_groups:
AssayGroup.objects.filter(id__in=deleted_groups, study_id=self.instance.id).delete()
return study
class AssayStudyChipForm(SetupFormsMixin, SignOffMixin, BootstrapForm):
series_data = forms.CharField(required=False)
class Meta(object):
model = AssayStudy
# Since we are splitting into multiple forms, includes are safer
fields = (
'series_data',
'organ_model_full',
'organ_model_protocol_full'
) + flag_group
def __init__(self, *args, **kwargs):
super(AssayStudyChipForm, self).__init__(*args, **kwargs)
# Prepopulate series_data from thing
self.fields['series_data'].initial = self.instance.get_group_data_string(get_chips=True)
# TODO NEEDS TO BE REVISED
# TODO TODO TODO CLEAN AND SAVE
def clean(self):
cleaned_data = super(AssayStudyChipForm, self).clean()
# TODO TODO TODO NOTE CRAMMED IN
# Am I sticking with the name 'series_data'?
if self.cleaned_data.get('series_data', None):
all_data = json.loads(self.cleaned_data.get('series_data', '[]'))
else:
# Contrived defaults
all_data = {
'series_data': [],
'chips': [],
'plates': {}
}
# The data for groups is currently stored in series_data
all_setup_data = all_data.get('series_data')
all_chip_data = all_data.get('chips')
# Catch technically empty setup data
setup_data_is_empty = True
for group_set in all_setup_data:
if group_set:
setup_data_is_empty = not any(group_set.values())
if setup_data_is_empty:
all_setup_data = []
# Variables must always exist
chip_data = []
current_errors = []
# if commit and all_setup_data:
# SEE BASE MODELS FOR WHY COMMIT IS NOT HERE
if all_chip_data:
chip_names = {}
# Interestingly, we need to prevent overlap with wells as well
well_names = { well.name: True for well in AssayMatrixItem.objects.filter(study_id=self.instance.id).exclude(matrix__representation='chips') }
current_matrix = AssayMatrix.objects.filter(
# The study must exist in order to visit this page, so getting the id this way is fine
study_id=self.instance.id,
representation='chips'
)
# Get the group ids
group_ids = {
group.id: True for group in AssayGroup.objects.filter(
study_id=self.instance.id
)
}
chip_id_to_chip = {
chip.id: chip for chip in AssayMatrixItem.objects.filter(
study_id=self.instance.id,
matrix_id=current_matrix[0].id
)
}
special_handling_required = False
# Crude, but we need current chips
current_chip_names = {chip.name: chip.id for chip in chip_id_to_chip.values()}
# We basically just modify group_id and name
# TODO MAKE SURE THE GROUP IDS ARE VALID
# That is, check against the group ids of the study
for chip in all_chip_data:
if chip_id_to_chip.get(chip.get('id', 0)):
current_chip = chip_id_to_chip.get(chip.get('id', 0))
else:
current_chip = False
current_errors.append('A Chip is missing, please refresh and try again.')
if current_chip:
# Add to group names
# Check uniqueness
if chip.get('name', '') in chip_names:
current_errors.append('The Chip name "{}" is duplicated. The names of Chips must be unique.'.format(
chip.get('name', '')
))
elif chip.get('name', '') in well_names:
current_errors.append('The Chip name "{}" is the name of a Well. Either rename the well or choose a different name for the Chip.'.format(
chip.get('name', '')
))
else:
chip_names.update({
chip.get('name', ''): True
})
current_chip.name = chip.get('name', '')
# Note that we need special handling when there is a conflict in current chip names
if chip.get('name', '') in current_chip_names and current_chip.id != current_chip_names.get(chip.get('name', '')):
special_handling_required = True
if chip.get('group_id', '') in group_ids:
current_chip.group_id = chip.get('group_id', '')
try:
# INTERESTINGLY: We actually exclude name!
# Why? Because we don't want it erroring out from *existing* names
# It is an edge case, but someone might have, say, 100 chips
# If they rename the chips starting with 50...
# There would be an error because "50" was already a chosen name!
current_chip.full_clean(
exclude=['name']
)
chip_data.append(current_chip)
except forms.ValidationError as e:
current_errors.append(e)
self.cleaned_data.update({
'chip_data': chip_data,
'special_handling_required': special_handling_required
})
if current_errors:
raise forms.ValidationError(current_errors)
return cleaned_data
def save(self, commit=True):
# Just do the bulk update
# We don't need to do anything else
# TODO TODO TODO
# NOTE NOTE NOTE
# WE TECHNICALLY SHOULD CHANGE THE SHARED VALUES HERE (organ_model, test_type, etc.)
# ON THE OTHER HAND, IT IS PROBABLY BEST TO LEAVE THEM OUT
if commit:
# Due to the nature of bulk_update and uniqueness checks...
# We need to catch integrity errors and handle them
if not self.cleaned_data.get('special_handling_required'):
AssayMatrixItem.objects.bulk_update(
self.cleaned_data.get('chip_data', None),
[
'group_id',
'name'
]
)
else:
# Stupid, each of these is a query
# (Quite expensive, hopefully this edge case is rare)
for index, chip in enumerate(self.cleaned_data.get('chip_data', None)):
# Store the desired name
desired_name = chip.name
# Just make a dumb temp name to get rid of conflicts
# If a user is seriously using this...
temp_name = 'WALLA1029XYZABCTEMPTODEALWITHCONFLICTS-{}'.format(index)
# We may need more temp names (hopefully not...)
chip.name = temp_name
chip.save()
chip.name = desired_name
# NOW try performing the bulk save
AssayMatrixItem.objects.bulk_update(
self.cleaned_data.get('chip_data', None),
[
'group_id',
'name'
]
)
return self.instance
class AssayStudyPlateForm(SetupFormsMixin, SignOffMixin, BootstrapForm):
series_data = forms.CharField(required=False)
class Meta(object):
model = AssayMatrix
fields = (
'name',
'notes',
# Don't care about device anymore, I guess
# 'device',
# DO care about organ model, I guess
'organ_model',
# Maybe a bit unorthodox
'number_of_columns',
'number_of_rows',
# TODO
'series_data',
) + flag_group
widgets = {
'name': forms.Textarea(attrs={'rows': 1}),
'notes': forms.Textarea(attrs={'rows': 10}),
}
def __init__(self, *args, **kwargs):
self.study = kwargs.pop('study', None)
super(AssayStudyPlateForm, self).__init__(*args, **kwargs)
if self.study:
self.instance.study = self.study
else:
self.study = self.instance.study
# Crude! TEMPORARY
if self.instance.id:
self.fields['series_data'].initial = self.study.get_group_data_string(plate_id=self.instance.id)
else:
self.fields['series_data'].initial = self.study.get_group_data_string()
# Predicate the organ model options on the current groups
# Dumb query
plate_groups = AssayGroup.objects.filter(
study_id=self.instance.study.id,
# See above
organ_model__device__device_type='plate'
).prefetch_related('organ_model__device')
self.fields['organ_model'].queryset = OrganModel.objects.filter(
id__in=plate_groups.values_list('organ_model_id', flat=True)
)
# Improper, but one method to make organ model required
self.fields['organ_model'].widget.attrs['class'] += ' required'
self.fields['organ_model'].required = True
# FORCE UNIQUENESS CHECK
def clean(self):
# RATHER CRUDE: WE FORCE THE PLATE TO HAVE A REPRESENTATION OF PLATE
self.instance.representation = 'plate'
cleaned_data = super(AssayStudyPlateForm, self).clean()
# VERY SLOPPY
created_by = self.user
created_on = timezone.now()
# TODO TODO TODO NOTE CRAMMED IN
# Am I sticking with the name 'series_data'?
if self.cleaned_data.get('series_data', None):
all_data = json.loads(self.cleaned_data.get('series_data', '[]'))
else:
# Contrived defaults
all_data = {
'series_data': [],
'chips': [],
'plates': {}
}
# The data for groups is currently stored in series_data
all_setup_data = all_data.get('series_data')
all_plate_data = all_data.get('plates')
# Catch technically empty setup data
setup_data_is_empty = True
for group_set in all_setup_data:
if group_set:
setup_data_is_empty = not any(group_set.values())
if setup_data_is_empty:
all_setup_data = []
# Plate name
if AssayMatrix.objects.filter(
study_id=self.instance.study.id,
name=self.cleaned_data.get('name', '')
).exclude(pk=self.instance.pk).count():
raise forms.ValidationError({'name': ['Plate name must be unique within Study.']})
current_wells = {
well.id: well for well in AssayMatrixItem.objects.filter(
matrix_id=self.instance.id
)
}
# Technically ought to restrict to JUST PLATE GROUPS
# However, that makes the query uglier
# First pass, we won't enforce such a check
current_groups = {
group.id: group for group in AssayGroup.objects.filter(
study_id=self.instance.study.id
)
}
new_wells = []
update_wells = []
delete_wells = []
# NOTE: We will be alerted during clean for anything that ISN'T INTERNAL to the plate
taken_names = {}
current_errors = []
for row_column, well in all_plate_data.items():
row_column_split = row_column.split('_')
row = int(row_column_split[0])
column = int(row_column_split[1])
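            # Keys in all_plate_data look like '<row>_<column>', e.g. a (hypothetical)
            # key of '2_5' places the well described by the value at row index 2,
            # column index 5.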
current_well = current_wells.get(well.get('id', 0), None)
current_group = current_groups.get(well.get('group_id', 0), None)
# Junk data
if not current_group:
continue
# BE SURE TO TEST THE NAMES!
current_name = well.get('name', '')
if current_name in taken_names:
current_errors.append('The name "{}" is already in use, please make sure well names are unique.'.format(
current_name
))
else:
taken_names.update({
current_name: True
})
# Update if already exists
if current_well:
# If slated for deletion
if well.get('deleted', ''):
delete_wells.append(current_well.id)
else:
# Update name
current_well.name = current_name
if well.get('group_id', '') in current_groups:
# Update group id
current_well.group_id = well.get('group_id', '')
# Make sure nothing broke
try:
current_well.full_clean()
# Add it to those slated to update
update_wells.append(current_well)
except forms.ValidationError as e:
current_errors.append(e)
# Add otherwise
else:
new_item = AssayMatrixItem(
# We know this one
study=self.instance.study,
# TRICKY!
# matrix=new_matrix,
name=current_name,
# JUST MAKE SETUP DATE THE STUDY DATE FOR NOW
setup_date=self.instance.study.start_date,
row_index=row,
column_index=column,
# Irrelevant (but required, unfortunately, maybe will remove later)
# device=study.organ_model.device,
organ_model_id=current_group.organ_model_id,
# Some nuances here that we will gloss over
organ_model_protocol_id=current_group.organ_model_protocol_id,
test_type=current_group.test_type,
created_by=created_by,
created_on=created_on,
modified_by=created_by,
modified_on=created_on,
group_id=current_group.id
)
try:
new_item.full_clean(exclude=[
# The matrix needs to be excluded because it might not exist yet
'matrix',
# Why exclude these?
# Get rid of device for now because it is still required
'device',
# 'organ_model',
# 'organ_model_protocol',
])
new_wells.append(new_item)
except forms.ValidationError as e:
current_errors.append(e)
group_has_error = True
cleaned_data.update({
'new_wells': new_wells,
'update_wells': update_wells,
'delete_wells': delete_wells,
})
if current_errors:
raise forms.ValidationError(current_errors)
return cleaned_data
# CRUDE, TEMPORARY
# TODO REVISE ASAP
def save(self, commit=True):
# Takes care of saving the Plate in and of itself
matrix = super(AssayStudyPlateForm, self).save(commit)
if commit:
matrix_id = self.instance.id
study_id = self.instance.study.id
# TODO TODO TODO: STUPID, BUT ONE WAY TO DEAL WITH THE DEVICE ISSUE
# Otherwise I would need to cut it out and immediately revise every place it was called...
# Light query anyway (relative to the others) I guess
organ_model_id_to_device_id = {
organ_model.id: organ_model.device_id for organ_model in OrganModel.objects.all()
}
new_wells = self.cleaned_data.get('new_wells')
update_wells = self.cleaned_data.get('update_wells')
delete_wells = self.cleaned_data.get('delete_wells')
# Add new wells
if new_wells:
# Need to iterate through the new wells and add the matrix id
# (The matrix might not exist yet)
for well in new_wells:
well.matrix_id = matrix_id
# IDEALLY WE WILL JUST CUT THESE ANYWAY??
well.device_id = organ_model_id_to_device_id.get(well.organ_model_id)
AssayMatrixItem.objects.bulk_create(new_wells)
# Update wells
# TODO TODO TODO
# NOTE NOTE NOTE
# WE TECHNICALLY SHOULD CHANGE THE SHARED VALUES HERE (organ_model, test_type, etc.)
# ON THE OTHER HAND, IT IS PROBABLY BEST TO LEAVE THEM OUT
if update_wells:
AssayMatrixItem.objects.bulk_update(
update_wells,
[
'name',
'group_id'
]
)
if delete_wells:
AssayMatrixItem.objects.filter(id__in=delete_wells).delete()
return matrix
# Need to make plural to distinguish
# CONTRIVED ANYWAY
class AssayStudyAssaysForm(BootstrapForm):
class Meta(object):
model = AssayStudy
# Since we are splitting into multiple forms, includes are safer
fields = flag_group
class AssayStudyFormAdmin(BootstrapForm):
"""Admin Form for Assay Runs (now referred to as Studies)"""
class Meta(object):
model = AssayStudy
widgets = {
'assay_run_id': forms.Textarea(attrs={'rows': 1}),
'name': forms.Textarea(attrs={'rows': 1}),
'description': forms.Textarea(attrs={'rows': 10}),
'signed_off_notes': forms.Textarea(attrs={'rows': 10}),
}
exclude = ('',)
def __init__(self, *args, **kwargs):
super(AssayStudyFormAdmin, self).__init__(*args, **kwargs)
groups_with_center = MicrophysiologyCenter.objects.all().values_list('groups', flat=True)
groups_with_center_full = Group.objects.filter(
id__in=groups_with_center
).order_by(
'name'
)
self.fields['group'].queryset = groups_with_center_full
groups_without_repeat = groups_with_center_full
if self.instance and getattr(self.instance, 'group', ''):
groups_without_repeat = groups_without_repeat.exclude(pk=self.instance.group.id)
self.fields['access_groups'].queryset = groups_without_repeat
self.fields['collaborator_groups'].queryset = groups_without_repeat
# Crudely force required class
for current_field in ['total_device_volume', 'flow_rate', 'number_of_relevant_cells']:
self.fields[current_field].widget.attrs['class'] += ' required'
def clean(self):
# clean the form data, before validation
data = super(AssayStudyFormAdmin, self).clean()
if not any([data['toxicity'], data['efficacy'], data['disease'], data['cell_characterization'], data['omics'], data['pbpk_steady_state'], data['pbpk_bolus']]):
raise forms.ValidationError('Please select at least one study type')
if data.get('pbpk_steady_state', '') and (not data.get('number_of_relevant_cells', '') or not data.get('flow_rate', '')):
raise forms.ValidationError('Continuous Infusion PBPK Requires Number of Cells Per MPS Model and Flow Rate')
if data.get('pbpk_bolus', '') and (not data.get('number_of_relevant_cells', '') or not data.get('total_device_volume', '')):
raise forms.ValidationError('Bolus PBPK Requires Number of Cells Per MPS Model and Total Device Volume')
return data
class AssayStudyAccessForm(forms.ModelForm):
"""Form for changing access to studies"""
def __init__(self, *args, **kwargs):
super(AssayStudyAccessForm, self).__init__(*args, **kwargs)
# NEED A MORE ELEGANT WAY TO GET THIS
first_center = self.instance.group.center_groups.first()
groups_without_repeat = Group.objects.filter(
id__in=first_center.accessible_groups.all().values_list('id', flat=True),
).order_by(
'name'
).exclude(
id=self.instance.group.id
)
self.fields['access_groups'].queryset = groups_without_repeat
self.fields['collaborator_groups'].queryset = groups_without_repeat
class Meta(object):
model = AssayStudy
fields = (
'collaborator_groups',
'access_groups',
)
class AssayStudySupportingDataForm(BootstrapForm):
class Meta(object):
model = AssayStudySupportingData
exclude = ('',)
class AssayStudyAssayForm(BootstrapForm):
class Meta(object):
model = AssayStudyAssay
exclude = ('',)
class AssayStudySupportingDataInlineFormSet(BaseInlineFormSet):
"""Form for Study Supporting Data (as part of an inline)"""
class Meta(object):
model = AssayStudySupportingData
exclude = ('',)
AssayStudySupportingDataFormSetFactory = inlineformset_factory(
AssayStudy,
AssayStudySupportingData,
form=AssayStudySupportingDataForm,
formset=AssayStudySupportingDataInlineFormSet,
extra=1,
exclude=[],
widgets={
'description': forms.Textarea(attrs={'rows': 3}),
}
)
AssayStudyAssayFormSetFactory = inlineformset_factory(
AssayStudy,
AssayStudyAssay,
form=AssayStudyAssayForm,
formset=AssayStudyAssayInlineFormSet,
extra=1,
exclude=[]
)
# TODO ADD STUDY
class AssayMatrixForm(SetupFormsMixin, SignOffMixin, BootstrapForm):
class Meta(object):
model = AssayMatrix
exclude = ('study',) + tracking
widgets = {
'number_of_columns': forms.NumberInput(attrs={'style': 'width: 100px;'}),
'number_of_rows': forms.NumberInput(attrs={'style': 'width: 100px;'}),
'name': forms.Textarea(attrs={'rows': 1}),
'notes': forms.Textarea(attrs={'rows': 3}),
'variance_from_organ_model_protocol': forms.Textarea(attrs={'rows': 3}),
}
def __init__(self, *args, **kwargs):
self.study = kwargs.pop('study', None)
# self.user = kwargs.pop('user', None)
super(AssayMatrixForm, self).__init__(*args, **kwargs)
if self.study:
self.instance.study = self.study
# sections_with_times = (
# 'compound',
# 'cell',
# 'setting'
# )
#
# for time_unit in list(TIME_CONVERSIONS.keys()):
# for current_section in sections_with_times:
# # Create fields for Days, Hours, Minutes
# self.fields[current_section + '_addition_time_' + time_unit] = forms.FloatField(
# initial=0,
# required=False,
# widget=forms.NumberInput(attrs={
# 'class': 'form-control',
# 'style': 'width:75px;'
# })
# )
# self.fields[current_section + '_duration_' + time_unit] = forms.FloatField(
# initial=0,
# required=False,
# widget=forms.NumberInput(attrs={
# 'class': 'form-control',
# 'style': 'width:75px;'
# })
# )
# Changing these things in init is bad
self.fields['matrix_item_notebook_page'].widget.attrs['style'] = 'width:75px;'
# self.fields['cell_cell_sample'].widget.attrs['style'] = 'width:75px;'
# self.fields['cell_passage'].widget.attrs['style'] = 'width:75px;'
# Make sure no selectize
# CONTRIVED
self.fields['matrix_item_full_organ_model'].widget.attrs['class'] = 'no-selectize'
self.fields['matrix_item_full_organ_model_protocol'].widget.attrs['class'] = 'no-selectize'
# No selectize on action either (hides things, looks odd)
# CONTRIVED
# self.fields['action'].widget.attrs['class'] += ' no-selectize'
# DUMB, BAD (can't have them be "actually" required or they prevent submission)
add_required_to = [
'matrix_item_name',
'matrix_item_setup_date',
'matrix_item_test_type',
'matrix_item_name',
'matrix_item_device',
'matrix_item_organ_model',
]
for current_field in add_required_to:
self.fields[current_field].widget.attrs['class'] += ' required'
### ADDITIONAL MATRIX FIELDS (unsaved)
number_of_items = forms.IntegerField(required=False)
### ITEM FIELD HELPERS
# action = forms.ChoiceField(choices=(
# ('', 'Please Select an Action'),
# ('add_name', 'Add Names/IDs*'),
# ('add_test_type', 'Add Test Type*'),
# ('add_date', 'Add Setup Date*'),
# ('add_device', 'Add Device/MPS Model Information*'),
# ('add_settings', 'Add Settings'),
# ('add_compounds', 'Add Compounds'),
# ('add_cells', 'Add Cells'),
# ('add_notes', 'Add Notes/Notebook Information'),
# # ADD BACK LATER
# # ('copy', 'Copy Contents'),
# # TODO TODO TODO TENTATIVE
# # ('clear', 'Clear Contents'),
# ('delete', 'Delete Selected'),
# ), required=False)
# The matrix_item isn't just to be annoying, I want to avoid conflicts with other fields
### ADDING ITEM FIELDS
matrix_item_name = forms.CharField(
required=False,
widget=forms.Textarea(attrs={'rows': 1}),
label='Matrix Item Name'
)
matrix_item_setup_date = forms.DateField(
required=False,
label='Matrix Item Setup Date'
)
# Foolish!
matrix_item_setup_date_popup = forms.DateField(required=False)
matrix_item_test_type = forms.ChoiceField(
required=False,
choices=TEST_TYPE_CHOICES,
label='Matrix Item Test Type'
)
matrix_item_scientist = forms.CharField(
required=False,
widget=forms.Textarea(attrs={'rows': 1}),
label='Scientist'
)
matrix_item_notebook = forms.CharField(
required=False,
label='Notebook'
)
matrix_item_notebook_page = forms.CharField(
required=False,
label='Notebook Page'
)
matrix_item_notes = forms.CharField(
required=False,
widget=forms.Textarea(attrs={'rows': 3}),
label='Notes'
)
### ADDING SETUP FIELDS
matrix_item_device = forms.ModelChoiceField(
queryset=Microdevice.objects.all().order_by('name'),
required=False,
label='Matrix Item Device'
)
matrix_item_organ_model = forms.ModelChoiceField(
queryset=OrganModel.objects.all().order_by('name'),
required=False,
label='Matrix Item MPS Model'
)
matrix_item_organ_model_protocol = forms.ModelChoiceField(
queryset=OrganModelProtocol.objects.all().order_by('version'),
required=False,
label='Matrix Item MPS Model Version'
)
matrix_item_variance_from_organ_model_protocol = forms.CharField(
required=False,
widget=forms.Textarea(attrs={'rows': 3}),
label='Matrix Item Variance from Protocol'
)
matrix_item_full_organ_model = forms.ModelChoiceField(
queryset=OrganModel.objects.all().order_by('name'),
required=False
)
matrix_item_full_organ_model_protocol = forms.ModelChoiceField(
queryset=OrganModelProtocol.objects.all(),
required=False
)
### INCREMENTER
compound_concentration_increment = forms.FloatField(required=False, initial=1)
compound_concentration_increment_type = forms.ChoiceField(
choices=(
('/', 'Divide'),
('*', 'Multiply'),
('+', 'Add'),
('-', 'Subtract')
),
required=False
)
compound_concentration_increment_direction = forms.ChoiceField(
choices=(
('lr', 'Left to Right'),
('d', 'Down'),
('rl', 'Right to Left'),
('u', 'Up'),
('lrd', 'Left to Right and Down'),
('rlu', 'Right to Left and Up')
),
required=False,
initial='lr'
)
# Options for deletion
delete_option = forms.ChoiceField(
required=False,
choices=(
('all', 'Everything'),
('cell', 'Cells'),
('compound', 'Compounds'),
('setting', 'Settings'),
),
label='Delete Option'
)
# FORCE UNIQUENESS CHECK
def clean(self):
super(AssayMatrixForm, self).clean()
if AssayMatrix.objects.filter(
study_id=self.instance.study.id,
name=self.cleaned_data.get('name', '')
).exclude(pk=self.instance.pk).count():
raise forms.ValidationError({'name': ['Matrix name must be unique within study.']})
class AssaySetupCompoundForm(ModelFormSplitTime):
compound = forms.CharField()
class Meta(object):
model = AssaySetupCompound
exclude = tracking
# TODO: IDEALLY THE CHOICES WILL BE PASSED VIA A KWARG
class AssaySetupCompoundFormSet(BaseModelFormSetForcedUniqueness):
custom_fields = (
'matrix_item',
'compound_instance',
'concentration_unit',
'addition_location'
)
def __init__(self, *args, **kwargs):
# TODO EVENTUALLY PASS WITH KWARG
# self.suppliers = kwargs.pop('suppliers', None)
# self.compound_instances = kwargs.pop('compound_instances', None)
# self.compound_instances_dic = kwargs.pop('compound_instances_dic', None)
# self.setup_compounds = kwargs.pop('setup_compounds', None)
# Get all chip setup assay compound instances
self.matrix = kwargs.pop('matrix', None)
self.setup_compounds = {
(
instance.matrix_item_id,
instance.compound_instance_id,
instance.concentration,
instance.concentration_unit_id,
instance.addition_time,
instance.duration,
instance.addition_location_id
): True for instance in AssaySetupCompound.objects.filter(
matrix_item__matrix=self.matrix
)
}
self.compound_instances = {}
self.compound_instances_dic = {}
for instance in CompoundInstance.objects.all().prefetch_related('supplier'):
self.compound_instances.update({
(
instance.compound_id,
instance.supplier_id,
instance.lot,
instance.receipt_date
): instance
})
# NOTE use of name instead of id!
self.compound_instances_dic.update({
instance.id: (
instance.compound_id,
instance.supplier.name,
instance.lot,
instance.receipt_date
)
})
# Get all suppliers
self.suppliers = {
supplier.name: supplier for supplier in CompoundSupplier.objects.all()
}
super(AssaySetupCompoundFormSet, self).__init__(*args, **kwargs)
filters = {'matrix_item': {'matrix_id': self.matrix.id}}
self.dic = get_dic_for_custom_choice_field(self, filters=filters)
for form in self.forms:
for field in self.custom_fields:
form.fields[field] = DicModelChoiceField(field, self.model, self.dic)
# Purge all classes
for field in form.fields:
form.fields[field].widget.attrs['class'] = ''
def _construct_form(self, i, **kwargs):
form = super(AssaySetupCompoundFormSet, self)._construct_form(i, **kwargs)
# Text field (un-saved) for supplier
form.fields['supplier_text'] = forms.CharField(initial='N/A', required=False)
# Text field (un-saved) for lot
form.fields['lot_text'] = forms.CharField(initial='N/A', required=False)
# Receipt date
form.fields['receipt_date'] = forms.DateField(required=False)
if form.instance:
current_compound_instance_id = form.instance.compound_instance_id
else:
current_compound_instance_id = None
if current_compound_instance_id:
current_compound_instance = self.compound_instances_dic.get(current_compound_instance_id)
# form.fields['compound'].initial = current_compound_instance.compound
# form.fields['supplier_text'].initial = current_compound_instance.supplier.name
# form.fields['lot_text'].initial = current_compound_instance.lot
# form.fields['receipt_date'].initial = current_compound_instance.receipt_date
form.fields['compound'].initial = current_compound_instance[0]
form.fields['supplier_text'].initial = current_compound_instance[1]
form.fields['lot_text'].initial = current_compound_instance[2]
form.fields['receipt_date'].initial = current_compound_instance[3]
return form
# TODO TODO TODO
# Will either have to decouple compound instance and supplier or else have a dic ALL FORMSETS reference
# Ostensibly, I can pass a pointer to a dictionary so that all of the formsets see the same thing
def save(self, commit=True):
# Get forms_data (excluding those with delete or no data)
forms_data = [f for f in self.forms if f.cleaned_data and not f.cleaned_data.get('DELETE', False)]
forms_to_delete = [f for f in self.forms if f.cleaned_data and f.cleaned_data.get('DELETE', False)]
# Forms to be deleted
for form in forms_to_delete:
try:
instance = BootstrapForm.save(form, commit=False)
if instance and instance.id and commit:
instance.delete()
# ValueError here indicates that the instance couldn't even validate and so should be ignored
except ValueError:
pass
# Forms to save
for form in forms_data:
instance = BootstrapForm.save(form, commit=False)
matrix_item = instance.matrix_item
current_data = form.cleaned_data
# Bad
if not current_data.get('supplier_text'):
current_data['supplier_text'] = 'N/A'
if not current_data.get('lot_text'):
current_data['lot_text'] = 'N/A'
compound_id = int(current_data.get('compound'))
supplier_text = current_data.get('supplier_text').strip()
lot_text = current_data.get('lot_text').strip()
receipt_date = current_data.get('receipt_date')
# Should be acquired straight from form
# concentration = current_data.get('concentration')
# concentration_unit = current_data.get('concentration_unit')
addition_time = 0
duration = 0
for time_unit, conversion in list(TIME_CONVERSIONS.items()):
addition_time += current_data.get('addition_time_' + time_unit, 0) * conversion
duration += current_data.get('duration_' + time_unit, 0) * conversion
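            # e.g., assuming TIME_CONVERSIONS is a minutes-based map along the lines of
            # {'day': 1440, 'hour': 60, 'minute': 1}, form values of 1 day, 2 hours and
            # 30 minutes collapse to 1590 minutes here.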
# Check if the supplier already exists
supplier = self.suppliers.get(supplier_text, '')
# Otherwise create the supplier
if not supplier:
supplier = CompoundSupplier(
name=supplier_text,
created_by=matrix_item.created_by,
created_on=matrix_item.created_on,
modified_by=matrix_item.modified_by,
modified_on=matrix_item.modified_on
)
# if commit:
# supplier.save()
# Always save the supplier
supplier.save()
self.suppliers.update({
supplier_text: supplier
})
# Check if compound instance exists
compound_instance = self.compound_instances.get((compound_id, supplier.id, lot_text, receipt_date), '')
if not compound_instance:
compound_instance = CompoundInstance(
compound_id=compound_id,
supplier=supplier,
lot=lot_text,
receipt_date=receipt_date,
created_by=matrix_item.created_by,
created_on=matrix_item.created_on,
modified_by=matrix_item.modified_by,
modified_on=matrix_item.modified_on
)
# if commit:
# compound_instance.save()
# ALWAYS MAKE A NEW COMPOUND INSTANCE
compound_instance.save()
self.compound_instances.update({
(compound_id, supplier.id, lot_text, receipt_date): compound_instance
})
# Update the instance with new data
# instance.matrix_item = matrix_item
instance.compound_instance = compound_instance
instance.addition_time = addition_time
instance.duration = duration
# Save the instance
if commit:
conflicting_assay_compound_instance = self.setup_compounds.get(
(
instance.matrix_item_id,
instance.compound_instance_id,
instance.concentration,
instance.concentration_unit_id,
instance.addition_time,
instance.duration,
instance.addition_location_id
), None
)
# If there is no conflict or if this is an update
if not conflicting_assay_compound_instance:
instance.save()
# Do nothing otherwise (it already exists)
self.setup_compounds.update({
(
instance.matrix_item_id,
instance.compound_instance_id,
instance.concentration,
instance.concentration_unit_id,
instance.addition_time,
instance.duration,
instance.addition_location_id
): True
})
# UGLY SOLUTION
# DEPRECATED
class AssaySetupCompoundInlineFormSet(BaseInlineFormSet):
"""Frontend Inline FormSet for Compound Instances"""
class Meta(object):
model = AssaySetupCompound
exclude = ('',)
def __init__(self, *args, **kwargs):
"""Init Chip Setup Form
Filters physical units to include only Concentration
"""
super(AssaySetupCompoundInlineFormSet, self).__init__(*args, **kwargs)
# Filter compound instances
compound_instances = CompoundInstance.objects.all().prefetch_related(
'compound',
'supplier'
)
compound_instances_dic = {
instance.id: instance for instance in compound_instances
}
# Filter on concentration but make a special exception for percent (%)
concentration_unit_queryset = PhysicalUnits.objects.filter(
unit_type__unit_type='Concentration'
).order_by(
'base_unit__unit',
'scale_factor'
) | PhysicalUnits.objects.filter(unit='%')
for form in self.forms:
# form.fields['start_time_unit'].queryset = time_unit_queryset
# form.fields['duration_unit'].queryset = time_unit_queryset
form.fields['concentration_unit'].queryset = concentration_unit_queryset
form.fields['compound_instance'].queryset = compound_instances
# All available compounds
form.fields['compound'] = forms.ModelChoiceField(
queryset=Compound.objects.all(),
widget=forms.Select(attrs={'class': 'form-control'})
)
# Text field (un-saved) for supplier
form.fields['supplier_text'] = forms.CharField(
initial='',
widget=forms.TextInput(attrs={'class': 'form-control'}),
required=False
)
# Text field (un-saved) for lot
form.fields['lot_text'] = forms.CharField(
initial='',
widget=forms.TextInput(attrs={'class': 'form-control'}),
required=False
)
# Receipt date
form.fields['receipt_date'] = forms.DateField(
required=False,
widget=forms.DateInput(attrs={
'class': 'form-control datepicker-input',
'autocomplete': 'off'
})
)
# If instance, apply initial values
if form.instance.compound_instance_id:
current_compound_instance = compound_instances_dic.get(form.instance.compound_instance_id)
form.fields['compound'].initial = current_compound_instance.compound
form.fields['supplier_text'].initial = current_compound_instance.supplier.name
form.fields['lot_text'].initial = current_compound_instance.lot
form.fields['receipt_date'].initial = current_compound_instance.receipt_date
# VERY SLOPPY
form.fields['compound'].widget.attrs['class'] += ' required'
current_field = 'compound'
if hasattr(form.fields[current_field], '_queryset'):
if hasattr(form.fields[current_field]._queryset, 'model'):
# Usually one would use a hyphen rather than an underscore
# form.fields[field].widget.attrs['data-app'] = form.fields[field]._queryset.model._meta.app_label
form.fields[current_field].widget.attrs['data_app'] = form.fields[current_field]._queryset.model._meta.app_label
# form.fields[field].widget.attrs['data-model'] = form.fields[field]._queryset.model._meta.object_name
form.fields[current_field].widget.attrs['data_model'] = form.fields[current_field]._queryset.model._meta.object_name
form.fields[current_field].widget.attrs['data_verbose_name'] = form.fields[current_field]._queryset.model._meta.verbose_name
# Possibly dumber
if hasattr(form.fields[current_field]._queryset.model, 'get_add_url_manager'):
form.fields[current_field].widget.attrs['data_add_url'] = form.fields[current_field]._queryset.model.get_add_url_manager()
# TODO THIS IS NOT DRY
def save(self, commit=True):
# Get forms_data (excluding those with delete or no data)
forms_data = [f for f in self.forms if f.cleaned_data and not f.cleaned_data.get('DELETE', False)]
forms_to_delete = [f for f in self.forms if f.cleaned_data and f.cleaned_data.get('DELETE', False)]
# Forms to be deleted
for form in forms_to_delete:
instance = super(BootstrapForm, form).save(commit=False)
if instance and instance.id and commit:
instance.delete()
matrix_item = self.instance
# Get all chip setup assay compound instances
assay_compound_instances = {
(
instance.compound_instance.id,
instance.concentration,
instance.concentration_unit.id,
instance.addition_time,
instance.duration,
instance.addition_location_id
): True for instance in AssaySetupCompound.objects.filter(
matrix_item_id=matrix_item.id
).prefetch_related(
'compound_instance__compound',
'concentration_unit'
)
}
# Get all Compound Instances
compound_instances = {
(
instance.compound.id,
instance.supplier.id,
instance.lot,
instance.receipt_date
): instance for instance in CompoundInstance.objects.all().prefetch_related(
'compound',
'supplier'
)
}
# Get all suppliers
suppliers = {
supplier.name: supplier for supplier in CompoundSupplier.objects.all()
}
# Forms to save
for form in forms_data:
instance = super(BootstrapForm, form).save(commit=False)
current_data = form.cleaned_data
# Bad
if not current_data.get('supplier_text'):
current_data['supplier_text'] = 'N/A'
if not current_data.get('lot_text'):
current_data['lot_text'] = 'N/A'
compound = current_data.get('compound')
supplier_text = current_data.get('supplier_text').strip()
lot_text = current_data.get('lot_text').strip()
receipt_date = current_data.get('receipt_date')
# Should be acquired straight from form
# concentration = current_data.get('concentration')
# concentration_unit = current_data.get('concentration_unit')
addition_time = 0
duration = 0
for time_unit, conversion in list(TIME_CONVERSIONS.items()):
addition_time += current_data.get('addition_time_' + time_unit, 0) * conversion
duration += current_data.get('duration_' + time_unit, 0) * conversion
# Check if the supplier already exists
supplier = suppliers.get(supplier_text, '')
# Otherwise create the supplier
if not supplier:
supplier = CompoundSupplier(
name=supplier_text,
created_by=matrix_item.created_by,
created_on=matrix_item.created_on,
modified_by=matrix_item.modified_by,
modified_on=matrix_item.modified_on
)
# if commit:
# supplier.save()
# Always save the supplier
supplier.save()
suppliers.update({
supplier_text: supplier
})
# Check if compound instance exists
compound_instance = compound_instances.get((compound.id, supplier.id, lot_text, receipt_date), '')
if not compound_instance:
compound_instance = CompoundInstance(
compound=compound,
supplier=supplier,
lot=lot_text,
receipt_date=receipt_date,
created_by=matrix_item.created_by,
created_on=matrix_item.created_on,
modified_by=matrix_item.modified_by,
modified_on=matrix_item.modified_on
)
# if commit:
# compound_instance.save()
# ALWAYS MAKE A NEW COMPOUND INSTANCE
compound_instance.save()
compound_instances.update({
(compound.id, supplier.id, lot_text, receipt_date): compound_instance
})
# Update the instance with new data
instance.matrix_item = matrix_item
instance.compound_instance = compound_instance
instance.addition_time = addition_time
instance.duration = duration
# Save the AssayCompoundInstance
if commit:
conflicting_assay_compound_instance = assay_compound_instances.get(
(
instance.compound_instance.id,
instance.concentration,
instance.concentration_unit.id,
instance.addition_time,
instance.duration,
instance.addition_location_id
), None
)
if not conflicting_assay_compound_instance:
instance.save()
assay_compound_instances.update({
(
instance.compound_instance.id,
instance.concentration,
instance.concentration_unit.id,
instance.addition_time,
instance.duration,
instance.addition_location_id
): True
})
class AssaySetupCellForm(ModelFormSplitTime):
class Meta(object):
model = AssaySetupCell
exclude = tracking
def __init__(self, *args, **kwargs):
# self.static_choices = kwargs.pop('static_choices', None)
super(AssaySetupCellForm, self).__init__(*args, **kwargs)
# Change widget size
self.fields['cell_sample'].widget.attrs['style'] = 'width:75px;'
self.fields['passage'].widget.attrs['style'] = 'width:75px;'
self.fields['density_unit'].queryset = PhysicalUnits.objects.filter(availability__contains='cell').order_by('unit')
# TODO: IDEALLY THE CHOICES WILL BE PASSED VIA A KWARG
class AssaySetupCellFormSet(BaseModelFormSetForcedUniqueness):
custom_fields = (
'matrix_item',
'cell_sample',
'biosensor',
'density_unit',
'addition_location'
)
def __init__(self, *args, **kwargs):
self.matrix = kwargs.pop('matrix', None)
super(AssaySetupCellFormSet, self).__init__(*args, **kwargs)
filters = {'matrix_item': {'matrix_id': self.matrix.id}}
self.dic = get_dic_for_custom_choice_field(self, filters=filters)
for form in self.forms:
for field in self.custom_fields:
form.fields[field] = DicModelChoiceField(field, self.model, self.dic)
# Purge all classes
for field in form.fields:
form.fields[field].widget.attrs['class'] = ''
class AssaySetupSettingForm(ModelFormSplitTime):
class Meta(object):
model = AssaySetupSetting
exclude = tracking
class AssaySetupSettingFormSet(BaseModelFormSetForcedUniqueness):
custom_fields = (
'matrix_item',
'setting',
'unit',
'addition_location'
)
def __init__(self, *args, **kwargs):
self.matrix = kwargs.pop('matrix', None)
super(AssaySetupSettingFormSet, self).__init__(*args, **kwargs)
filters = {'matrix_item': {'matrix_id': self.matrix.id }}
self.dic = get_dic_for_custom_choice_field(self, filters=filters)
for form in self.forms:
for field in self.custom_fields:
form.fields[field] = DicModelChoiceField(field, self.model, self.dic)
# Purge all classes
for field in form.fields:
form.fields[field].widget.attrs['class'] = ''
def _construct_form(self, i, **kwargs):
form = super(AssaySetupSettingFormSet, self)._construct_form(i, **kwargs)
for time_unit in list(TIME_CONVERSIONS.keys()):
# Create fields for Days, Hours, Minutes
form.fields['addition_time_' + time_unit] = forms.FloatField(initial=0)
form.fields['duration_' + time_unit] = forms.FloatField(initial=0)
# Change style
# form.fields['addition_time_' + time_unit].widget.attrs['style'] = 'width:75px;'
# form.fields['duration_' + time_unit].widget.attrs['style'] = 'width:75px;'
if form.instance.addition_time:
# Fill additional time
addition_time_in_minutes_remaining = form.instance.addition_time
for time_unit, conversion in list(TIME_CONVERSIONS.items()):
initial_time_for_current_field = int(addition_time_in_minutes_remaining / conversion)
if initial_time_for_current_field:
form.fields['addition_time_' + time_unit].initial = initial_time_for_current_field
addition_time_in_minutes_remaining -= initial_time_for_current_field * conversion
# Add fractions of minutes if necessary
if addition_time_in_minutes_remaining:
form.fields['addition_time_minute'].initial += addition_time_in_minutes_remaining
if form.instance.duration:
# Fill duration
duration_in_minutes_remaining = form.instance.duration
for time_unit, conversion in list(TIME_CONVERSIONS.items()):
initial_time_for_current_field = int(duration_in_minutes_remaining / conversion)
if initial_time_for_current_field:
form.fields['duration_' + time_unit].initial = initial_time_for_current_field
duration_in_minutes_remaining -= initial_time_for_current_field * conversion
# Add fractions of minutes if necessary
if duration_in_minutes_remaining:
form.fields['duration_minute'].initial += duration_in_minutes_remaining
return form
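# Illustrative sketch (assumes TIME_CONVERSIONS maps unit names to minutes, largest unit first,
# e.g. {'day': 1440, 'hour': 60, 'minute': 1}): this mirrors how _construct_form above splits a
# stored minute total back into per-unit initial values, folding any fractional remainder into minutes.
def _example_split_minutes(total_minutes, conversions=(('day', 1440), ('hour', 60), ('minute', 1))):
    parts = {}
    remaining = total_minutes
    for unit, conversion in conversions:
        whole = int(remaining / conversion)
        parts[unit] = whole
        remaining -= whole * conversion
    parts['minute'] += remaining  # any fraction of a minute left over
    return parts

# For example, _example_split_minutes(1530) returns {'day': 1, 'hour': 1, 'minute': 30}.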
AssaySetupCompoundFormSetFactory = modelformset_factory(
AssaySetupCompound,
extra=1,
exclude=[tracking],
form=AssaySetupCompoundForm,
formset=AssaySetupCompoundFormSet,
can_delete=True
)
AssaySetupCellFormSetFactory = modelformset_factory(
AssaySetupCell,
extra=1,
exclude=[tracking],
form=AssaySetupCellForm,
formset=AssaySetupCellFormSet,
can_delete=True
)
AssaySetupSettingFormSetFactory = modelformset_factory(
AssaySetupSetting,
extra=1,
exclude=[tracking],
form=AssaySetupSettingForm,
formset=AssaySetupSettingFormSet,
can_delete=True
)
AssaySetupCompoundInlineFormSetFactory = inlineformset_factory(
AssayMatrixItem,
AssaySetupCompound,
extra=1,
exclude=[tracking],
form=AssaySetupCompoundForm,
formset=AssaySetupCompoundInlineFormSet,
can_delete=True
)
AssaySetupCellInlineFormSetFactory = inlineformset_factory(
AssayMatrixItem,
AssaySetupCell,
extra=1,
exclude=[tracking],
form=AssaySetupCellForm,
# formset=AssaySetupCellFormSet,
can_delete=True
)
AssaySetupSettingInlineFormSetFactory = inlineformset_factory(
AssayMatrixItem,
AssaySetupSetting,
extra=1,
exclude=[tracking],
form=AssaySetupSettingForm,
# formset=AssaySetupSettingFormSet,
can_delete=True
)
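# Hypothetical usage sketch (view and variable names are illustrative, not defined in this file):
# the factories above are typically bound to POST data and a queryset in a view; the cell formset
# also expects the custom 'matrix' kwarg that AssaySetupCellFormSet pops in its __init__.
def _example_bind_cell_formset(request, matrix):
    formset = AssaySetupCellFormSetFactory(
        request.POST or None,
        queryset=AssaySetupCell.objects.filter(matrix_item__matrix_id=matrix.id),
        matrix=matrix,
    )
    if formset.is_valid():
        formset.save()
    return formset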
class AssayMatrixItemFullForm(SignOffMixin, BootstrapForm):
"""Frontend form for Items"""
class Meta(object):
model = AssayMatrixItem
widgets = {
'concentration': forms.NumberInput(attrs={'style': 'width:75px;'}),
'notebook_page': forms.NumberInput(attrs={'style': 'width:75px;'}),
'notes': forms.Textarea(attrs={'cols': 50, 'rows': 3}),
'variance_from_organ_model_protocol': forms.Textarea(attrs={'cols': 50, 'rows': 2}),
}
# Assay Run ID is always bound to the parent Study
exclude = ('study', 'matrix', 'column_index', 'row_index') + tracking + restricted
def clean(self):
"""Cleans the Chip Setup Form
Ensures the name is unique in the current study
Ensures that the data for a compound is complete
Prevents changes to the chip if data has been uploaded (avoiding conflicts between data and entries)
"""
super(AssayMatrixItemFullForm, self).clean()
# Make sure the barcode/ID is unique in the study
if AssayMatrixItem.objects.filter(
study_id=self.instance.study.id,
name=self.cleaned_data.get('name')
).exclude(id=self.instance.id):
raise forms.ValidationError({'name': ['ID/Barcode must be unique within study.']})
# Make sure the device matches if necessary
if self.instance.matrix.device and (self.instance.matrix.device != self.cleaned_data.get('device')):
raise forms.ValidationError(
{'device': ['The item\'s device must match the one specified in the Matrix: "{}"'.format(self.instance.matrix.device)]}
)
# SetupFormsMixin is unfortunate, but expedient
class AssayMatrixItemForm(SetupFormsMixin, SignOffMixin, BootstrapForm):
# CONTRIVED!
series_data = forms.CharField(required=False)
class Meta(object):
model = AssayMatrixItem
# WE OUGHT TO BE ABLE TO EDIT A SELECT FEW THINGS
fields = (
'name',
'group',
# Notes stuff worth keeping??
'scientist',
'notebook',
'notebook_page',
'notes'
) + flag_group
def __init__(self, *args, **kwargs):
super(AssayMatrixItemForm, self).__init__(*args, **kwargs)
# Gee, it might be nice to have a better way to query groups!
# Use chip groups if chip
if self.instance.matrix.representation == 'chips':
self.fields['group'].queryset = AssayGroup.objects.filter(
# We will always know the study, this can never be an add page
study_id=self.instance.study_id,
organ_model__device__device_type='chip'
).prefetch_related('organ_model__device')
# UGLY: DO NOT LIKE THIS
# Prepopulate series_data
self.fields['series_data'].initial = self.instance.study.get_group_data_string(get_chips=True)
# Otherwise use plate groups
# TODO: IF WE ARE BINDING PLATES TO MODELS, WE CANNOT DO THIS!
else:
self.fields['group'].queryset = AssayGroup.objects.filter(
study_id=self.instance.study_id,
# See above
# OOPS! WE NEED TO RESPECT THE ORGAN MODEL OR WHATEVER
# organ_model__device__device_type='plate'
# MATCH THE ORGAN MODEL ID OF CURRENT GROUP!
organ_model_id=self.instance.group.organ_model_id
).prefetch_related('organ_model__device')
# UGLY: DO NOT LIKE THIS
# Prepopulate series_data
self.fields['series_data'].initial = self.instance.study.get_group_data_string(plate_id=self.instance.matrix_id)
# DEPRECATED JUNK
class AssayMatrixItemInlineForm(forms.ModelForm):
class Meta(object):
model = AssayMatrixItem
exclude = ('study', 'matrix') + tracking
# TODO NEED TO TEST (NOTE FROM THE FUTURE: "NOT ANYMORE I DON'T, THIS IS DEPRECATED TRASH!")
class AssayMatrixItemFormSet(BaseInlineFormSetForcedUniqueness):
custom_fields = (
'device',
'organ_model',
'organ_model_protocol',
'failure_reason'
)
def __init__(self, *args, **kwargs):
# Get the study
self.study = kwargs.pop('study', None)
self.user = kwargs.pop('user', None)
super(AssayMatrixItemFormSet, self).__init__(*args, **kwargs)
if not self.study:
self.study = self.instance.study
self.dic = get_dic_for_custom_choice_field(self)
for form in self.forms:
for field in self.custom_fields:
form.fields[field] = DicModelChoiceField(field, self.model, self.dic)
if self.study:
form.instance.study = self.study
if form.instance.pk:
form.instance.modified_by = self.user
else:
form.instance.created_by = self.user
self.invalid_matrix_item_names = {
item.name: item.id for item in AssayMatrixItem.objects.filter(study_id=self.study.id)
}
def clean(self):
super(AssayMatrixItemFormSet, self).clean()
for index, form in enumerate(self.forms):
current_data = form.cleaned_data
if current_data and not current_data.get('DELETE', False):
if self.instance.number_of_columns:
if current_data.get('column_index') > self.instance.number_of_columns:
raise forms.ValidationError(
'An Item extends beyond the columns of the Matrix.'
' Increase the size of the Matrix and/or delete the offending Item if necessary.'
)
if current_data.get('row_index') > self.instance.number_of_rows:
raise forms.ValidationError(
'An Item extends beyond the rows of the Matrix.'
' Increase the size of the Matrix and/or delete the offending Item if necessary.'
)
# Make sure the barcode/ID is unique in the study
conflicting_name_item_id = self.invalid_matrix_item_names.get(current_data.get('name'), None)
if conflicting_name_item_id and conflicting_name_item_id != form.instance.pk:
form.add_error('name', 'This name conflicts with existing Item names in this Study.')
# Make sure the device matches if necessary
if self.instance.device and (self.instance.device != current_data.get('device')):
form.add_error('device', 'This device conflicts with the one listed in the Matrix.')
AssayMatrixItemFormSetFactory = inlineformset_factory(
AssayMatrix,
AssayMatrixItem,
formset=AssayMatrixItemFormSet,
form=AssayMatrixItemInlineForm,
extra=1,
exclude=('study',) + tracking
)
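# Illustrative sketch (not part of the original code): the name-uniqueness check in the formset
# above builds a {name: id} map once and compares each form against it, avoiding a query per form.
def _example_name_conflicts(existing_name_to_id, name, current_pk):
    """Return True when 'name' is already taken by a different item in the study."""
    conflicting_id = existing_name_to_id.get(name)
    return conflicting_id is not None and conflicting_id != current_pk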
# CONTRIVED
class AssayStudyDeleteForm(forms.ModelForm):
class Meta(object):
model = AssayStudy
fields = []
class AssayStudySignOffForm(SignOffMixin, BootstrapForm):
class Meta(object):
model = AssayStudy
fields = [
'signed_off',
'signed_off_notes',
'release_date',
]
widgets = {
'signed_off_notes': forms.Textarea(attrs={'cols': 50, 'rows': 2}),
}
class AssayStudyStakeholderSignOffForm(SignOffMixin, BootstrapForm):
class Meta(object):
model = AssayStudyStakeholder
fields = ['signed_off', 'signed_off_notes']
widgets = {
'signed_off_notes': forms.Textarea(attrs={'cols': 50, 'rows': 2}),
}
class AssayStudyStakeholderFormSet(BaseInlineFormSet):
class Meta(object):
model = AssayStudyStakeholder
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(AssayStudyStakeholderFormSet, self).__init__(*args, **kwargs)
def get_queryset(self):
if not hasattr(self, '_queryset'):
# TODO FILTER OUT THOSE USER ISN'T ADMIN OF
# TODO REVIEW
user_admin_groups = self.user.groups.filter(name__contains=ADMIN_SUFFIX)
potential_groups = [group.name.replace(ADMIN_SUFFIX, '') for group in user_admin_groups]
queryset = super(AssayStudyStakeholderFormSet, self).get_queryset()
# Only include unsigned off forms that user is admin of!
self._queryset = queryset.filter(
group__name__in=potential_groups,
signed_off_by=None
)
return self._queryset
def save(self, commit=True):
for form in self.forms:
signed_off = form.cleaned_data.get('signed_off', False)
if signed_off and is_group_admin(self.user, form.instance.group.name):
form.instance.signed_off_by = self.user
form.instance.signed_off_date = timezone.now()
form.save(commit=True)
# Really, all factories should be declared like so (will have to do this for upcoming revision)
AssayStudyStakeholderFormSetFactory = inlineformset_factory(
AssayStudy,
AssayStudyStakeholder,
form=AssayStudyStakeholderSignOffForm,
formset=AssayStudyStakeholderFormSet,
extra=0,
can_delete=False
)
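# Illustrative sketch (assumes ADMIN_SUFFIX is a suffix appended to admin group names): mirrors how
# the stakeholder formset above derives the plain group names a user administers from the user's
# "<group name><ADMIN_SUFFIX>" group memberships.
def _example_admin_group_names(user, admin_suffix):
    admin_groups = user.groups.filter(name__contains=admin_suffix)
    return [group.name.replace(admin_suffix, '') for group in admin_groups]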
class AssayStudyDataUploadForm(BootstrapForm):
"""Form for Bulk Uploads"""
# Excluded for now
# overwrite_option = OVERWRITE_OPTIONS_BULK
# EVIL WAY TO GET PREVIEW DATA
preview_data = forms.BooleanField(initial=False, required=False)
class Meta(object):
model = AssayStudy
fields = ('bulk_file',)
def __init__(self, *args, **kwargs):
"""Init the Bulk Form
kwargs:
request -- the current request
"""
self.request = kwargs.pop('request', None)
super(AssayStudyDataUploadForm, self).__init__(*args, **kwargs)
def clean(self):
data = super(AssayStudyDataUploadForm, self).clean()
# Get the study in question
study = self.instance
# test_file = None
# TODO TODO TODO TODO TODO
if self.request and self.request.FILES and data.get('bulk_file'):
# Make sure that this isn't the current file
if not study.bulk_file or study.bulk_file != data.get('bulk_file'):
test_file = data.get('bulk_file', '')
file_processor = AssayFileProcessor(test_file, study, self.request.user)
# Process the file
file_processor.process_file()
# Evil attempt to acquire preview data
self.cleaned_data['preview_data'] = file_processor.preview_data
return self.cleaned_data
class AssayStudySetForm(SignOffMixin, BootstrapForm):
class Meta(object):
model = AssayStudySet
exclude = tracking
widgets = {
'description': forms.Textarea(attrs={'rows': 10})
}
def __init__(self, *args, **kwargs):
super(AssayStudySetForm, self).__init__(*args, **kwargs)
study_queryset = get_user_accessible_studies(
self.user
).prefetch_related(
'group__center_groups',
)
assay_queryset = AssayStudyAssay.objects.filter(
study_id__in=study_queryset.values_list('id', flat=True)
).prefetch_related(
'target',
'method',
'unit'
)
self.fields['studies'].queryset = study_queryset
self.fields['assays'].queryset = assay_queryset
# CONTRIVED
self.fields['studies'].widget.attrs['class'] = 'no-selectize'
self.fields['assays'].widget.attrs['class'] = 'no-selectize'
class AssayReferenceForm(BootstrapForm):
query_term = forms.CharField(
initial='',
required=False,
label='PubMed ID / DOI'
)
class Meta(object):
model = AssayReference
exclude = tracking
widgets = {
'query_term': forms.Textarea(attrs={'rows': 1}),
'title': forms.Textarea(attrs={'rows': 2}),
'authors': forms.Textarea(attrs={'rows': 1}),
'abstract': forms.Textarea(attrs={'rows': 10}),
'publication': forms.Textarea(attrs={'rows': 1}),
}
AssayStudyReferenceFormSetFactory = inlineformset_factory(
AssayStudy,
AssayStudyReference,
extra=1,
exclude=[]
)
AssayStudySetReferenceFormSetFactory = inlineformset_factory(
AssayStudySet,
AssayStudySetReference,
extra=1,
exclude=[]
)
# Convoluted
def process_error_with_annotation(prefix, row, column, full_error):
current_error = dict(full_error)
modified_error = []
for error_field, error_values in current_error.items():
for error_value in error_values:
modified_error.append([
'|'.join([str(x) for x in [
prefix,
row,
column,
error_field
]]) + '-' + error_value
])
return modified_error
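# Illustrative example (hypothetical input values): process_error_with_annotation flattens a form's
# error dict into 'prefix|row|column|field-message' entries so the offending cell can be identified
# on the client side, e.g.
#
#   process_error_with_annotation('compound', 2, 5, {'duration': ['This field is required.']})
#   # -> [['compound|2|5|duration-This field is required.']]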
class AssayMatrixFormNew(SetupFormsMixin, SignOffMixin, BootstrapForm):
# ADD test_types
test_type = forms.ChoiceField(
initial='control',
choices=TEST_TYPE_CHOICES
)
class Meta(object):
model = AssayMatrix
# ODD
fields = []
def __init__(self, *args, **kwargs):
"""Init the Study Form
Kwargs:
user -- the user in question
"""
# PROBABLY DON'T NEED THIS?
# self.user = kwargs.pop('user', None)
super(AssayMatrixFormNew, self).__init__(*args, **kwargs)
# SLOPPY
self.fields['test_type'].widget.attrs['class'] += ' no-selectize test-type'
# Inline width style is a stopgap; this would be better handled in CSS
self.fields['test_type'].widget.attrs['style'] = 'width:100px;'
# PLEASE NOTE CRUDE HANDLING OF m2m
class AssayTargetForm(BootstrapForm):
# For adding to category m2m
category = forms.ModelMultipleChoiceField(
queryset=AssayCategory.objects.all().order_by('name'),
# Should this be required?
required=False,
# empty_label='All'
)
class Meta(object):
model = AssayTarget
exclude = tracking
widgets = {
'description': forms.Textarea(attrs={'cols': 50, 'rows': 3}),
}
def __init__(self, *args, **kwargs):
super(AssayTargetForm, self).__init__(*args, **kwargs)
# Get category if possible
if self.instance and self.instance.id:
self.initial_categories = AssayCategory.objects.filter(
targets__id=self.instance.id
)
self.fields['category'].initial = (
self.initial_categories
)
else:
self.initial_categories = AssayCategory.objects.none()
# Sort the methods
# Would it be better to have this applied to all method queries?
self.fields['methods'].queryset = AssayMethod.objects.all().order_by('name')
def save(self, commit=True):
new_target = super(AssayTargetForm, self).save(commit)
if commit:
if self.cleaned_data.get('category', None):
for current_category in self.cleaned_data.get('category', None):
current_category.targets.add(self.instance)
# Permit removals for the moment
# Crude removal
for initial_category in self.initial_categories:
if initial_category not in self.cleaned_data.get('category', None):
initial_category.targets.remove(self.instance)
return new_target
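# Illustrative sketch (not part of the original code): save() above keeps the reverse category M2M
# in sync by adding every selected category and removing any initially-set category that was
# deselected. The same diff-and-sync pattern in generic form:
def _example_sync_m2m(initial, selected, add, remove):
    """'add' and 'remove' are callables, e.g. category.targets.add / .remove bound to one target."""
    for item in selected:
        add(item)
    for item in initial:
        if item not in selected:
            remove(item)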
class AssayTargetRestrictedForm(BootstrapForm):
# For adding to category m2m
category = forms.ModelMultipleChoiceField(
queryset=AssayCategory.objects.all().order_by('name'),
# Should this be required?
required=False,
# empty_label='All'
)
# We don't want restricted users to edit the methods M2M directly (they could remove methods)
method_proxy = forms.ModelMultipleChoiceField(
queryset=AssayMethod.objects.all().order_by('name'),
label='Methods'
)
class Meta(object):
model = AssayTarget
fields = ['category', 'method_proxy']
def __init__(self, *args, **kwargs):
super(AssayTargetRestrictedForm, self).__init__(*args, **kwargs)
# Get category if possible
# (It should always be possible, this form is for editing)
if self.instance and self.instance.id:
self.fields['category'].initial = (
AssayCategory.objects.filter(
targets__id=self.instance.id
)
)
self.fields['method_proxy'].initial = (
self.instance.methods.all()
)
def save(self, commit=True):
new_target = super(AssayTargetRestrictedForm, self).save(commit)
if commit:
if self.cleaned_data.get('category', None):
for current_category in self.cleaned_data.get('category', None):
current_category.targets.add(self.instance)
if self.cleaned_data.get('method_proxy', None):
for current_method in self.cleaned_data.get('method_proxy', None):
self.instance.methods.add(current_method)
return new_target
class AssayMethodForm(BootstrapForm):
# For adding to target m2m
targets = forms.ModelMultipleChoiceField(
queryset=AssayTarget.objects.all().order_by('name'),
# No longer required to prevent circularity with Target
required=False
)
class Meta(object):
model = AssayMethod
exclude = tracking
widgets = {
'description': forms.Textarea(attrs={'cols': 50, 'rows': 3}),
}
def __init__(self, *args, **kwargs):
super(AssayMethodForm, self).__init__(*args, **kwargs)
# Get target if possible
if self.instance and self.instance.id:
self.initial_targets = AssayTarget.objects.filter(
methods__id=self.instance.id
)
self.fields['targets'].initial = (
self.initial_targets
)
else:
self.initial_targets = AssayTarget.objects.none()
def save(self, commit=True):
new_method = super(AssayMethodForm, self).save(commit)
if commit:
for current_target in self.cleaned_data.get('targets', None):
current_target.methods.add(self.instance)
# Permit removals for the moment
# Crude removal
for initial_target in self.initial_targets:
if initial_target not in self.cleaned_data.get('targets', None):
initial_target.methods.remove(self.instance)
return new_method
class AssayMethodRestrictedForm(BootstrapForm):
# For adding to target m2m
targets = forms.ModelMultipleChoiceField(
queryset=AssayTarget.objects.all().order_by('name'),
# No longer required to prevent circularity with Target
required=False
)
class Meta(object):
model = AssayMethod
# Only include the target, we don't want anything else to change
fields = ['targets']
def __init__(self, *args, **kwargs):
super(AssayMethodRestrictedForm, self).__init__(*args, **kwargs)
# Get target if possible
# (It should always be possible, this form is only for editing)
if self.instance and self.instance.id:
self.fields['targets'].initial = (
AssayTarget.objects.filter(
methods__id=self.instance.id
)
)
def save(self, commit=True):
new_method = super(AssayMethodRestrictedForm, self).save(commit)
if commit:
# In the restricted form, one is allowed to add targets ONLY
for current_target in self.cleaned_data.get('targets', None):
current_target.methods.add(self.instance)
return new_method
class PhysicalUnitsForm(BootstrapForm):
class Meta(object):
model = PhysicalUnits
exclude = tracking + ('availability',)
widgets = {
'description': forms.Textarea(attrs={'cols': 50, 'rows': 3}),
}
class AssayMeasurementTypeForm(BootstrapForm):
class Meta(object):
model = AssayMeasurementType
exclude = tracking
widgets = {
'description': forms.Textarea(attrs={'cols': 50, 'rows': 3}),
}
class AssaySampleLocationForm(BootstrapForm):
class Meta(object):
model = AssaySampleLocation
exclude = tracking
widgets = {
'description': forms.Textarea(attrs={'cols': 50, 'rows': 3}),
}
class AssaySettingForm(BootstrapForm):
class Meta(object):
model = AssaySetting
exclude = tracking
widgets = {
'description': forms.Textarea(attrs={'cols': 50, 'rows': 3}),
}
class AssaySupplierForm(BootstrapForm):
class Meta(object):
model = AssaySupplier
exclude = tracking
widgets = {
'description': forms.Textarea(attrs={'cols': 50, 'rows': 3}),
}
# CONTRIVED
class AssayDataFileUploadDeleteForm(forms.ModelForm):
class Meta(object):
model = AssayDataFileUpload
fields = []
#####
# sck - ASSAY PLATE MAP START
# monkey patch to display method target and unit combo as needed in the assay plate map page
class AbstractClassAssayStudyAssay(AssayStudyAssay):
class Meta:
proxy = True
def __str__(self):
# return 'TARGET: {0} METHOD: {1} UNIT: {2}'.format(self.target, self.method, self.unit)
return '{2} --- TARGET: {0} by METHOD: {1}'.format(self.target, self.method, self.unit)
# Get info to populate pick lists; no qc needed on this form, just to use on webpage to allow user selections
class AssayPlateReadMapAdditionalInfoForm(forms.Form):
"""Form for Assay Plate Reader Map add/update/view extra info for dropdowns that are just used in GUI (not saved)."""
def __init__(self, *args, **kwargs):
study_id = kwargs.pop('study_id', None)
self.user = kwargs.pop('user', None)
super(AssayPlateReadMapAdditionalInfoForm, self).__init__(*args, **kwargs)
# note that the non-selectized versions are manipulated in javascript to facilitate the plate map
# they are not displayed to the user (they are hidden)
# something done very early in development; it would probably be structured differently now
self.fields['se_matrix_item'].queryset = AssayMatrixItem.objects.filter(study_id=study_id).order_by('name',)
self.fields['ns_matrix_item'].queryset = AssayMatrixItem.objects.filter(study_id=study_id).order_by('name',)
self.fields['ns_matrix_item'].widget.attrs.update({'class': 'no-selectize'})
self.fields['ns_location'].widget.attrs.update({'class': 'no-selectize'})
self.fields['se_matrix'].queryset = AssayMatrix.objects.filter(
study_id=study_id
).order_by('name',)
self.fields['se_matrix'].widget.attrs.update({'class': ' required'})
self.fields['se_platemap'].queryset = AssayPlateReaderMap.objects.filter(
study_id=study_id
).order_by('name',)
self.fields['se_platemap'].widget.attrs.update({'class': ' required'})
# fields defined before calibration/data processing was developed
ns_matrix_item = forms.ModelChoiceField(
queryset=AssayMatrixItem.objects.none(),
required=False,
)
se_matrix_item = forms.ModelChoiceField(
queryset=AssayMatrixItem.objects.none(),
required=False,
)
se_matrix = forms.ModelChoiceField(
queryset=AssayMatrix.objects.none(),
required=False,
)
se_platemap = forms.ModelChoiceField(
queryset=AssayPlateReaderMap.objects.none(),
required=False,
)
se_main_well_use = forms.ChoiceField(
choices=assay_plate_reader_main_well_use_choices
)
se_blank_well_use = forms.ChoiceField(
choices=assay_plate_reader_blank_well_use_choices
)
se_time_unit = forms.ChoiceField(
choices=assay_plate_reader_time_unit_choices
)
se_location = forms.ModelChoiceField(
queryset=AssaySampleLocation.objects.all().order_by(
'name'
),
required=False,
)
ns_location = forms.ModelChoiceField(
queryset=AssaySampleLocation.objects.all(),
required=False,
)
se_increment_operation = forms.ChoiceField(
choices=(('divide', 'Divide'), ('multiply', 'Multiply'), ('subtract', 'Subtract'), ('add', 'Add'))
)
form_number_time = forms.DecimalField(
required=False,
initial=1,
)
form_number_time.widget.attrs.update({'class': 'form-control'})
form_number_default_time = forms.DecimalField(
required=False,
initial=1,
)
form_number_default_time.widget.attrs.update({'class': 'form-control'})
form_number_standard_value = forms.DecimalField(
required=False,
initial=0,
)
form_number_standard_value.widget.attrs.update({'class': 'form-control'})
form_number_dilution_factor = forms.DecimalField(
required=False,
initial=1,
)
form_number_dilution_factor.widget.attrs.update({'class': 'form-control'})
form_number_collection_volume = forms.DecimalField(
required=False,
initial=1,
)
form_number_collection_volume.widget.attrs.update({'class': 'form-control'})
form_number_collection_time = forms.DecimalField(
required=False,
initial=1,
)
form_number_collection_time.widget.attrs.update({'class': 'form-control'})
form_number_increment_value = forms.DecimalField(
required=False,
initial=1,
)
form_number_increment_value.widget.attrs.update({'class': 'form-control'})
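# Hypothetical usage sketch (view variables are illustrative): the GUI-only form above is
# instantiated with the study whose items, matrices, and plate maps should populate its dropdowns:
#
#   info_form = AssayPlateReadMapAdditionalInfoForm(study_id=study.id, user=request.user)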
# Parent for plate reader map page
class AssayPlateReaderMapForm(BootstrapForm):
"""Form for Assay Plate Reader Map"""
class Meta(object):
model = AssayPlateReaderMap
fields = [
# 'id', do not need in queryset
'name',
'description',
'device',
'study_assay',
'time_unit',
'volume_unit',
'standard_unit',
'cell_count',
'standard_molecular_weight',
'well_volume'
]
widgets = {
'description': forms.Textarea(attrs={'cols': 50, 'rows': 3}),
}
def __init__(self, *args, **kwargs):
self.study = kwargs.pop('study', None)
# self.user = kwargs.pop('user', None)
super(AssayPlateReaderMapForm, self).__init__(*args, **kwargs)
# needed, or study_assay comes up blank on the update page (the add page worked without it)
if not self.study and self.instance.study:
self.study = self.instance.study
if self.study:
self.instance.study = self.study
# plate map instance - note, based on model, this is the name, use .id for the pk
my_instance = self.instance
# note that, if selectize is left on, the js file will need to change $("#id_device").val(matrix_size);
# for tips on working with selectize, search in path for HANDY
# self.fields['device'].widget.attrs['class'] += ' no-selectize'
self.fields['name'].initial = "map-" + datetime.datetime.now().strftime("%Y%m%d")+"-"+datetime.datetime.now().strftime('%H:%M:%S')
self.fields['name'].widget.attrs['class'] += ' required'
self.fields['device'].widget.attrs['class'] += ' required'
self.fields['time_unit'].widget.attrs['class'] += ' required'
self.fields['standard_unit'].widget.attrs['class'] += ' required'
self.fields['study_assay'].queryset = AbstractClassAssayStudyAssay.objects.filter(
study_id=self.study
).prefetch_related(
'target',
'method',
'unit',
)
self.fields['study_assay'].widget.attrs['class'] += ' required'
# the selectize was causing PROBLEMS, so it is turned off for this field
# HANDY - turn off selectize at the form level
# self.fields['volume_unit'].widget.attrs.update({'class': 'no-selectize'})
# self.fields['volume_unit'].widget.attrs['class'] += ' form-control'
self.fields['standard_molecular_weight'].widget.attrs['class'] += ' form-control'
######
# START section to deal with raw data showing in the plate map after file assignment
# this will populate a dropdown that lets the user pick which file block to see on the page (map and calibrate)
# For the dropdown, only look for those file blocks that have a populated file block id
# get a record in the table with the plate index of 0 and that have a file block id
as_value_formset_with_file_block = AssayPlateReaderMapItemValue.objects.filter(
assayplatereadermap=my_instance.id
).prefetch_related(
'assayplatereadermapitem',
).filter(
assayplatereadermapitem__plate_index=0
).filter(
assayplatereadermapdatafileblock__isnull=False
).order_by(
'assayplatereadermapdatafileblock__id',
)
distinct_plate_map_with_select_string = []
distinct_plate_map_with_block_pk = []
number_filed_combos = len(as_value_formset_with_file_block)
# print("print number of filed combos-forms.py: ", number_filed_combos)
# queryset should have one record for each value SET that HAS a file-block associated to it
# make a choice list/field for the file-block combos for this plate map
if number_filed_combos > 0:
i = 0
for record in as_value_formset_with_file_block:
short_file_name = os.path.basename(str(record.assayplatereadermapdatafile.plate_reader_file))
data_block_label = str(record.assayplatereadermapdatafileblock.data_block)
data_block_metadata = record.assayplatereadermapdatafileblock.data_block_metadata
# data_file_id_str = str(record.assayplatereadermapdatafile.id)
data_file_block_id_str = str(record.assayplatereadermapdatafileblock.id)
# make a choice tuple list for showing selections and a choice tuple list containing the file pk and block pk for javascript
pick_value = str(i)
pick_string = 'FILE: ' + short_file_name + ' BLOCK: ' + data_block_label + ' ' + data_block_metadata
# pick_string_pk = data_file_id_str + '-' + data_file_block_id_str
pick_string_block_pk = data_file_block_id_str
distinct_plate_map_with_select_string.append((pick_value, pick_string))
distinct_plate_map_with_block_pk.append((pick_value, pick_string_block_pk))
# print("looking for unique blocks counter ", i)
i = i + 1
# self.fields['ns_file_pk_block_pk'].widget.attrs['class'] += ' no-selectize'
self.fields['form_number_file_block_combos'].required = False
self.fields['form_number_file_block_combos'].initial = number_filed_combos
# file block options associated with a specific plate map
self.fields['se_block_select_string'].required = False
self.fields['se_block_select_string'].widget.attrs['class'] += ' required'
self.fields['se_block_select_string'].choices = distinct_plate_map_with_select_string
self.fields['ns_block_select_pk'].required = False
self.fields['ns_block_select_pk'].widget.attrs.update({'class': 'no-selectize'})
self.fields['ns_block_select_pk'].choices = distinct_plate_map_with_block_pk
self.fields['se_form_calibration_curve'].widget.attrs.update({'class': ' required'})
self.fields['form_make_mifc_on_submit'].widget.attrs.update({'class': ' big-checkbox'})
self.fields['se_form_calibration_curve'].required = False
self.fields['se_form_blank_handling'].required = False
self.fields['radio_replicate_handling_average_or_not'].required = False
# HANDY - if there are save problems, required fields are the likely cause
# self.fields['form_data_processing_multiplier_string'].required = False
#
# self.fields['form_data_processing_multiplier_string_short'].required = False
# self.fields['form_data_processing_multiplier_value_short'].required = False
# these multiplier fields were added to explain the multiplier in a table
# the long string was unacceptable to the project PI
# these really don't have to be form fields (not needed for data processing), but it was just easier/faster
# self.fields['form_data_processing_multiplier_string1'].required = False
# self.fields['form_data_processing_multiplier_string2'].required = False
# self.fields['form_data_processing_multiplier_string3'].required = False
# self.fields['form_data_processing_multiplier_string4'].required = False
# self.fields['form_data_processing_multiplier_string5'].required = False
# self.fields['form_data_processing_multiplier_string6'].required = False
# self.fields['form_data_processing_multiplier_string7'].required = False
# self.fields['form_data_processing_multiplier_string8'].required = False
# self.fields['form_data_processing_multiplier_string9'].required = False
# calibration fields - only a few are really needed as form fields (eg the calibration curve used, bounds)
# many are not really needed in the data processing and could be handled differently
self.fields['form_data_parsable_message'].required = False
self.fields['form_calibration_curve_method_used'].required = False
# self.fields['form_calibration_equation'].required = False
# self.fields['form_calibration_rsquared'].required = False
# self.fields['form_calibration_parameter_1_string'].required = False
# self.fields['form_calibration_parameter_2_string'].required = False
# self.fields['form_calibration_parameter_3_string'].required = False
# self.fields['form_calibration_parameter_4_string'].required = False
# self.fields['form_calibration_parameter_5_string'].required = False
# self.fields['form_calibration_parameter_1_value'].required = False
# self.fields['form_calibration_parameter_2_value'].required = False
# self.fields['form_calibration_parameter_3_value'].required = False
# self.fields['form_calibration_parameter_4_value'].required = False
# self.fields['form_calibration_parameter_5_value'].required = False
self.fields['form_calibration_standard_fitted_min_for_e'].required = False
self.fields['form_calibration_standard_fitted_max_for_e'].required = False
self.fields['form_calibration_sample_blank_average'].required = False
self.fields['form_calibration_standard_standard0_average'].required = False
self.fields['form_calibration_method'].required = False
self.fields['form_calibration_target'].required = False
self.fields['form_calibration_unit'].required = False
self.fields['form_number_standards_this_plate'].required = False
self.fields['form_hold_the_data_block_metadata_string'].required = False
self.fields['form_hold_the_omits_string'].required = False
self.fields['form_hold_the_notes_string'].required = False
# Need a valid choice field.
# When the selected plate map has standards, the user will never see this field and will not need it.
# If the plate does not have standards, the user will need the option to pick to borrow standards from another plate.
# Lab representative (ie Richard) indicated that standards, standard blanks, and sample blanks would all be borrowed from the same plate!
# does this plate map have standards?
does_this_plate_have_standards = AssayPlateReaderMapItem.objects.filter(
assayplatereadermap=my_instance.id
).filter(
well_use='standard'
)
number_standards_wells_on_plate = len(does_this_plate_have_standards)
choiceBorrowData = (0, 'Select One'),
choiceBorrowDataToPlateMap = (0, 0),
if number_standards_wells_on_plate > 0:
# left - file block pk in both
# right is a string of the data block meta data for selection of data block pk (left)
choiceBorrowData = choiceBorrowData
# right is plate map pk
choiceBorrowDataToPlateMap = choiceBorrowDataToPlateMap
else:
# if we have to borrow standards, need a list to pick from - add to choiceBorrowData
# need to borrow standards from another plate
# 20200510 - moving this to here from ajax call. Might move back depending on performance.
# most users will not do it this way....
as_value_formset_with_file_block_standard = AssayPlateReaderMapItemValue.objects.filter(
study_id=self.study
).filter(
assayplatereadermapdatafileblock__isnull=False
).prefetch_related(
'assayplatereadermapdatafileblock',
'assayplatereadermap',
'assayplatereadermapitem',
).filter(
assayplatereadermapitem__well_use='standard'
).order_by(
'assayplatereadermapdatafileblock__id', 'assayplatereadermapitem__well_use'
)
# print('as_value_formset_with_file_block_standard')
# print(as_value_formset_with_file_block_standard)
prev_file = "none"
prev_data_block_file_specific_pk = 0
# queryset should have one record for each value SET that HAS a file-block and at least one standard associated to it
if len(as_value_formset_with_file_block_standard) > 0:
for record in as_value_formset_with_file_block_standard:
short_file_name = os.path.basename(str(record.assayplatereadermapdatafile.plate_reader_file))
# this is the data block of the file (for file 0 to something...)
data_block_file_specific_pk = record.assayplatereadermapdatafileblock.data_block
if prev_file == short_file_name and prev_data_block_file_specific_pk == data_block_file_specific_pk:
pass
else:
data_platemap_pk = record.assayplatereadermap_id
data_platemap_name = record.assayplatereadermap.name
data_block_metadata = record.assayplatereadermapdatafileblock.data_block_metadata
data_block_database_pk = record.assayplatereadermapdatafileblock.id
# make a choice tuple list for showing selections and a choice tuple list containing the file pk and block pk for javascript
pick_string = 'PLATEMAP: ' + data_platemap_name + ' FILE: ' + short_file_name + ' BLOCK: ' + data_block_metadata + ' (' + str(
data_block_file_specific_pk) + ')'
addString1 = (data_block_database_pk, pick_string),
choiceBorrowData = choiceBorrowData + addString1
addString2 = (data_block_database_pk, data_platemap_pk),
choiceBorrowDataToPlateMap = choiceBorrowDataToPlateMap + (addString2)
prev_file = short_file_name
prev_data_block_file_specific_pk = data_block_file_specific_pk
# print('choiceBorrowData')
# print(choiceBorrowData)
# print('choiceBorrowDataToPlateMap')
# print(choiceBorrowDataToPlateMap)
self.fields['se_block_standard_borrow_string'].choices = choiceBorrowData
self.fields['ns_block_standard_borrow_string_to_block_pk_back_to_platemap_pk'].choices = choiceBorrowDataToPlateMap
self.fields['ns_block_standard_borrow_string_to_block_pk_back_to_platemap_pk'].required = False
self.fields['se_block_standard_borrow_string'].widget.attrs['class'] += ' required'
self.fields['se_block_standard_borrow_string'].required = False
# enable the selection of a plate to borrow standards from by letting the user see a string of info about the DATA BLOCK (not just the plate map!)
se_block_standard_borrow_string = forms.ChoiceField()
ns_block_standard_borrow_string_to_block_pk_back_to_platemap_pk = forms.ChoiceField()
# pk of the file block borrowing when no standards on the current plate (store it here)
form_block_standard_borrow_pk_single_for_storage = forms.IntegerField(
required=False,
)
# pk of the plate map associated with the file block borrowing when no standards on the current plate (store it here)
form_block_standard_borrow_pk_platemap_single_for_storage = forms.IntegerField(
required=False,
)
# here here, remove these next two after checking other way works
# form_hold_the_study_id = forms.IntegerField(
# required=False,
# )
# form_hold_the_platemap_id = forms.IntegerField(
# required=False,
# )
form_hold_the_data_block_metadata_string = forms.CharField(
widget=forms.TextInput(attrs={'readonly': 'readonly', 'initial': '-'})
)
form_hold_the_omits_string = forms.CharField(
widget=forms.TextInput(attrs={'readonly': 'readonly', 'initial': '-'})
)
form_hold_the_notes_string = forms.CharField(
widget=forms.TextInput(attrs={'readonly': 'readonly', 'initial': '-'})
)
form_block_file_data_block_selected_pk_for_storage = forms.IntegerField(
required=False,
)
form_number_file_block_combos = forms.CharField(widget=forms.TextInput(attrs={'readonly': 'readonly'}))
# string of selected file block (selected in dropdown)
se_block_select_string = forms.ChoiceField()
# pk of selected file block (stays lined up with the string)
ns_block_select_pk = forms.ChoiceField()
# END section to deal with raw data showing in the plate map after file assignment and deal with standard in a different file block
# print(calibration_choices)
# processing the data fields added
se_form_calibration_curve = forms.ChoiceField(
choices=(
calibration_choices
# ('select_one', 'Select One'),
# ('no_calibration', 'No Calibration'),
# ('best_fit', 'Best Fit'),
# ('logistic4', '4 Parameter Logistic w/fitted bounds'),
# ('logistic4a0', '4 Parameter Logistic w/lower bound = 0'),
# ('logistic4f', '4 Parameter Logistic w/user specified bound(s)'),
# ('linear', 'Linear w/fitted intercept'),
# ('linear0', 'Linear w/intercept = 0'),
# ('log', 'Logarithmic'),
# ('poly2', 'Quadratic Polynomial'),
# ('select_one', 'Select One (n = standard concentration, s = signal)'),
# ('no_calibration', 'No Calibration'),
# ('best_fit', 'Best Fit'),
# ('logistic4', '4 Parameter Logistic (s = ((A-D)/(1.0+((n/C)**B))) + D)'),
# ('linear', 'Linear w/fitted intercept (s = B*n + A)'),
# ('linear0', 'Linear w/intercept = 0 (s = B*n)'),
# ('log', 'Logarithmic (s = B*ln(n) + A)'),
# ('poly2', 'Polynomial (s = C*n**2 + B*n + A)'),
)
)
# forms.CharField(widget=forms.TextInput(attrs={'readonly': 'readonly'}))
# se_form_blank_handling = forms.ChoiceField(widget=forms.RadioSelect(attrs={'disabled': 'disabled'}),
se_form_blank_handling = forms.ChoiceField(
choices=(('subtracteachfromeach', 'Subtracting Average STANDARD Blanks from STANDARDS and Average SAMPLE Blanks from SAMPLES'),
('subtractstandardfromstandard', 'Subtracting Average STANDARD Blanks from STANDARDS (ignore sample blanks)'),
('subtractsamplefromsample', 'Subtracting Average SAMPLE Blanks from SAMPLES (ignore standard blanks)'),
('subtractstandardfromall', 'Subtracting Average STANDARD Blanks from the STANDARDS and SAMPLES'),
('subtractsamplefromall', 'Subtracting Average SAMPLE Blanks from the STANDARDS and SAMPLES'),
('ignore', 'Ignoring the Blanks')), initial='subtracteachfromeach'
)
form_min_standard = forms.DecimalField(
required=False,
)
form_min_standard.widget.attrs.update({'class': 'form-control'})
form_max_standard = forms.DecimalField(
required=False,
)
form_max_standard.widget.attrs.update({'class': 'form-control'})
form_logistic4_A = forms.DecimalField(
required=False,
)
form_logistic4_A.widget.attrs.update({'class': 'form-control'})
form_logistic4_D = forms.DecimalField(
required=False,
)
form_logistic4_D.widget.attrs.update({'class': 'form-control'})
form_data_processing_multiplier = forms.DecimalField(
required=False,
initial=1,
)
form_data_processing_multiplier.widget.attrs.update({'class': 'form-control'})
# works but only one line
# form_data_processing_multiplier_string = forms.CharField(
# required=False,
# initial="",
# )
# works but only one line
# form_data_processing_multiplier_string = forms.CharField()
# form_data_processing_multiplier_string.widget.attrs.update({'required': False, 'initial': ""})
# HANDY - how to give an extra field a widget so it can be manipulated, e.g. readonly
# form_data_processing_multiplier_string = forms.CharField(
# widget=forms.Textarea(attrs={'rows': 3, 'readonly': 'readonly', 'required': False})
# )
#
# form_data_processing_multiplier_string_short = forms.CharField(
# widget=forms.Textarea(attrs={'rows': 1, 'readonly': 'readonly'}))
# form_data_processing_multiplier_value_short = forms.CharField(
# widget=forms.Textarea(attrs={'rows': 1, 'readonly': 'readonly'}))
#
# form_data_processing_multiplier_string1 = forms.CharField(
# widget=forms.Textarea(attrs={'rows': 2, 'readonly': 'readonly'}))
# form_data_processing_multiplier_string2 = forms.CharField(
# widget=forms.Textarea(attrs={'rows': 2, 'readonly': 'readonly'}))
# form_data_processing_multiplier_string3 = forms.CharField(
# widget=forms.Textarea(attrs={'rows': 2, 'readonly': 'readonly'}))
# form_data_processing_multiplier_string4 = forms.CharField(
# widget=forms.Textarea(attrs={'rows': 2, 'readonly': 'readonly'}))
# form_data_processing_multiplier_string5 = forms.CharField(
# widget=forms.Textarea(attrs={'rows': 2, 'readonly': 'readonly'}))
# form_data_processing_multiplier_string6 = forms.CharField(
# widget=forms.Textarea(attrs={'rows': 2, 'readonly': 'readonly'}))
# form_data_processing_multiplier_string7 = forms.CharField(
# widget=forms.Textarea(attrs={'rows': 2, 'readonly': 'readonly'}))
# form_data_processing_multiplier_string8 = forms.CharField(
# widget=forms.Textarea(attrs={'rows': 2, 'readonly': 'readonly'}))
# form_data_processing_multiplier_string9 = forms.CharField(
# widget=forms.Textarea(attrs={'rows': 2, 'readonly': 'readonly'}))
form_data_parsable_message = forms.CharField(
widget=forms.Textarea(attrs={'rows': 6, 'readonly': 'readonly', 'required': False})
)
form_calibration_curve_method_used = forms.CharField(
widget=forms.TextInput(attrs={'readonly': 'readonly', 'required': False, 'initial': '-'})
)
# form_calibration_equation = forms.CharField(
# widget=forms.TextInput(attrs={'readonly': 'readonly', 'required': False, 'initial': '-'})
# )
# form_calibration_rsquared = forms.CharField(
# widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'})
# )
radio_replicate_handling_average_or_not = forms.ChoiceField(
# widget=forms.RadioSelect(attrs={'id': 'value'}),
widget=forms.RadioSelect,
choices=[
('average', 'Show Averages of the Replicate Samples'),
('each', 'Show Each Sample')])
# ('average', 'Send the Average of the Replicates to the Study Summary'),
# ('each', 'Send Each Replicates Value to the Study Summary')])
radio_standard_option_use_or_not = forms.ChoiceField(
required=False,
widget=forms.RadioSelect,
choices=[('no_calibration', 'No Calibration'), ('pick_block', 'Pick a Block of Data with Standards')])
# going to need to pass some calibration parameters
# think the max I will need is 5 for 5 parameter logistic
# going to need to keep track of order
# form_calibration_parameter_1_string = forms.CharField(
# widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'})
# )
# form_calibration_parameter_2_string = forms.CharField(
# widget=forms.TextInput(attrs={'readonly': 'readonly', 'initial': '-'})
# )
# form_calibration_parameter_3_string = forms.CharField(
# widget=forms.TextInput(attrs={'readonly': 'readonly', 'initial': '-'})
# )
# form_calibration_parameter_4_string = forms.CharField(
# widget=forms.TextInput(attrs={'readonly': 'readonly', 'initial': '-'})
# )
# form_calibration_parameter_5_string = forms.CharField(
# widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'})
# )
# form_calibration_parameter_1_value = forms.CharField(
# widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'})
# )
# form_calibration_parameter_2_value = forms.CharField(
# widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'})
# )
# form_calibration_parameter_3_value = forms.CharField(
# widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'})
# )
# form_calibration_parameter_4_value = forms.CharField(
# widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'})
# )
# form_calibration_parameter_5_value = forms.CharField(
# widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'})
# )
form_calibration_standard_fitted_min_for_e = forms.CharField(
widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'})
)
form_calibration_standard_fitted_max_for_e = forms.CharField(
widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'})
)
form_calibration_sample_blank_average = forms.CharField(
widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'})
)
form_calibration_standard_standard0_average = forms.CharField(
widget=forms.TextInput(attrs={'readonly': 'readonly','initial': '-'})
)
form_calibration_method = forms.CharField(
widget=forms.TextInput(attrs={'readonly': 'readonly', 'initial': '-'})
)
form_calibration_target = forms.CharField(
widget=forms.TextInput(attrs={'readonly': 'readonly', 'initial': '-'})
)
form_calibration_unit = forms.CharField(
widget=forms.TextInput(attrs={'readonly': 'readonly', 'initial': '-'})
)
form_number_standards_this_plate = forms.IntegerField(
required=False,
initial=1,
)
form_make_mifc_on_submit = forms.BooleanField(required=False)
# Let them name the maps the same if they really want to. Does not really matter to me
# def clean(self):
# # HANDY FORCE UNIQUE - this will return back to the form instead of showing the user an error
# cleaned_data = super(AssayPlateReaderMapForm, self).clean()
#
# if AssayPlateReaderMap.objects.filter(
# study_id=self.instance.study.id,
# name=self.cleaned_data.get('name', '')
# ).exclude(pk=self.instance.pk).count():
# raise forms.ValidationError({'name': ['Plate Map name must be unique within study. This plate map is now corrupted. Go back to the Plate Map List and click to Add Plate Map and start again.']})
#
# return cleaned_data
def clean(self):
# First thing in clean
# Call super for data
data = super(AssayPlateReaderMapForm, self).clean()
# After initial stuff done
self.process_file(save=False, called_from='clean')
return data
def save(self, commit=True):
# First thing in save
# Make sure to pass commit to the super call (don't want accidental saves)
map = super(AssayPlateReaderMapForm, self).save(commit=commit)
# Only save the file if commit is true
if commit:
self.process_file(save=True, called_from="save")
return map
def process_file(self, save=False, called_from="c"):
#### START When saving AssayPlateReaderMapUpdate after a calibration
# if user checked the box to send to study summary, make that happen
data = self.cleaned_data
# study = get_object_or_404(AssayStudy, pk=self.kwargs['study_id'])
if data.get('form_make_mifc_on_submit'):
# print(".unit ",data.get('standard_unit').unit)
# print(".id ", data.get('standard_unit').id)
# .unit
# µg / mL
# .id
# 6
# print(".unit ",data.get('standard_unit').unit)
# print(".id ", data.get('standard_unit').id)
if data.get('form_block_standard_borrow_pk_single_for_storage') == None:
borrowed_block_pk = -1
else:
borrowed_block_pk = data.get('form_block_standard_borrow_pk_single_for_storage')
if data.get('form_block_standard_borrow_pk_platemap_single_for_storage') == None:
borrowed_platemap_pk = -1
else:
borrowed_platemap_pk = data.get(
'form_block_standard_borrow_pk_platemap_single_for_storage')
# 20201104 when no_calibration is selected, the _used field does not get populated..deal with it here
use_curve = 'no_calibration'
use_curve_long = data.get('form_calibration_curve_method_used')
if data.get('se_form_calibration_curve') == 'no_calibration' or data.get('se_form_calibration_curve') == 'select_one':
use_curve_long = 'no_calibration'
else:
use_curve = find_a_key_by_value_in_dictionary(CALIBRATION_CURVE_MASTER_DICT, use_curve_long)
if len(use_curve.strip()) == 0:
err_msg = "The calibration method " + use_curve_long + " was not found in the cross reference list."
# print(err_msg)
raise forms.ValidationError(err_msg)
# form.instance.study
# make a dictionary to send to the utils.py function when calling it
set_dict = {
'called_from': 'form_save',
'study': self.instance.study.id,
'pk_platemap': self.instance.id,
'pk_data_block': data.get('form_block_file_data_block_selected_pk_for_storage'),
'plate_name': data.get('name'),
'form_calibration_curve': use_curve,
'multiplier': data.get('form_data_processing_multiplier'),
'unit': data.get('form_calibration_unit'),
'standard_unit': data.get('standard_unit').unit,
'form_min_standard': data.get('form_calibration_standard_fitted_min_for_e'),
'form_max_standard': data.get('form_calibration_standard_fitted_max_for_e'),
'form_logistic4_A': data.get('form_logistic4_A'),
'form_logistic4_D': data.get('form_logistic4_D'),
'form_blank_handling': data.get('se_form_blank_handling'),
'radio_standard_option_use_or_not': data.get('radio_standard_option_use_or_not'),
'radio_replicate_handling_average_or_not_0': data.get(
'radio_replicate_handling_average_or_not'),
'borrowed_block_pk': borrowed_block_pk,
'borrowed_platemap_pk': borrowed_platemap_pk,
'count_standards_current_plate': data.get('form_number_standards_this_plate'),
'target': data.get('form_calibration_target'),
'method': data.get('form_calibration_method'),
'time_unit': data.get('time_unit'),
'volume_unit': data.get('volume_unit'),
'user_notes': data.get('form_hold_the_notes_string'),
'user_omits': data.get('form_hold_the_omits_string'),
'plate_size': data.get('device'),
}
# this function is in utils.py that returns data
data_mover = plate_reader_data_file_process_data(set_dict)
# 20201105 one row of data mover
# {'matrix_item_name': '13', 'cross_reference': 'Plate Reader Tool', 'plate_name': 'map-20201105-07:47:13',
# 'well_name': 'D7 C7 E7', 'day': '1.0', 'hour': '0', 'minute': '0', 'target': 'Decay Time',
# 'subtarget': 'none', 'method': 'EarlyTox Cardiotoxicity Kit (Molecular Devices: R8211)',
# 'location_name': 'Basolateral', 'processed_value': '25195871.42980029', 'unit': 'ng/mL', 'replicate': 1,
# 'caution_flag': '', 'exclude': ' ', 'notes': '',
# 'sendmessage': 'Fitting method: linear; Standard minimum: 0.0; Standard maximum: 100.0; '}, {
# 'matrix_item_name': '13', 'cross_reference': 'Plate Reader Tool', 'plate_name': 'map-20201105-07:47:13',
# 'well_name': 'C8 E8 D8', 'day': '2.0', 'hour': '0', 'minute': '0', 'target': 'Decay Time',
# 'subtarget': 'none', 'method': 'EarlyTox Cardiotoxicity Kit (Molecular Devices: R8211)',
# 'location_name': 'Basolateral', 'processed_value': '24630641.60638611', 'unit': 'ng/mL', 'replicate': 1,
# 'caution_flag': '', 'exclude': ' ', 'notes': '',
# 'sendmessage': 'Fitting method: linear; Standard minimum: 0.0; Standard maximum: 100.0; '}, {
# 'matrix_item_name': '13', 'cross_reference': 'Plate Reader Tool', 'plate_name': 'map-20201105-07:47:13',
# 'well_name': 'C9 E9 D9', 'day': '3.0', 'hour': '0', 'minute': '0', 'target': 'Decay Time',
# 'subtarget': 'none', 'method': 'EarlyTox Cardiotoxicity Kit (Molecular Devices: R8211)',
# 'location_name': 'Basolateral', 'processed_value': '34903839.32472848', 'unit': 'ng/mL', 'replicate': 1,
# 'caution_flag': '', 'exclude': ' ', 'notes': '',
# 'sendmessage': 'Fitting method: linear; Standard minimum: 0.0; Standard maximum: 100.0; '}
utils_key_column_header = {
'matrix_item_name': COLUMN_HEADERS[0],
'cross_reference': COLUMN_HEADERS[1],
'plate_name': COLUMN_HEADERS[2],
'well_name': COLUMN_HEADERS[3],
'day': COLUMN_HEADERS[4],
'hour': COLUMN_HEADERS[5],
'minute': COLUMN_HEADERS[6],
'target': COLUMN_HEADERS[7],
'subtarget': COLUMN_HEADERS[8],
'method': COLUMN_HEADERS[9],
'location_name': COLUMN_HEADERS[10],
'processed_value': COLUMN_HEADERS[11],
'unit': COLUMN_HEADERS[12],
'replicate': COLUMN_HEADERS[13],
'caution_flag': COLUMN_HEADERS[14],
'exclude': COLUMN_HEADERS[15],
'notes': COLUMN_HEADERS[16],
'sendmessage': 'Processing Details'
}
column_table_headers_average = list(COLUMN_HEADERS)
column_table_headers_average.append('Processing Details')
# data_mover[9] is a list of data-row dictionaries keyed as shown in utils_key_column_header
list_of_dicts = data_mover[9]
list_of_lists_mifc_headers_row_0 = [None] * (len(list_of_dicts) + 1)
list_of_lists_mifc_headers_row_0[0] = column_table_headers_average
i = 1
for each_dict_in_list in list_of_dicts:
list_each_row = []
for this_mifc_header in column_table_headers_average:
# find the key in the dictionary that we need
utils_dict_header = find_a_key_by_value_in_dictionary(utils_key_column_header,
this_mifc_header)
# get the value that is associated with this header in the dict
this_value = each_dict_in_list.get(utils_dict_header)
# add the value to the list for this dict in the list of dicts
list_each_row.append(this_value)
# when done with the dictionary, add the complete list for this row to the list of lists
list_of_lists_mifc_headers_row_0[i] = list_each_row
i = i + 1
# First make a csv from the list_of_lists (using list_of_lists_mifc_headers_row_0)
# or self.objects.study
my_study = self.instance.study
# my_user = self.request.user
my_user = self.user
my_platemap = self.instance
my_data_block_pk = data.get('form_block_file_data_block_selected_pk_for_storage')
platenamestring1 = str(my_platemap)
metadatastring1 = str(data.get('form_hold_the_data_block_metadata_string'))
# Specify the file for use with the file uploader class
# some of these caused errors in the file name so remove them
# Luke and Quinn voted for all the symbols out instead of a few
platenamestring = re.sub('[^a-zA-Z0-9_]', '', platenamestring1)
metadatastring = re.sub('[^a-zA-Z0-9_]', '', metadatastring1)
name_the_file = 'PLATE-{}-{}--METADATA-{}-{}'.format(
my_platemap.id, platenamestring,
my_data_block_pk, metadatastring
)
# PLEASE NOTE THE NAIVE TRUNCATION HERE
# Revise soon
bulk_location = upload_file_location(
my_study,
name_the_file
)[:97]
# Make sure study has directories
if not os.path.exists(MEDIA_ROOT + '/data_points/{}'.format(my_study.id)):
os.makedirs(MEDIA_ROOT + '/data_points/{}'.format(my_study.id))
# Need to import from models
# Avoid magic string, use media location
file_location = MEDIA_ROOT.replace('mps/../', '', 1) + '/' + bulk_location + '.csv'
# Should make a csv writer to avoid repetition
file_to_write = open(file_location, 'w')
csv_writer = csv.writer(file_to_write, dialect=csv.excel)
# Add the UTF-8 BOM
list_of_lists_mifc_headers_row_0[0][0] = '\ufeff' + list_of_lists_mifc_headers_row_0[0][0]
# Write the data rows
for one_line_of_data in list_of_lists_mifc_headers_row_0:
csv_writer.writerow(one_line_of_data)
file_to_write.close()
new_mifc_file = open(file_location, 'rb')
file_processor = AssayFileProcessor(
new_mifc_file,
my_study,
my_user,
save=save,
full_path='/media/' + bulk_location + '.csv'
)
# Process the file
file_processor.process_file()
#### END When saving AssayPlateReaderMapUpdate after a calibration
def find_a_key_by_value_in_dictionary(this_dict, this_header):
"""This is a function to find a key by value."""
my_key = ''
for key, value in this_dict.items():
if value == this_header:
my_key = key
break
return my_key
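# Illustrative example (hypothetical values): the helper above is a reverse lookup, returning the
# first key whose value matches the supplied header, or '' when nothing matches, e.g.
#
#   find_a_key_by_value_in_dictionary({'day': 'Day', 'hour': 'Hour'}, 'Hour')  # -> 'hour'
#   find_a_key_by_value_in_dictionary({'day': 'Day'}, 'Minute')                # -> ''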
# There should be a complete set of items for each saved plate map (one for each well in the selected plate)
class AssayPlateReaderMapItemForm(forms.ModelForm):
"""Form for Assay Plate Reader Map Item"""
class Meta(object):
model = AssayPlateReaderMapItem
# exclude = tracking + ('study',)
fields = [
# 'id', do not need
'matrix_item',
'location',
'name',
# 'row_index',
# 'column_index',
'plate_index',
'standard_value',
'dilution_factor',
'collection_volume',
'collection_time',
'default_time',
'well_use',
]
# kept here for reference of what not to do if you want the form to be selectized
# def __init__(self, *args, **kwargs):
# super(AssayPlateReaderMapItemForm, self).__init__(*args, **kwargs)
# self.fields['name'].widget.attrs.update({'class': ' no-selectize'})
# # 20200428 for user entered information
# # 20200609 these are adding too much overhead, and did not use in data process, so remove these
# form_user_entered_notes = forms.CharField(
# initial='-',
# required=False,
# widget=forms.Textarea(attrs={'cols': 10, 'rows': 1}),
# )
# form_user_entered_omit_from_average = forms.BooleanField(required=False, )
###########
# 20200522 getting rid of the value form altogether since editing is not allowed after values are attached to the plate map.
# GET RID OF THIS
# # Item VALUES are sets that correspond to items. Each set should have a match to a well in the plate map.
# # If not file/blocks attached to plate map, will have one set of values (with one value for each item)
# # If one file/block attached to plate map, will have two sets of values (one for the file, one null file) etc.
# class AssayPlateReaderMapItemValueForm(forms.ModelForm):
# """Form for Assay Plate Reader Map Item Value"""
#
# # 20200113 - changing so this formset is only called when adding and when update or view when no data are yet attached
#
# class Meta(object):
# model = AssayPlateReaderMapItemValue
# # it is worth noting that there is a nuance to excluding or setting fields
# # exclude = tracking + ('study', )
# fields = [
# # 'id', do not need
# # 'assayplatereadermapdatafile', do not need
# # 'assayplatereadermapitem', do not need
# # next item - can remove later - do not need since, if there are matches, this formset will not be called
# # but check rest is working first since will also affect formset (the custom_fields)
# # 'assayplatereadermapdatafileblock',
# 'plate_index',
# 'raw_value',
# 'time',
# 'well_use',
# ]
###########
# Formset for items
# IMPORTANT - custom_fields removes the select options for all the formsets - saves a lot of page load time with long lists
class AssayPlateReaderMapItemFormSet(BaseInlineFormSetForcedUniqueness):
custom_fields = (
'matrix_item',
'location',
)
def __init__(self, *args, **kwargs):
self.study = kwargs.pop('study', None)
self.user = kwargs.pop('user', None)
super(AssayPlateReaderMapItemFormSet, self).__init__(*args, **kwargs)
if not self.study:
self.study = self.instance.study
# use the filter to get matrix items in this study ONLY - makes the dic much smaller
# HANDY - this speeds up the custom_fields
filters = {'matrix_item': {'study_id': self.study.id}}
self.dic = get_dic_for_custom_choice_field(self, filters=filters)
for form in self.forms:
for field in self.custom_fields:
form.fields[field] = DicModelChoiceField(field, self.model, self.dic)
if self.study:
form.instance.study = self.study
if form.instance.pk:
form.instance.modified_by = self.user
else:
form.instance.created_by = self.user
###########
# 20200522 getting rid of the value form altogether since editing is not allowed after values are attached to the plate map.
# # GET RID OF THIS
# # Formset for item values
# class AssayPlateReaderMapItemValueFormSet(BaseInlineFormSetForcedUniqueness):
# # changed way this worked on 20200114 and do not need this field any more
# # custom_fields = (
# # 'assayplatereadermapdatafileblock',
# # )
#
# def __init__(self, *args, **kwargs):
# self.study = kwargs.pop('study', None)
# self.user = kwargs.pop('user', None)
# super(AssayPlateReaderMapItemValueFormSet, self).__init__(*args, **kwargs)
#
# if not self.study:
# self.study = self.instance.study
#
# # changed way this worked on 20200114 and do not need this field any more - skip making the dic...
# # # use the filter to get matrix items in this study ONLY - makes the dic much smaller
# # # this speed up the custom_fields
# # filters = {'assayplatereadermapdatafileblock': {'study_id': self.study.id}}
# # self.dic = get_dic_for_custom_choice_field(self, filters=filters)
# # # print(self.dic)
# #
# for form in self.forms:
# # for field in self.custom_fields:
# # form.fields[field] = DicModelChoiceField(field, self.model, self.dic)
#
# if self.study:
# form.instance.study = self.study
# if form.instance.pk:
# form.instance.modified_by = self.user
# else:
# form.instance.created_by = self.user
#
# # HANDY had this up before the self.forms loop, but needed to move it down to work
# # HANDY to know how to print a queryset to the console
# # self.queryset = self.queryset.order_by('assayplatereadermapdatafile', 'assayplatereadermapdatafileblock', 'plate_index')
# # https://stackoverflow.com/questions/13387446/changing-the-display-order-of-forms-in-a-formset
# # print(self.queryset)
# self.queryset = self.queryset.order_by('assayplatereadermapdatafileblock', 'plate_index')
# # print(self.queryset)
###########
# Formset factory for item and value
# https://stackoverflow.com/questions/29881734/creating-django-form-from-more-than-two-models
AssayPlateReaderMapItemFormSetFactory = inlineformset_factory(
AssayPlateReaderMap,
AssayPlateReaderMapItem,
formset=AssayPlateReaderMapItemFormSet,
form=AssayPlateReaderMapItemForm,
extra=1,
exclude=tracking + ('study',),
)
###########
# 20200522 getting rid of the value form altogether since editing is not allowed after values are attached to the plate map.
# # GET RID OF THIS
# AssayPlateReaderMapItemValueFormSetFactory = inlineformset_factory(
# AssayPlateReaderMap,
# AssayPlateReaderMapItemValue,
# formset=AssayPlateReaderMapItemValueFormSet,
# form=AssayPlateReaderMapItemValueForm,
# extra=1,
# exclude=tracking + ('study',),
# )
##########
# end plate reader map page
#####
#####
# Start plate reader file page
# Add a plate reader file to the study (just add the file and check the file extension, no data processing)
class AssayPlateReaderMapDataFileAddForm(BootstrapForm):
"""Form for Plate Reader Data File Upload"""
class Meta(object):
model = AssayPlateReaderMapDataFile
fields = ('plate_reader_file', )
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
self.study = kwargs.pop('study', None)
super(AssayPlateReaderMapDataFileAddForm, self).__init__(*args, **kwargs)
# needed - otherwise study_assay is blank on the update page (the add page worked okay)
if not self.study and self.instance.study:
self.study = self.instance.study
if self.study:
self.instance.study = self.study
# check the file extension of the loaded file to make sure the user is not adding spreadsheet files
# https://medium.com/@literallywords/server-side-file-extension-validation-in-django-2-1-b8c8bc3245a0
def clean_plate_reader_file(self):
data = self.cleaned_data['plate_reader_file']
# Run file extension check
file_extension = os.path.splitext(data.name)[1]
if file_extension not in ['.csv', '.tsv', '.txt']:
if '.xl' in file_extension or '.wk' in file_extension or '.12' in file_extension:
raise ValidationError(
'This appears to be a spreadsheet file. To upload, export it to a tab-delimited file and try again.',
code='invalid'
)
else:
raise ValidationError(
'Invalid file extension - must be in [.csv, .tsv, .txt]',
code='invalid'
)
return data
# UPDATE and VIEW (ADD is separate - above) - user routed here after adding a file to complete other needed info
class AssayPlateReaderMapDataFileForm(BootstrapForm):
"""Form for Assay Plate Reader Map Data File"""
class Meta(object):
model = AssayPlateReaderMapDataFile
fields = ['id', 'description', 'file_delimiter', 'upload_plate_size', 'plate_reader_file', ]
widgets = {
'description': forms.Textarea(attrs={'cols': 50, 'rows': 3}),
'upload_plate_size': forms.TextInput(attrs={'readonly': 'readonly',
'style': 'border-style: none; background-color: transparent;'
}),
}
def __init__(self, *args, **kwargs):
self.study = kwargs.pop('study', None)
# self.user = kwargs.pop('user', None)
# # ??
# filename_only = kwargs.pop('extra', 0)
super(AssayPlateReaderMapDataFileForm, self).__init__(*args, **kwargs)
# needed because, remember, the plate map doesn't come WITH a study - we must tell it which one
if not self.study and self.instance.study:
self.study = self.instance.study
if self.study:
self.instance.study = self.study
my_instance = self.instance
# to display the file name without the whole path
form_filename_only = os.path.basename(str(my_instance.plate_reader_file))
self.fields['form_filename_only'].initial = form_filename_only
se_form_plate_size = forms.ChoiceField(
required=False,
choices=assay_plate_reader_map_info_plate_size_choices
)
form_number_blocks = forms.IntegerField(
required=False,
initial=1,
)
form_number_blank_columns = forms.IntegerField(
required=False,
initial=0,
)
form_number_blank_rows = forms.IntegerField(
required=False,
initial=0,
)
form_filename_only = forms.CharField(
required=False,
)
# PI wants to select options for file processing
# Currently, the choices for file formats are HARDCODED here
# if we actually iron out the 'sanctioned' file formats, these could go into a table and be available in the admin
# BUT, reading/processing of the format would still need to be built, so maybe better NOT to put it in the admin.
se_file_format_select = forms.ChoiceField(
required=False,
initial=0,
choices=(
(0, 'COMPUTERS BEST GUESS'),
(1, 'Softmax Pro 5.3 Molecular Devices M5 (UPDDI DoO)'),
(10, 'Single data block with 1 column of row labels and 1 row of column headers'),
# (96, 'One 96 plate (8 lines by 12 columns) starting at line 1 column 1 (CSV) - requested by Larry V.'),
# (384, 'One 384 plate (16 lines by 24 columns) starting at line 1 column 1 (CSV) - requested by Larry V.'),
# (2, 'Wallac EnVision Manager Version 1.12 (EnVision)'),
(9999, 'USER CUSTOMIZES by Setting Format Information'),
)
)
class AssayPlateReaderMapDataFileBlockForm(forms.ModelForm):
"""Form for Assay Plate Reader Data File Block """
class Meta(object):
model = AssayPlateReaderMapDataFileBlock
# fields = ('id', 'data_block', 'data_block_metadata', 'line_start', 'line_end', 'delimited_start', 'delimited_end', 'over_write_sample_time', 'assayplatereadermap'])
exclude = tracking + ('study',)
# this could go in AssayPlateReaderMapDataFileBlockForm or AssayPlateReaderMapFileBlockFormSet
# but if done in the formset, the widgets don't get the form-control class!
# fields = ('id', 'data_block', 'data_block_metadata', 'line_start', 'line_end', 'delimited_start', 'delimited_end', 'over_write_sample_time', 'assayplatereadermap')
widgets = {
# 'form_selected_plate_map_time_unit': forms.TextInput(attrs={'readonly': 'readonly',
# 'style': 'background-color: transparent;',
# }),
'data_block': forms.NumberInput(attrs={'readonly': 'readonly',
# 'style': 'box-shadow:inset 0px, 0px 0px ;',
# 'style': 'border-style: none;',
# 'style': 'border-width: 0;',
# 'style': 'border-color: transparent;',
'style': 'background-color: transparent;',
}),
'line_start': forms.NumberInput(attrs={'class': 'form-control '}),
'line_end': forms.NumberInput(attrs={'class': 'form-control'}),
# 'line_end': forms.NumberInput(attrs={'class': 'form-control required'}),
# 'line_end': forms.NumberInput(attrs={'readonly': 'readonly',
# 'style': 'background-color: transparent;',}),
'delimited_start': forms.NumberInput(attrs={'class': 'form-control '}),
'delimited_end': forms.NumberInput(attrs={'class': 'form-control'}),
# 'delimited_end': forms.NumberInput(attrs={'readonly': 'readonly',
# 'style': 'background-color: transparent;',}),
'over_write_sample_time': forms.NumberInput(attrs={'class': 'form-control'}),
'form_selected_plate_map_time_unit': forms.NumberInput(attrs={'readonly': 'readonly',
'style': 'background-color: transparent;',}),
'data_block_metadata': forms.Textarea(attrs={'cols': 80, 'rows': 1, 'class': 'form-control'}),
}
def __init__(self, *args, **kwargs):
# Get the study
self.study = kwargs.pop('study', None)
self.user = kwargs.pop('user', None)
super(AssayPlateReaderMapDataFileBlockForm, self).__init__(*args, **kwargs)
# this made the dropdown behave when copied with the formset!
# SUPER IMPORTANT and HANDY when you need to copy formsets with dropdowns - if they are selectized, it is a big mess
self.fields['assayplatereadermap'].widget.attrs.update({'class': ' no-selectize required'})
# not currently used to limit what is removed from the map item table - consider adding this feature later
form_changed_something_in_block = forms.IntegerField(
initial=0,
required=False,
)
form_selected_plate_map_time_unit = forms.CharField(
required=False,
)
# formsets
class AssayPlateReaderMapFileBlockFormSet(BaseInlineFormSetForcedUniqueness):
custom_fields_for_limiting_list = (
'assayplatereadermap',
)
# tried putting this in the Form, but had some issues
# print(self.fields['assayplatereadermap'].queryset)
# #
# # next line makes it work
# self.study = 293
# print(self.study)
# self.fields['assayplatereadermap'].queryset = AssayPlateReaderMap.objects.filter(
# study_id=self.study
# )
#
# # print(self.fields['assayplatereadermap'].queryset)
def __init__(self, *args, **kwargs):
# Get the study
self.study = kwargs.pop('study', None)
self.user = kwargs.pop('user', None)
super(AssayPlateReaderMapFileBlockFormSet, self).__init__(*args, **kwargs)
if not self.study:
self.study = self.instance.study
idx = 0
for formset in self.forms:
for field in self.custom_fields_for_limiting_list:
formset.fields[field].queryset = AssayPlateReaderMap.objects.filter(
study_id=self.study
# study_id=293
)
if self.study:
formset.instance.study = self.study
if formset.instance.pk:
formset.instance.modified_by = self.user
else:
formset.instance.created_by = self.user
idx = idx + 1
AssayPlateReaderMapDataFileBlockFormSetFactory = inlineformset_factory(
AssayPlateReaderMapDataFile,
AssayPlateReaderMapDataFileBlock,
formset=AssayPlateReaderMapFileBlockFormSet,
form=AssayPlateReaderMapDataFileBlockForm,
extra=1,
exclude=tracking + ('study',),
)
# ASSAY PLATE MAP END
#####
#####
# Start omics section
# to work correctly, there is a study method, target, unit that is stored in the study setup
# these are saved with the uploaded file
# the only thing we care about is that the target selected is associated with category "Gene Expression"
# OMIC RULES - All methods, targets, and units (for both the "Gene Expression" and "Computational" categories) must exist in the database a priori
# OMIC RULES - the target selected in the assay setup must have category "Gene Expression"
# example:
# make/confirm methods, such as TempO-Seq and DESeq2
# make/confirm targets, such as Human 1500+ and assign to method TempO-Seq (category: Gene Expression)
# make/confirm targets, such as baseMean and assign to method DESeq2 (category: Computational)
# make/confirm categories Gene Expression and Computational and assign the targets to them (as indicated above)
# OMIC RULES - The table AssayOmicAnalysisTarget must have a row for each computational target a priori
# OMIC RULES - The table AssayOmicAnalysisTarget field data_type content must match exactly to the hard coded options in assay_omic_data_type_choices
# OMIC RULES - The table AssayOmicAnalysisTarget field name content must match exactly the column headers of the input file (INCLUDING THE CASE - at least, as of 20200902)
# OMIC RULES - The table AssayOmicAnalysisTarget field method content must match exactly method selected in the GUI as the Data Analysis Method
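# Illustrative sketch of a setup satisfying the rules above (hypothetical rows, not fixture data):
#   AssayMethod:              'TempO-Seq' (measurement) and 'DESeq2' (computational)
#   AssayTarget:              'Human 1500+' in category 'Gene Expression'; 'baseMean' in category 'Computational'
#   AssayOmicAnalysisTarget:  name='baseMean' (must match the file column header, including case),
#                             data_type='log2fc' (must match assay_omic_data_type_choices),
#                             method=DESeq2, target=baseMean, method_order as desired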
# monkey patch to display method target and unit combo as needed in the assay omic page
# originally this was going to be displayed; not sure it is needed anywhere, but since it is already used for querying, keep it
class AbstractClassAssayStudyAssayOmic(AssayStudyAssay):
class Meta:
proxy = True
def __str__(self):
return 'TARGET: {0} METHOD: {1} UNIT: {2}'.format(self.target, self.method, self.unit)
omic_upload_fields_require_file_reprocessing = ['omic_data_file', 'data_type', 'analysis_method']
class AssayOmicDataFileUploadForm(BootstrapForm):
"""Form Upload an AssayOmicDataFileUpload file and associated metadata """
# since the metadata for the log2fc is by group, and is collected once for each file, it is stored with the upload file
# the metadata for the count data is stored separately (and linked by sample name/file column header)
# this was partly due to the evolution of the project and partly due to the expressed preference of a project PI
class Meta(object):
model = AssayOmicDataFileUpload
exclude = tracking + ('study',)
def __init__(self, *args, **kwargs):
self.study = kwargs.pop('study', None)
super(AssayOmicDataFileUploadForm, self).__init__(*args, **kwargs)
if not self.study and self.instance.study:
self.study = self.instance.study
if self.study:
self.instance.study = self.study
# for now, limit to the same study - we may need to revisit this when we think about inter-study
data_groups_filtered = AssayGroup.objects.filter(
study_id=self.instance.study.id
)
# The rules for getting the list of study assays in the upload GUI
# Rule 1: category = gene expression; Rule 2 the target must be associated to that category
study_assay_queryset = AbstractClassAssayStudyAssayOmic.objects.filter(
study_id=self.instance.study.id,
).filter(
target__in=AssayTarget.objects.filter(assaycategory__name="Gene Expression")
).prefetch_related(
'target',
'method',
'unit',
)
number_of_omic_targets_in_study = study_assay_queryset.count()
# above, we tried to get the omic targets; if none were found, fall back to all the study assays
if number_of_omic_targets_in_study == 0:
study_assay_queryset = AbstractClassAssayStudyAssayOmic.objects.filter(
study_id=self.study
).prefetch_related(
'target',
'method',
'unit',
)
self.fields['study_assay'].queryset = study_assay_queryset
# making the best guess, based on what is in Assays tab, on what should be the form initials
initial_study_assay = None
initial_data_type = None
for each in study_assay_queryset:
#note that the unit table does not use name, it uses unit, hence unit.unit
this_unit = each.unit.unit.lower()
# may need to change this to give something else a priority (this is just to get an initial one)
# Mark had units of 'Fold Change' and 'Count', then switched to not specified
# Tongying used 'Unitless' for all omic data.
if 'ct' in this_unit:
# get one of the counts
initial_study_assay = each.id
initial_data_type = 'normcounts'
break
elif 'fold' in this_unit or 'fc' in this_unit:
# so a unit with a fold in it will get priority
initial_study_assay = each.id
initial_data_type = 'log2fc'
break
else:
# result will be it just gets the last one
initial_study_assay = each.id
initial_data_type = 'log2fc'
self.fields['study_assay'].initial = initial_study_assay
self.fields['default_data_type'].initial = initial_data_type
omic_computational_methods_s = AssayOmicAnalysisTarget.objects.values('method')
omic_computational_methods = AssayMethod.objects.filter(
id__in=omic_computational_methods_s
)
initial_computational_method = None
# just get the first one for the default, if there is one
if len(omic_computational_methods) > 0:
for each in omic_computational_methods:
initial_computational_method = each
break
initial_computational_methods = omic_computational_methods
self.fields['analysis_method'].queryset = initial_computational_methods
self.fields['analysis_method'].initial = initial_computational_method
# HANDY to limit options in a dropdown on a model field in a form
self.fields['group_1'].queryset = data_groups_filtered
self.fields['group_2'].queryset = data_groups_filtered
# when these are visible, they should be class required
# HANDY for adding classes in forms
# the following could remove other classes, so stick with the below
# NO self.fields['group_1'].widget.attrs.update({'class': ' required'})
# YES self.fields['group_1'].widget.attrs['class'] += 'required'
self.fields['group_1'].widget.attrs['class'] += ' required'
self.fields['group_2'].widget.attrs['class'] += ' required'
if self.instance.time_1:
time_1_instance = self.instance.time_1
times_1 = get_split_times(time_1_instance)
self.fields['time_1_day'].initial = times_1.get('day')
self.fields['time_1_hour'].initial = times_1.get('hour')
self.fields['time_1_minute'].initial = times_1.get('minute')
if self.instance.time_2:
time_2_instance = self.instance.time_2
times_2 = get_split_times(time_2_instance)
self.fields['time_2_day'].initial = times_2.get('day')
self.fields['time_2_hour'].initial = times_2.get('hour')
self.fields['time_2_minute'].initial = times_2.get('minute')
# HANDY for adding classes in forms
# NO self.fields['group_1'].widget.attrs.update({'class': ' required'})
# YES self.fields['group_1'].widget.attrs['class'] += 'required'
# BUT, the above does not work on selectized, just do addClass in javascript
# i.e.: $('#id_time_unit').next().addClass('required');
# Luke wanted to use DHM, so, went back to that. Hold in case gets outvoted
# self.fields['time_1_display'].widget.attrs.update({'class': ' form-control required'})
# self.fields['time_2_display'].widget.attrs.update({'class': ' form-control required'})
# time_unit_instance = self.instance.time_unit
# if self.instance.time_1:
# time_1_instance = self.instance.time_1
# ctime = sck_general_convert_time_from_minutes_to_unit_given(time_1_instance, time_unit_instance)
# self.fields['time_1_display'].initial = ctime
#
# if self.instance.time_2:
# time_2_instance = self.instance.time_2
# ctime = sck_general_convert_time_from_minutes_to_unit_given(time_2_instance, time_unit_instance)
# self.fields['time_2_display'].initial = ctime
# not using right now, but may want later if do something different by computational method
# assay_omic_analysis_target_queryset = AssayOmicAnalysisTarget.objects.all()
# data_type_to_computational_method_dict = {}
# for index, each in enumerate(assay_omic_analysis_target_queryset):
# data_type_to_computational_method_dict[each.data_type] = each.method.name
# self.fields['data_type_to_computational_method_dict'].initial = json.dumps(data_type_to_computational_method_dict)
# for the inline help table that shows what the file processing is programmed to handle
assay_omic_analysis_table_rows = AssayOmicAnalysisTarget.objects.all().order_by(
'data_type',
'method',
'method_order',
'name',
'target',
).prefetch_related(
'target',
'method',
'unit',
)
list_of_dicts_of_assay_omic_analysis_table_rows = []
for each in assay_omic_analysis_table_rows:
data_type = [item[1] for item in assay_omic_data_type_choices if item[0] == each.data_type]
name = each.name
target = each.target.name
method = each.method.name
method_order = each.method_order
dict1 = {
'data_type': data_type,
'file_header': name,
'computational_target': target,
'method': method,
'method_order': method_order,
}
list_of_dicts_of_assay_omic_analysis_table_rows.append(dict1)
self.fields['list_of_dicts_of_assay_omic_analysis_table_rows'].initial = json.dumps(list_of_dicts_of_assay_omic_analysis_table_rows)
# for the template generator
indy_sample_labels, dict_of_method_to_name, dict_of_method_to_header_list = get_dict_of_method_to_header_list(self.study)
self.fields['indy_sample_labels'].initial = indy_sample_labels
self.fields['dict_of_method_to_name'].initial = json.dumps(dict_of_method_to_name)
self.fields['dict_of_method_to_header_list'].initial = json.dumps(dict_of_method_to_header_list)
default_data_type = forms.CharField(required=False,)
time_1_day = forms.DecimalField(
required=False,
label='Day'
)
time_1_hour = forms.DecimalField(
required=False,
label='Hour'
)
time_1_minute = forms.DecimalField(
required=False,
label='Minute'
)
time_2_day = forms.DecimalField(
required=False,
label='Day'
)
time_2_hour = forms.DecimalField(
required=False,
label='Hour'
)
time_2_minute = forms.DecimalField(
required=False,
label='Minute'
)
# time_1_display = forms.DecimalField(
# required=False,
# label='Sample Time 1*'
# )
# time_2_display = forms.DecimalField(
# required=False,
# label='Sample Time 2*'
# )
# not using right now
# data_type_to_computational_method_dict = forms.CharField(widget=forms.TextInput(), required=False, )
# using for the long page help table
list_of_dicts_of_assay_omic_analysis_table_rows = forms.CharField(widget=forms.TextInput(), required=False, )
indy_sample_labels = forms.CharField(widget=forms.TextInput(), required=False, )
dict_of_method_to_header_list = forms.CharField(widget=forms.TextInput(), required=False, )
dict_of_method_to_name = forms.CharField(widget=forms.TextInput(), required=False, )
def clean(self):
data = super(AssayOmicDataFileUploadForm, self).clean()
if self.instance.id:
previous_data_type = self.instance.data_type
else:
previous_data_type = 'adding' # If saving a new instance
data['previous_data_type'] = previous_data_type
# data are changed here, so NEED to return the data
data['time_1'] = 0
for time_unit, conversion in list(TIME_CONVERSIONS.items()):
if data.get('time_1_' + time_unit) is not None:
int_time = (data.get('time_1_' + time_unit))
data.update({'time_1': data.get('time_1') + int_time * conversion,})
data['time_2'] = 0
for time_unit, conversion in list(TIME_CONVERSIONS.items()):
if data.get('time_2_' + time_unit) is not None:
int_time = data.get('time_2_' + time_unit)
data.update({'time_2': data.get('time_2') + int_time * conversion,})
# there is QC that needs to run on the form fields and, if it does not pass, do not even bother with the file QC
# do that QC first
true_to_continue = self.qc_form_fields_only(save=False, called_from='clean')
if not true_to_continue:
validation_message = 'This did not pass form field QC (the file QC cannot be run until the form field QC passes).'
raise ValidationError(validation_message, code='invalid')
# only want to run huge code (and replace data in the point data files)
# if something was changed that affected the point data
# so, let us check that first
# which form fields REQUIRE that we reprocess (delete and re-read) the data point data?
need_to_run_long = False
if true_to_continue:
# we are continuing
if self.instance.id:
# this is a update form
for each in omic_upload_fields_require_file_reprocessing:
if each in self.changed_data:
# one of the critical fields was changed, must run the long version
need_to_run_long = True
break
else:
# this is an add form, have to run the long version
need_to_run_long = True
if true_to_continue:
# we are continuing, run the QC that affects the reprocessing of the data file
if need_to_run_long:
# run the QC that affects the file reprocessing
true_to_continue = self.qc_file(save=False, called_from='clean')
if not true_to_continue:
validation_message = 'This did not pass file upload QC.'
raise ValidationError(validation_message, code='invalid')
else:
# passed the file QC, so process the file in clean
self.process_file(save=False, called_from='clean')
return data
def save(self, commit=True):
new_file = None
need_to_run_long = False
if commit:
if self.instance.id:
# an update page
for each in omic_upload_fields_require_file_reprocessing:
if each in self.changed_data:
# a critical field was found in the changed list
need_to_run_long = True
break
else:
# add page, have to run the long version
need_to_run_long = True
new_file = super(AssayOmicDataFileUploadForm, self).save(commit=commit)
if need_to_run_long:
self.process_file(save=True, called_from='save')
return new_file
def qc_form_fields_only(self, save=False, called_from='clean'):
data = self.cleaned_data
data_file_pk = 0
if self.instance.id:
data_file_pk = self.instance.id
true_to_continue = omic_data_quality_clean_check_for_omic_form_fields(self, data, data_file_pk)
return true_to_continue
def qc_file(self, save=False, called_from='clean'):
data = self.cleaned_data
data_file_pk = 0
# self.instance.id is None for the add form
if self.instance.id:
data_file_pk = self.instance.id
# the data_type specific QC is in the utils.py
true_to_continue = omic_data_quality_clean_check_for_omic_file_upload(self, data, data_file_pk, called_from)
return true_to_continue
def process_file(self, save=False, called_from='clean'):
data = self.cleaned_data
data_file_pk = 0
if self.instance.id:
data_file_pk = self.instance.id
file_extension = os.path.splitext(data.get('omic_data_file').name)[1]
data_type = data['data_type']
analysis_method = data['analysis_method']
# HANDY for getting a file object and a file queryset when doing clean vs save
# this has to be different because the file is not yet saved on the add form or when the file was updated
if called_from == 'clean':
data_file = data.get('omic_data_file')
a_returned = omic_data_file_processing_data_main_for_all_data_types(save, self.study.id, data_file_pk, data_file, file_extension, called_from, data_type, analysis_method)
else:
queryset = AssayOmicDataFileUpload.objects.get(id=data_file_pk)
data_file = queryset.omic_data_file.open()
a_returned = omic_data_file_processing_data_main_for_all_data_types(save, self.study.id, data_file_pk, data_file, file_extension, called_from, data_type, analysis_method)
return data
# sck - needed by more than one form, so make it a function
def get_dict_of_method_to_header_list(study_id):
indy_list_of_sample_labels = list(AssayOmicSampleMetadata.objects.filter(
study_id=study_id
).values_list('sample_name', flat=True))
# exclude the name field so can deal with it all in one place
indy_samples = ''
for each in indy_list_of_sample_labels:
indy_samples = indy_samples + ', ' + each
indy_sample_labels = indy_samples[2:]
assay_omic_analysis_target_queryset = AssayOmicAnalysisTarget.objects.filter(
data_type='log2fc'
).order_by(
'method',
'method_order',
).prefetch_related(
'method',
)
dict_of_method_to_header_list = {}
dict_of_method_to_name = {}
prev_method = ''
prev_method_name = ''
list_of_headers = []
for index, each in enumerate(assay_omic_analysis_target_queryset):
name = each.name
method = each.method.pk
method_name = each.method.name
if method == prev_method:
list_of_headers.append(name)
else:
# save the last set if not null
if prev_method != '':
dict_of_method_to_header_list[prev_method] = list_of_headers
dict_of_method_to_name[prev_method] = prev_method_name
# reset the empties for restarting the next set
list_of_headers = []
list_of_headers.append(name)
prev_method = method
prev_method_name = method_name
# do the last one (guard against an empty queryset, where method/method_name would be undefined)
if prev_method != '':
    dict_of_method_to_header_list[method] = list_of_headers
    dict_of_method_to_name[method] = method_name
return indy_sample_labels, dict_of_method_to_name, dict_of_method_to_header_list
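# Shape of the values returned above (hypothetical example values, for illustration only):
#   indy_sample_labels             'Sample-01, Sample-02, Sample-03'   (comma-separated sample names)
#   dict_of_method_to_name         {12: 'DESeq2'}                      (method pk -> method name)
#   dict_of_method_to_header_list  {12: ['baseMean', 'stat']}          (method pk -> ordered log2fc file headers)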
# End Omic Data File Upload Section
# Start Omic Metadata Collection Section
sample_option_choices = (
('clt', 'Chip/Well - Location - Time'),
('cus1', 'Enter a prefix and starting counter'),
('cus2', 'Enter a prefix and/or suffix for Chip/Well'),
('sn1', 'Sample-1 to Sample-99999 etc'),
('sn2', 'Sample-01 to Sample-99'),
('sn3', 'Sample-001 to Sample-999'),
)
# Form to use to collect the omic sample metadata integrated with workflow tabs
# following the pattern for AssayStudyAssaysForm
# This was formerly class AssayOmicSampleMetadataAdditionalInfoForm(BootstrapForm):
# The actual metadata will be stuffed into a field for performance
# NOTE TO SCK - this will be one record per form (the rest will be crammed in a field...)
# when done as a button, before tab workflow - the form will not have an index page, so, there is a conditional in the call (click to go there) and
# this uses the AssayStudy model so that the study id is easily passed in and out
class AssayStudySamplesForm(BootstrapForm):
class Meta(object):
model = AssayStudy
fields1 = flag_group
fields2 = (
'indy_list_of_dicts_of_table_rows',
'indy_list_of_column_labels',
'indy_list_of_column_labels_show_hide',
'indy_sample_location',
'indy_sample_location_all',
'indy_matrix_item',
'indy_matrix_item_list',
'indy_matrix_item_name_to_pk_dict',
'indy_list_time_units_to_include_initially',
'indy_dict_time_units_to_table_column',
'indy_add_or_update'
)
fields = fields1 + fields2
def __init__(self, *args, **kwargs):
super(AssayStudySamplesForm, self).__init__(*args, **kwargs)
# self.instance will be the study; self.instance.id is the study id
indy_table_labels = omic_metadata_find_the_labels_needed_for_the_indy_metadata_table('form', self.instance.id)
indy_list_of_column_labels = indy_table_labels.get('indy_list_of_column_labels')
indy_list_of_column_labels_show_hide = indy_table_labels.get('indy_list_of_column_labels_show_hide')
indy_list_of_dicts_of_table_rows = indy_table_labels.get('indy_list_of_dicts_of_table_rows')
indy_list_time_units_to_include_initially = indy_table_labels.get('indy_list_time_units_to_include_initially')
indy_dict_time_units_to_table_column = indy_table_labels.get('indy_dict_time_units_to_table_column')
indy_add_or_update = indy_table_labels.get('indy_add_or_update')
self.fields['indy_list_of_column_labels'].initial = json.dumps(indy_list_of_column_labels)
self.fields['indy_list_of_column_labels_show_hide'].initial = json.dumps(indy_list_of_column_labels_show_hide)
self.fields['indy_list_of_dicts_of_table_rows'].initial = json.dumps(indy_list_of_dicts_of_table_rows)
self.fields['indy_list_time_units_to_include_initially'].initial = json.dumps(indy_list_time_units_to_include_initially)
self.fields['indy_dict_time_units_to_table_column'].initial = json.dumps(indy_dict_time_units_to_table_column)
self.fields['indy_add_or_update'].initial = json.dumps(indy_add_or_update)
# get the queryset of matrix items in this study
matrix_item_queryset = AssayMatrixItem.objects.filter(study_id=self.instance.id).order_by('name', )
self.fields['indy_matrix_item'].queryset = matrix_item_queryset
# get the matrix items names in this study
matrix_item_list = list(matrix_item_queryset.values_list('name', flat=True))
self.fields['indy_matrix_item_list'].initial = json.dumps(matrix_item_list)
matrix_item_name_and_pk = {}
for index, each in enumerate(matrix_item_queryset):
matrix_item_name_and_pk[each.name] = each.id
self.fields['indy_matrix_item_name_to_pk_dict'].initial = json.dumps(matrix_item_name_and_pk)
# if the models in the study have locations, pull them
organ_models_in_study_list = list(AssayGroup.objects.filter(
# We will always know the study, this can never be an add page
study_id=self.instance,
).prefetch_related(
'organ_model',
).distinct().only(
'organ_model'
).values_list('organ_model__id', flat=True))
sample_locations_in_list = list(OrganModelLocation.objects.filter(
organ_model__in=organ_models_in_study_list
).prefetch_related(
'sample_location',
# ).values('sample_location__id', 'sample_location__name')
# < QuerySet[{'sample_location__id': 31, 'sample_location__name': 'Well'}, {'sample_location__id': 30, 'sample_location__name': 'Media'}] >
).values_list('sample_location__id', flat=True))
sample_locations_queryset = AssaySampleLocation.objects.filter(
id__in=sample_locations_in_list
).order_by(
'name',
)
# what if the study has more than one model,
# and one has locations and one does not,
# the queryset len will be > 0,
# but not all the locations would be in the sub list
# might have to deal with this, (location needed might not be in list) but
# for now, if there is a sub list, use it
if len(sample_locations_queryset) > 0:
self.fields['indy_sample_location'].queryset = sample_locations_queryset
# for the template generator
indy_sample_labels, dict_of_method_to_name, dict_of_method_to_header_list = get_dict_of_method_to_header_list(self.instance)
self.fields['indy_sample_labels'].initial = indy_sample_labels
self.fields['dict_of_method_to_name'].initial = json.dumps(dict_of_method_to_name)
self.fields['dict_of_method_to_header_list'].initial = json.dumps(dict_of_method_to_header_list)
self.fields['indy_custom_sample_label_starter_prefix'].initial = 'A'
self.fields['indy_custom_sample_label_starter_counter'].initial = '01'
self.fields['indy_custom_sample_label_prefix'].initial = ''
self.fields['indy_custom_sample_label_suffix'].initial = ''
indy_list_of_dicts_of_table_rows = forms.CharField(widget=forms.TextInput(), required=False,)
indy_list_of_column_labels = forms.CharField(widget=forms.TextInput(), required=False,)
indy_list_of_column_labels_show_hide = forms.CharField(widget=forms.TextInput(), required=False, )
indy_list_time_units_to_include_initially = forms.CharField(widget=forms.TextInput(), required=False, )
indy_dict_time_units_to_table_column = forms.CharField(widget=forms.TextInput(), required=False, )
list_of_dicts_of_assay_omic_analysis_table_rows = forms.CharField(widget=forms.TextInput(), required=False, )
indy_sample_labels = forms.CharField(widget=forms.TextInput(), required=False, )
dict_of_method_to_header_list = forms.CharField(widget=forms.TextInput(), required=False, )
dict_of_method_to_name = forms.CharField(widget=forms.TextInput(), required=False, )
# this will return all the sample locations
indy_sample_location = forms.ModelChoiceField(
queryset=AssaySampleLocation.objects.all().order_by(
'name'
),
required=False,
)
indy_sample_location_all = forms.ModelChoiceField(
queryset=AssaySampleLocation.objects.all().order_by(
'name'
),
required=False,
)
indy_matrix_item = forms.ModelChoiceField(
queryset=AssayMatrixItem.objects.none(),
required=False,
)
indy_matrix_item_list = forms.CharField(widget=forms.TextInput(), required=False,)
indy_matrix_item_name_to_pk_dict = forms.CharField(widget=forms.TextInput(), required=False, )
indy_sample_label_options = forms.ChoiceField(
choices=sample_option_choices,
required=False,
)
indy_add_or_update = forms.CharField(widget=forms.TextInput(), required=False,)
indy_sample_labels = forms.CharField(widget=forms.TextInput(), required=False, )
dict_of_method_to_header_list = forms.CharField(widget=forms.TextInput(), required=False, )
dict_of_method_to_name = forms.CharField(widget=forms.TextInput(), required=False, )
indy_custom_sample_label_starter_prefix = forms.CharField(widget=forms.TextInput(), required=False, )
indy_custom_sample_label_starter_counter = forms.CharField(widget=forms.TextInput(), required=False, )
indy_custom_sample_label_prefix = forms.CharField(widget=forms.TextInput(), required=False, )
indy_custom_sample_label_suffix = forms.CharField(widget=forms.TextInput(), required=False, )
def clean(self):
data = super(AssayStudySamplesForm, self).clean()
if 'indy_list_of_dicts_of_table_rows' in self.changed_data or data['indy_add_or_update'].replace('"', '') == 'add':
# print('indy_list_of_dicts_of_table_rows')
# print(data['indy_list_of_dicts_of_table_rows'])
error_message = data_quality_clean_check_for_omic_metadata_empty_fields(self, data)
# print("error message returned ", error_message)
if len(error_message) > 0:
validation_message = 'This did not pass QC. ' + error_message
raise ValidationError(validation_message, code='invalid')
else:
self.process_metadata(save=False, called_from='clean')
return data
def save(self, commit=True):
# note: even though we are not really saving anything to the AssayStudy model/table
# the save expects a model instance to be returned, or an error will result
study = self.instance
if commit:
self.process_metadata(save=True, called_from='save')
return study
def process_metadata(self, save=False, called_from='clean'):
data = self.cleaned_data
a_returned = omic_process_the_omic_sample_metadata(self, called_from, data)
# print(a_returned)
return data
# End omic sample metadata collection section
| mit | 1,930,131,250,192,173,600 | 41.53103 | 207 | 0.56811 | false |
adamrvfisher/TechnicalAnalysisLibrary | Blankrunningslate.py | 1 | 1167 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 5 20:13:42 2017
@author: AmatVictoriaCuramIII
"""
firsttime = '07/01/1983'
secondtime = '01/01/1995'
thirdtime = '01/01/2006'
fourthtime = '01/01/2010'
lasttime = '01/01/2050'
ticker = '^GSPC'
import pandas as pd
from ChaikinAggMaker import ChaikinAggMaker
S1TS = pd.read_pickle('SP500NCS1TS')
S2TS = pd.read_pickle('SP500NCS2TS')
S3TS = pd.read_pickle('SP500NCS3TS')
S4TS = pd.read_pickle('SP500NCS4TS')
S1TS = S1TS.loc[:,~S1TS.columns.duplicated()]
S2TS = S2TS.loc[:,~S2TS.columns.duplicated()]
S3TS = S3TS.loc[:,~S3TS.columns.duplicated()]
S4TS = S4TS.loc[:,~S4TS.columns.duplicated()]
testset1winners = ChaikinAggMaker(ticker, S1TS, firsttime, secondtime)
testset2winners = ChaikinAggMaker(ticker, S2TS, secondtime, thirdtime)
testset3winners = ChaikinAggMaker(ticker, S3TS, thirdtime, lasttime)
testset4winners = ChaikinAggMaker(ticker, S4TS, fourthtime, lasttime)
Aggregate = pd.DataFrame()
Aggregate = pd.concat([Aggregate, testset1winners, testset2winners,
testset3winners, testset4winners],axis = 1)
Aggregate = Aggregate.loc[:,~Aggregate.columns.duplicated()] | apache-2.0 | 4,400,549,916,374,860,000 | 36.966667 | 70 | 0.720651 | false |
artefactual/archivematica-history | src/MCPServer/lib/archivematicaMCP.py | 1 | 11965 | #!/usr/bin/python -OO
# This file is part of Archivematica.
#
# Copyright 2010-2012 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage MCPServer
# @author Joseph Perry <[email protected]>
# @version svn: $Id$
#~DOC~
#
# --- This is the MCP (master control program) ---
# The intention of this program is to provide a centralized automated distributed system for performing an arbitrary set of tasks on a directory.
# Distributed in that the work can be performed on more than one physical computer simultaneously.
# Centralized in that there is one centre point for configuring flow through the system.
# Automated in that the tasks performed will be based on the config files and instantiated for each of the targets.
#
# It loads configurations from the database.
#
import threading
import watchDirectory
from jobChain import jobChain
from unitSIP import unitSIP
from unitDIP import unitDIP
from unitFile import unitFile
from unitTransfer import unitTransfer
from pyinotify import ThreadedNotifier
import transferD
import RPCServer
import MySQLdb
import signal
import os
import pyinotify
# from archivematicaReplacementDics import replacementDics
# from MCPlogging import *
# from MCPloggingSQL import getUTCDate
import ConfigParser
# from mcpModules.modules import modulesClass
import uuid
import string
import math
import copy
import time
import subprocess
import shlex
import sys
import lxml.etree as etree
sys.path.append("/usr/lib/archivematica/archivematicaCommon")
import databaseInterface
import databaseFunctions
import multiprocessing
import traceback
from externals.singleInstance import singleinstance
from archivematicaFunctions import unicodeToStr
global countOfCreateUnitAndJobChainThreaded
countOfCreateUnitAndJobChainThreaded = 0
config = ConfigParser.SafeConfigParser({'MCPArchivematicaServerInterface': ""})
config.read("/etc/archivematica/MCPServer/serverConfig.conf")
# archivematicaRD = replacementDics(config)
#time to sleep to allow db to be updated with the new location of a SIP
dbWaitSleep = 2
transferDMovedFromCounter = multiprocessing.Value('i', 0)
configs = []
jobsAwaitingApproval = []
jobsQueue = [] #jobs shouldn't remain here long (a few seconds max) before they are turned into tasks (jobs being processed)
jobsBeingProcessed = []
tasksQueue = []
tasksBeingProcessed = []
tasksLock = threading.Lock()
movingDirectoryLock = threading.Lock()
jobsLock = threading.Lock()
watchedDirectories = []
limitTaskThreads = config.getint('Protocol', "limitTaskThreads")
limitTaskThreadsSleep = config.getfloat('Protocol', "limitTaskThreadsSleep")
limitGearmanConnectionsSemaphore = threading.Semaphore(value=config.getint('Protocol', "limitGearmanConnections"))
reservedAsTaskProcessingThreads = config.getint('Protocol', "reservedAsTaskProcessingThreads")
debug = False
stopSignalReceived = False
def isUUID(uuid):
split = uuid.split("-")
if len(split) != 5 \
or len(split[0]) != 8 \
or len(split[1]) != 4 \
or len(split[2]) != 4 \
or len(split[3]) != 4 \
or len(split[4]) != 12 :
return False
return True
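# Example: isUUID("01234567-89ab-cdef-0123-456789abcdef") returns True; note that only the lengths
# of the dash-separated pieces are checked, not the characters themselves.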
def findOrCreateSipInDB(path, waitSleep=dbWaitSleep):
UUID = ""
path = path.replace(config.get('MCPServer', "sharedDirectory"), "%sharedPath%", 1)
#find UUID on end of SIP path
uuidLen = -36
if isUUID(path[uuidLen-1:-1]):
UUID = path[uuidLen-1:-1]
if UUID == "":
#Find it in the database
sql = """SELECT sipUUID FROM SIPs WHERE currentPath = '""" + MySQLdb.escape_string(path) + "';"
#if waitSleep != 0:
#time.sleep(waitSleep) #let db be updated by the microservice that moved it.
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
if not row:
print "Not opening existing SIP:", UUID, "-", path
while row != None:
UUID = row[0]
print "Opening existing SIP:", UUID, "-", path
row = c.fetchone()
sqlLock.release()
#Create it
if UUID == "":
UUID = databaseFunctions.createSIP(path)
print "DEBUG creating sip", path, UUID
return UUID
def createUnitAndJobChain(path, config, terminate=False):
path = unicodeToStr(path)
if os.path.isdir(path):
path = path + "/"
print "createUnitAndJobChain", path, config
unit = None
if os.path.isdir(path):
if config[3] == "SIP":
UUID = findOrCreateSipInDB(path)
unit = unitSIP(path, UUID)
elif config[3] == "DIP":
UUID = findOrCreateSipInDB(path)
unit = unitDIP(path, UUID)
elif config[3] == "Transfer":
#UUID = findOrCreateSipInDB(path)
unit = unitTransfer(path)
elif os.path.isfile(path):
if config[3] == "Transfer":
unit = unitTransfer(path)
else:
return
UUID = uuid.uuid4()
unit = unitFile(path, UUID)
else:
return
jobChain(unit, config[1])
if terminate:
exit(0)
def createUnitAndJobChainThreaded(path, config, terminate=True):
global countOfCreateUnitAndJobChainThreaded
#createUnitAndJobChain(path, config)
#return
try:
if debug:
print "DEBGUG alert watch path: ", path
t = threading.Thread(target=createUnitAndJobChain, args=(path, config), kwargs={"terminate":terminate})
t.daemon = True
countOfCreateUnitAndJobChainThreaded += 1
while(limitTaskThreads <= threading.activeCount() + reservedAsTaskProcessingThreads ):
if stopSignalReceived:
print "Signal was received; stopping createUnitAndJobChainThreaded(path, config)"
exit(0)
print threading.activeCount().__str__()
#print "DEBUG createUnitAndJobChainThreaded waiting on thread count", threading.activeCount()
time.sleep(.5)
countOfCreateUnitAndJobChainThreaded -= 1
t.start()
except Exception as inst:
print "DEBUG EXCEPTION!"
traceback.print_exc(file=sys.stdout)
print type(inst) # the exception instance
print inst.args
def watchDirectories():
rows = []
sql = """SELECT watchedDirectoryPath, chain, onlyActOnDirectories, description FROM WatchedDirectories LEFT OUTER JOIN WatchedDirectoriesExpectedTypes ON WatchedDirectories.expectedType = WatchedDirectoriesExpectedTypes.pk"""
c, sqlLock = databaseInterface.querySQL(sql)
row = c.fetchone()
while row != None:
rows.append(row)
row = c.fetchone()
sqlLock.release()
for row in rows:
directory = row[0].replace("%watchDirectoryPath%", config.get('MCPServer', "watchDirectoryPath"), 1)
if not os.path.isdir(directory):
os.makedirs(directory)
for item in os.listdir(directory):
if item == ".svn":
continue
item = item.decode("utf-8")
path = os.path.join(unicode(directory), item)
#createUnitAndJobChain(path, row)
while(limitTaskThreads <= threading.activeCount() + reservedAsTaskProcessingThreads ):
time.sleep(1)
createUnitAndJobChainThreaded(path, row, terminate=False)
actOnFiles=True
if row[2]: #onlyActOnDirectories
actOnFiles=False
watchDirectory.archivematicaWatchDirectory(directory,variablesAdded=row, callBackFunctionAdded=createUnitAndJobChainThreaded, alertOnFiles=actOnFiles, interval=config.getint('MCPServer', "watchDirectoriesPollInterval"))
#if __name__ == '__main__':
# signal.signal(signal.SIGTERM, signal_handler)
# signal.signal(signal.SIGINT, signal_handler)
#configs = loadConfigs()
#directoryWatchList = loadDirectoryWatchLlist(configs)
#archivematicaMCPServerListen()
def signal_handler(signalReceived, frame):
print signalReceived, frame
global stopSignalReceived
stopSignalReceived = True
threads = threading.enumerate()
for thread in threads:
if False and isinstance(thread, threading.Thread):
try:
print "not stopping: ", type(thread), thread
except Exception as inst:
print "DEBUG EXCEPTION!"
print type(inst) # the exception instance
print inst.args
elif isinstance(thread, pyinotify.ThreadedNotifier):
print "stopping: ", type(thread), thread
try:
thread.stop()
except Exception as inst:
print >>sys.stderr, "DEBUG EXCEPTION!"
print >>sys.stderr, type(inst) # the exception instance
print >>sys.stderr, inst.args
else:
print "not stopping: ", type(thread), thread
sys.stdout.flush()
sys.stderr.flush()
sys.exit(0)
exit(0)
def debugMonitor():
global countOfCreateUnitAndJobChainThreaded
while True:
dblockstatus = "SQL Lock: Locked"
if databaseInterface.sqlLock.acquire(False):
databaseInterface.sqlLock.release()
dblockstatus = "SQL Lock: Unlocked"
print "<DEBUG type=\"archivematicaMCP\">", "\tDate Time: ", databaseInterface.getUTCDate(), "\tThreadCount: ", threading.activeCount(), "\tcountOfCreateUnitAndJobChainThreaded", countOfCreateUnitAndJobChainThreaded, dblockstatus, "</DEBUG>"
time.sleep(60)
def flushOutputs():
while True:
sys.stdout.flush()
sys.stderr.flush()
time.sleep(5)
def startTransferD():
p = multiprocessing.Process(target=transferD.mainWithMovedFromCounter, args=(transferDMovedFromCounter,))
p.start()
print >>sys.stderr, "transferD started - PID:", p.pid
while p.is_alive():
time.sleep(5)
print >>sys.stderr, "transferD crashed\n exitCode:", p.exitcode
def cleanupOldDbEntriesOnNewRun():
sql = """DELETE FROM Jobs WHERE Jobs.currentStep = 'Awaiting decision';"""
databaseInterface.runSQL(sql)
sql = """UPDATE Jobs SET currentStep='Failed' WHERE currentStep='Executing command(s)';"""
databaseInterface.runSQL(sql)
sql = """UPDATE Tasks SET exitCode=-1, stdError='MCP shut down while processing.' WHERE exitCode IS NULL;"""
databaseInterface.runSQL(sql)
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
si = singleinstance(config.get('MCPServer', "singleInstancePIDFile"))
if si.alreadyrunning():
print >>sys.stderr, "Another instance is already running. Killing PID:", si.pid
si.kill()
elif False: #testing single instance stuff
while 1:
print "psudo run"
time.sleep(3)
print "This PID: ", si.pid
if True:
import getpass
print "user: ", getpass.getuser()
os.setuid(333)
if False:
t = threading.Thread(target=debugMonitor)
t.daemon = True
t.start()
if True:
t = threading.Thread(target=flushOutputs)
t.daemon = True
t.start()
cleanupOldDbEntriesOnNewRun()
watchDirectories()
#t = threading.Thread(target=startTransferD)
#t.daemon = True
#t.start()
# This is blocking the main thread with the worker loop
RPCServer.startRPCServer()
| agpl-3.0 | 5,251,333,690,015,819,000 | 34.930931 | 248 | 0.67781 | false |
mistercrunch/panoramix | tests/email_tests.py | 1 | 7013 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for email service in Superset"""
import logging
import tempfile
import unittest
from email.mime.application import MIMEApplication
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from unittest import mock
from superset import app
from superset.utils import core as utils
from tests.base_tests import SupersetTestCase
from .utils import read_fixture
send_email_test = mock.Mock()
logger = logging.getLogger(__name__)
class TestEmailSmtp(SupersetTestCase):
def setUp(self):
app.config["SMTP_SSL"] = False
@mock.patch("superset.utils.core.send_mime_email")
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b"attachment")
attachment.seek(0)
utils.send_email_smtp(
"to", "subject", "content", app.config, files=[attachment.name]
)
assert mock_send_mime.called
call_args = mock_send_mime.call_args[0]
logger.debug(call_args)
assert call_args[0] == app.config["SMTP_MAIL_FROM"]
assert call_args[1] == ["to"]
msg = call_args[2]
assert msg["Subject"] == "subject"
assert msg["From"] == app.config["SMTP_MAIL_FROM"]
assert len(msg.get_payload()) == 2
mimeapp = MIMEApplication("attachment")
assert msg.get_payload()[-1].get_payload() == mimeapp.get_payload()
@mock.patch("superset.utils.core.send_mime_email")
def test_send_smtp_data(self, mock_send_mime):
utils.send_email_smtp(
"to", "subject", "content", app.config, data={"1.txt": b"data"}
)
assert mock_send_mime.called
call_args = mock_send_mime.call_args[0]
logger.debug(call_args)
assert call_args[0] == app.config["SMTP_MAIL_FROM"]
assert call_args[1] == ["to"]
msg = call_args[2]
assert msg["Subject"] == "subject"
assert msg["From"] == app.config["SMTP_MAIL_FROM"]
assert len(msg.get_payload()) == 2
mimeapp = MIMEApplication("data")
assert msg.get_payload()[-1].get_payload() == mimeapp.get_payload()
@mock.patch("superset.utils.core.send_mime_email")
def test_send_smtp_inline_images(self, mock_send_mime):
image = read_fixture("sample.png")
utils.send_email_smtp(
"to", "subject", "content", app.config, images=dict(blah=image)
)
assert mock_send_mime.called
call_args = mock_send_mime.call_args[0]
logger.debug(call_args)
assert call_args[0] == app.config["SMTP_MAIL_FROM"]
assert call_args[1] == ["to"]
msg = call_args[2]
assert msg["Subject"] == "subject"
assert msg["From"] == app.config["SMTP_MAIL_FROM"]
assert len(msg.get_payload()) == 2
mimeapp = MIMEImage(image)
assert msg.get_payload()[-1].get_payload() == mimeapp.get_payload()
@mock.patch("superset.utils.core.send_mime_email")
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b"attachment")
attachment.seek(0)
utils.send_email_smtp(
"to",
"subject",
"content",
app.config,
files=[attachment.name],
cc="cc",
bcc="bcc",
)
assert mock_send_mime.called
call_args = mock_send_mime.call_args[0]
assert call_args[0] == app.config["SMTP_MAIL_FROM"]
assert call_args[1] == ["to", "cc", "bcc"]
msg = call_args[2]
assert msg["Subject"] == "subject"
assert msg["From"] == app.config["SMTP_MAIL_FROM"]
assert len(msg.get_payload()) == 2
mimeapp = MIMEApplication("attachment")
assert msg.get_payload()[-1].get_payload() == mimeapp.get_payload()
@mock.patch("smtplib.SMTP_SSL")
@mock.patch("smtplib.SMTP")
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.send_mime_email("from", "to", msg, app.config, dryrun=False)
mock_smtp.assert_called_with(app.config["SMTP_HOST"], app.config["SMTP_PORT"])
assert mock_smtp.return_value.starttls.called
mock_smtp.return_value.login.assert_called_with(
app.config["SMTP_USER"], app.config["SMTP_PASSWORD"]
)
mock_smtp.return_value.sendmail.assert_called_with(
"from", "to", msg.as_string()
)
assert mock_smtp.return_value.quit.called
@mock.patch("smtplib.SMTP_SSL")
@mock.patch("smtplib.SMTP")
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
app.config["SMTP_SSL"] = True
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.send_mime_email("from", "to", MIMEMultipart(), app.config, dryrun=False)
assert not mock_smtp.called
mock_smtp_ssl.assert_called_with(
app.config["SMTP_HOST"], app.config["SMTP_PORT"]
)
@mock.patch("smtplib.SMTP_SSL")
@mock.patch("smtplib.SMTP")
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
smtp_user = app.config["SMTP_USER"]
smtp_password = app.config["SMTP_PASSWORD"]
app.config["SMTP_USER"] = None
app.config["SMTP_PASSWORD"] = None
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.send_mime_email("from", "to", MIMEMultipart(), app.config, dryrun=False)
assert not mock_smtp_ssl.called
mock_smtp.assert_called_with(app.config["SMTP_HOST"], app.config["SMTP_PORT"])
assert not mock_smtp.login.called
app.config["SMTP_USER"] = smtp_user
app.config["SMTP_PASSWORD"] = smtp_password
@mock.patch("smtplib.SMTP_SSL")
@mock.patch("smtplib.SMTP")
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.send_mime_email("from", "to", MIMEMultipart(), app.config, dryrun=True)
assert not mock_smtp.called
assert not mock_smtp_ssl.called
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -800,606,943,113,333,600 | 39.074286 | 86 | 0.62926 | false |
google-research/tensor2robot | hooks/async_export_hook_builder_tpu_test.py | 1 | 2533 | # coding=utf-8
# Copyright 2021 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TD3 Hooks."""
import os
import gin
from tensor2robot.hooks import async_export_hook_builder
from tensor2robot.predictors import exported_savedmodel_predictor
from tensor2robot.preprocessors import noop_preprocessor
from tensor2robot.utils import mocks
from tensor2robot.utils import train_eval
import tensorflow.compat.v1 as tf # tf
_EXPORT_DIR = 'export_dir'
_BATCH_SIZES_FOR_EXPORT = [128]
_MAX_STEPS = 4
_BATCH_SIZE = 4
class AsyncExportHookBuilderTest(tf.test.TestCase):
def test_with_mock_training(self):
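    # Trains a mock TPU model for a few steps and verifies that the async
    # export hook writes at least one SavedModel under export_dir.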
model_dir = self.create_tempdir().full_path
mock_t2r_model = mocks.MockT2RModel(
preprocessor_cls=noop_preprocessor.NoOpPreprocessor,
device_type='tpu',
use_avg_model_params=True)
mock_input_generator = mocks.MockInputGenerator(batch_size=_BATCH_SIZE)
export_dir = os.path.join(model_dir, _EXPORT_DIR)
hook_builder = async_export_hook_builder.AsyncExportHookBuilder(
export_dir=export_dir,
create_export_fn=async_export_hook_builder.default_create_export_fn)
gin.parse_config('tf.contrib.tpu.TPUConfig.iterations_per_loop=1')
gin.parse_config('tf.estimator.RunConfig.save_checkpoints_steps=1')
# We optimize our network.
train_eval.train_eval_model(
t2r_model=mock_t2r_model,
input_generator_train=mock_input_generator,
train_hook_builders=[hook_builder],
model_dir=model_dir,
max_train_steps=_MAX_STEPS)
self.assertNotEmpty(tf.io.gfile.listdir(model_dir))
self.assertNotEmpty(tf.io.gfile.listdir(export_dir))
for exported_model_dir in tf.io.gfile.listdir(export_dir):
self.assertNotEmpty(
tf.io.gfile.listdir(os.path.join(export_dir, exported_model_dir)))
predictor = exported_savedmodel_predictor.ExportedSavedModelPredictor(
export_dir=export_dir)
self.assertTrue(predictor.restore())
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | -8,682,825,229,037,684,000 | 35.710145 | 76 | 0.731938 | false |
mrven/origin_align_addon | OriginAlign.py | 1 | 5309 | #02.05.2015
#Ivan "mrven" Vostrikov
bl_info = {
"name": "Origin Align",
'author': 'Ivan Vostrikov',
'version': (1, 0, 1),
'blender': (2, 7, 4),
'location': '3d view > Object > Origin Align X/Y/Z',
'description': 'In object mode, sets object origin to selected axis of 3D Cursor',
'wiki_url': 'lowpolyart3d.blogspot.ru',
'tracker_url': '',
"category": "Object",
}
import bpy
#--------------------------------------------------------
# Align Selected object's origin points for Z Axis
class OriginAlignZ(bpy.types.Operator):
"""Origin Align Z"""
bl_idname = "object.origin_align_z"
bl_label = "Origin Align Z"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
# Save selected objects and current position of 3D Cursor
current_selected_obj = bpy.context.selected_objects
saved_cursor_loc = bpy.context.scene.cursor_location.copy()
bpy.ops.object.mode_set(mode = 'OBJECT')
# Change individual origin point
for x in current_selected_obj:
# Select only current object (for setting origin)
bpy.ops.object.select_all(action='DESELECT')
x.select = True
# Save current origin and relocate 3D Cursor to (X-Origin, Y-Origin, Z-Cursor)
saved_origin_loc = x.location.copy()
bpy.context.scene.cursor_location = [saved_origin_loc[0], saved_origin_loc[1], saved_cursor_loc[2]]
# Apply origin to Cursor position
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
# Reset 3D Cursor position
bpy.context.scene.cursor_location = saved_cursor_loc
# Select again objects
for j in current_selected_obj:
j.select = True;
return {'FINISHED'}
#------------------------------------------------------------
# Align Selected object's origin points for Y Axis
class OriginAlignY(bpy.types.Operator):
"""Origin Align Y"""
bl_idname = "object.origin_align_y"
bl_label = "Origin Align Y"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
# Save selected objects and current position of 3D Cursor
current_selected_obj = bpy.context.selected_objects
saved_cursor_loc = bpy.context.scene.cursor_location.copy()
bpy.ops.object.mode_set(mode = 'OBJECT')
# Change individual origin point
for x in current_selected_obj:
# Select only current object (for setting origin)
bpy.ops.object.select_all(action='DESELECT')
x.select = True
# Save current origin and relocate 3D Cursor to (X-Origin, Y-Cursor, Z-Origin)
saved_origin_loc = x.location.copy()
bpy.context.scene.cursor_location = [saved_origin_loc[0], saved_cursor_loc[1], saved_origin_loc[2]]
# Apply origin to Cursor position
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
# Reset 3D Cursor position
bpy.context.scene.cursor_location = saved_cursor_loc
# Select again objects
for j in current_selected_obj:
j.select = True;
return {'FINISHED'}
#------------------------------------------------------------
# Align Selected object's origin points for X Axis
class OriginAlignX(bpy.types.Operator):
"""Origin Align X"""
bl_idname = "object.origin_align_x"
bl_label = "Origin Align X"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
# Save selected objects and current position of 3D Cursor
current_selected_obj = bpy.context.selected_objects
saved_cursor_loc = bpy.context.scene.cursor_location.copy()
bpy.ops.object.mode_set(mode = 'OBJECT')
# Change individual origin point
for x in current_selected_obj:
# Select only current object (for setting origin)
bpy.ops.object.select_all(action='DESELECT')
x.select = True
# Save current origin and relocate 3D Cursor to (X-Cursor, Y-Origin, Z-Origin)
saved_origin_loc = x.location.copy()
bpy.context.scene.cursor_location = [saved_cursor_loc[0], saved_origin_loc[1], saved_origin_loc[2]]
# Apply origin to Cursor position
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
# Reset 3D Cursor position
bpy.context.scene.cursor_location = saved_cursor_loc
# Select again objects
for j in current_selected_obj:
j.select = True;
return {'FINISHED'}
def menu_func(self, context):
self.layout.operator(OriginAlignX.bl_idname)
self.layout.operator(OriginAlignY.bl_idname)
self.layout.operator(OriginAlignZ.bl_idname)
def register():
bpy.utils.register_class(OriginAlignZ)
bpy.utils.register_class(OriginAlignY)
bpy.utils.register_class(OriginAlignX)
bpy.types.VIEW3D_MT_object.prepend(menu_func)
def unregister():
bpy.utils.unregister_class(OriginAlignZ)
bpy.utils.unregister_class(OriginAlignY)
bpy.utils.unregister_class(OriginAlignX)
bpy.types.VIEW3D_MT_object.remove(menu_func)
if __name__ == "__main__":
register()
| gpl-2.0 | -5,504,240,410,435,975,000 | 38.619403 | 111 | 0.600301 | false |
GoogleCloudPlatform/python-docs-samples | storage/s3-sdk/noxfile_config.py | 1 | 2085 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# We are reaching maximum number of HMAC keys on the service account.
# We change the service account based on the value of
# RUN_TESTS_SESSION. The reason we can not use multiple project is
# that our new projects are enforced to have
# 'constraints/iam.disableServiceAccountKeyCreation' policy.
def get_service_account_email():
session = os.environ.get('RUN_TESTS_SESSION')
if session == 'py-3.6':
return ('py36-storage-test@'
'python-docs-samples-tests.iam.gserviceaccount.com')
if session == 'py-3.7':
return ('py37-storage-test@'
'python-docs-samples-tests.iam.gserviceaccount.com')
if session == 'py-3.8':
return ('py38-storage-test@'
'python-docs-samples-tests.iam.gserviceaccount.com')
return os.environ['HMAC_KEY_TEST_SERVICE_ACCOUNT']
TEST_CONFIG_OVERRIDE = {
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
'envs': {
'HMAC_KEY_TEST_SERVICE_ACCOUNT': get_service_account_email(),
# Some tests can not use multiple projects because of several reasons:
# 1. The new projects is enforced to have the
# 'constraints/iam.disableServiceAccountKeyCreation' policy.
# 2. The new projects buckets need to have universal permission model.
# For those tests, we'll use the original project.
'MAIN_GOOGLE_CLOUD_PROJECT': 'python-docs-samples-tests'
},
}
| apache-2.0 | 3,094,053,016,361,112,000 | 40.7 | 78 | 0.70024 | false |
googleads/google-ads-python | google/ads/googleads/v6/services/services/google_ads_field_service/client.py | 1 | 21308 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v6.resources.types import google_ads_field
from google.ads.googleads.v6.services.services.google_ads_field_service import (
pagers,
)
from google.ads.googleads.v6.services.types import google_ads_field_service
from .transports.base import GoogleAdsFieldServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import GoogleAdsFieldServiceGrpcTransport
class GoogleAdsFieldServiceClientMeta(type):
"""Metaclass for the GoogleAdsFieldService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[GoogleAdsFieldServiceTransport]]
_transport_registry["grpc"] = GoogleAdsFieldServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[GoogleAdsFieldServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class GoogleAdsFieldServiceClient(metaclass=GoogleAdsFieldServiceClientMeta):
"""Service to fetch Google Ads API fields."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
GoogleAdsFieldServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
GoogleAdsFieldServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> GoogleAdsFieldServiceTransport:
"""Return the transport used by the client instance.
Returns:
GoogleAdsFieldServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def google_ads_field_path(google_ads_field: str,) -> str:
"""Return a fully-qualified google_ads_field string."""
return "googleAdsFields/{google_ads_field}".format(
google_ads_field=google_ads_field,
)
@staticmethod
def parse_google_ads_field_path(path: str) -> Dict[str, str]:
"""Parse a google_ads_field path into its component segments."""
m = re.match(r"^googleAdsFields/(?P<google_ads_field>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[credentials.Credentials] = None,
transport: Union[str, GoogleAdsFieldServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the google ads field service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.GoogleAdsFieldServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, GoogleAdsFieldServiceTransport):
# transport is a GoogleAdsFieldServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = GoogleAdsFieldServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_google_ads_field(
self,
request: google_ads_field_service.GetGoogleAdsFieldRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> google_ads_field.GoogleAdsField:
r"""Returns just the requested field.
Args:
request (:class:`google.ads.googleads.v6.services.types.GetGoogleAdsFieldRequest`):
The request object. Request message for
[GoogleAdsFieldService.GetGoogleAdsField][google.ads.googleads.v6.services.GoogleAdsFieldService.GetGoogleAdsField].
resource_name (:class:`str`):
Required. The resource name of the
field to get.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v6.resources.types.GoogleAdsField:
A field or resource (artifact) used
by GoogleAdsService.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a google_ads_field_service.GetGoogleAdsFieldRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, google_ads_field_service.GetGoogleAdsFieldRequest
):
request = google_ads_field_service.GetGoogleAdsFieldRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_google_ads_field
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
def search_google_ads_fields(
self,
request: google_ads_field_service.SearchGoogleAdsFieldsRequest = None,
*,
query: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.SearchGoogleAdsFieldsPager:
r"""Returns all fields that match the search query.
Args:
request (:class:`google.ads.googleads.v6.services.types.SearchGoogleAdsFieldsRequest`):
The request object. Request message for
[GoogleAdsFieldService.SearchGoogleAdsFields][google.ads.googleads.v6.services.GoogleAdsFieldService.SearchGoogleAdsFields].
query (:class:`str`):
Required. The query string.
This corresponds to the ``query`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v6.services.services.google_ads_field_service.pagers.SearchGoogleAdsFieldsPager:
Response message for
[GoogleAdsFieldService.SearchGoogleAdsFields][google.ads.googleads.v6.services.GoogleAdsFieldService.SearchGoogleAdsFields].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([query]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a google_ads_field_service.SearchGoogleAdsFieldsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, google_ads_field_service.SearchGoogleAdsFieldsRequest
):
request = google_ads_field_service.SearchGoogleAdsFieldsRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if query is not None:
request.query = query
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.search_google_ads_fields
]
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.SearchGoogleAdsFieldsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("GoogleAdsFieldServiceClient",)
| apache-2.0 | 3,742,854,204,615,069,700 | 40.055877 | 140 | 0.62324 | false |
doucol/mysite | mysite/settings.py | 1 | 2120 | """
Django settings for mysite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2y6@#rmtd9^@m6&stkbg*kua-z8@hv=pj58kldsk#$+&_8h%wh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Denver'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| mit | -8,617,119,388,000,631,000 | 23.941176 | 71 | 0.723113 | false |
labase/brython_crafty | src/crafty/graphics.py | 1 | 7243 | #! /usr/bin/env python
# -*- coding: UTF8 -*-
"""
############################################################
Graphic handling classes
############################################################
:Author: *Carlo E. T. Oliveira*
:Contact: [email protected]
:Date: 2014/09/17
:Status: This is a "work in progress"
:Revision: 0.1.0
:Home: `Labase <http://labase.selfip.org/>`__
:Copyright: 2013, `GPL <http://is.gd/3Udt>`__.
.. moduleauthor:: Carlo Oliveira <[email protected]>
"""
class Canvas:
"""Canvas. :ref:`canvas`
When this component is added to an entity it will be drawn to the global canvas element.
The canvas element (and hence all Canvas entities) is always rendered below any DOM entities.
Crafty.canvas.init() will be automatically called if it is not called already to initialize the canvas element.
"""
def __init__(self, stage, cmp):
self.__stage = stage
self.__elt = stage.e(cmp)
def draw(self, ctx, x, y, w, h):
""" draw([[Context ctx, ]Number x, Number y, Number w, Number h]).
"""
        self.__elt.draw(ctx, x, y, w, h)
        return self
class Sprite:
"""Sprite. :ref:`sprite`
Component for using tiles in a sprite map.
"""
def __init__(self, ent): # , x, y, w, h):
self.__ent = ent
def sprite(self, x, y, w, h):
self.__ent.requires('Sprite')
self.__ent.sprite(x, y, w, h)
return self
@property
def coord(self):
"""The coordinate of the slide within the sprite in the format of [x, y, w, h].
"""
        # getattr avoids Python name-mangling of the Crafty ``__coord`` attribute.
        return getattr(self.__ent, '__coord')
def crop(self, x, y, w, h):
"""Crop the sprite.
If the entity needs to be smaller than the tile size, use this method to crop it.
The values should be in pixels rather than tiles.
:param x: Offset x position
:param y: Offset y position
:param w: New width
:param h: New height
:returns: Self, this same entity
"""
self.__ent.requires('Sprite')
        self.__ent.crop(x, y, w, h)
return self
def reel(self, reelId, duration, fromX, fromY, frameCount):
"""Create animation reel.
:param: String reelId, Duration duration, Number fromX, Number fromY, Number frameCount
:returns: Self, this same entity
"""
self.__ent.requires('SpriteAnimation')
self.__ent.reel(reelId, duration, fromX, fromY, frameCount)
return self
def animate(self, reelId=None, loopCount=1):
"""Animate Entity.
:param reelId: String reel identification
:param loopCount: Integer number of loops, default 1, indefinite if -1
:returns: Self, this same entity
"""
self.__ent.requires('SpriteAnimation')
if reelId:
self.__ent.animate(reelId, loopCount)
else:
self.__ent.animate(loopCount)
return self
def isPlaying(self, reelId=''):
"""Return is the reel is playing.
:param reelId: The reelId of the reel we wish to examine, if missing default to current reel
:returns: The current animation state
"""
self.__ent.requires('SpriteAnimation')
if reelId:
return self.__ent.isPlaying(reelId)
else:
return self.__ent.isPlaying()
def resumeAnimation(self):
"""This will resume animation of the current reel from its current state.
If a reel is already playing, or there is no current reel, there will be no effect.
"""
self.__ent.resumeAnimation()
def pauseAnimation(self):
"""Pauses the currently playing animation, or does nothing if no animation is playing.
"""
self.__ent.pauseAnimation()
def resetAnimation(self):
"""Resets the current animation to its initial state.
Resets the number of loops to the last specified value, which defaults to 1.
Neither pauses nor resumes the current animation.
"""
self.__ent.resetAnimation()
def loops(self, loopCount=None):
"""Set or return the number of loops.
Sets the number of times the animation will loop for.
If called while an animation is in progress, the current state will be considered the first loop.
        :param loopCount: The number of times to play the animation; if missing, return the number of loops left.
        :returns: The number of loops left. Returns 0 if no reel is active.
        """
        if loopCount is None:
            return self.__ent.loops()
        else:
            return self.__ent.loops(loopCount)
def reelPosition(self, position=None):
"""Sets the position of the current reel by frame number.
:param position: The frame to jump to. This is zero-indexed.
        A negative value counts back from the last frame.
Sets the position of the current reel by percent progress if number is float.
Jumps to the specified position if string.
The only currently accepted value is "end", which will jump to the end of the reel.
:returns: The current frame number
"""
        if position is None:
            return self.__ent.reelPosition()
        else:
            return self.__ent.reelPosition(position)
def tween(self, duration, **properties):
"""This method will animate numeric properties over the specified duration.
These include x, y, w, h, alpha and rotation in degrees.
:param properties: Object of numeric properties and what they should animate to
:param duration: Duration to animate the properties over, in milliseconds.
        :returns: Self, this same entity
        """
        self.__ent.requires('Tween')
        self.__ent.tween(dict(**properties), duration)
        return self
class Draggable:
"""Enable drag and drop of the entity. :ref:`draggable`
"""
def __init__(self, ent):
self.__ent = ent
def dragDirection(self, degrees=None, x=None, y=None):
"""Specify the dragging direction.
if no parameters are given, remove dragging.
:param degrees: A number, the degree (clockwise) of the move direction with respect to the x axis.
:param x: the vector (valx, valy) denotes the move direction.
:param y: the vector (valx, valy) denotes the move direction.
"""
        if degrees is not None:
            self.__ent.dragDirection(degrees)
        elif x is not None:
            self.__ent.dragDirection(dict(x=x, y=y))
        else:
            self.__ent.dragDirection()
def startDrag(self):
"""Make the entity follow the mouse positions.
"""
self.__ent.startDrag()
def stopDrag(self):
"""Stop the entity from dragging. Essentially reproducing the drop.
"""
self.__ent.stopDrag()
def enableDrag(self):
"""Rebind the mouse events. Use if .disableDrag has been called.
"""
self.__ent.enableDrag()
def disableDrag(self):
"""Stops entity from being draggable. Reenable with .enableDrag().
"""
self.__ent.disableDrag() | gpl-2.0 | -4,888,606,017,845,747 | 31.195556 | 115 | 0.597542 | false |
joa/haxe-sublime2-bundle | features/haxe_create_type.py | 1 | 3178 | import sublime_plugin
import sublime
import os
try: # Python 3
from ..HaxeHelper import HaxeComplete_inst, isType
except (ValueError): # Python 2
from HaxeHelper import HaxeComplete_inst, isType
class HaxeCreateType( sublime_plugin.WindowCommand ):
classpath = None
currentFile = None
currentSrc = None
currentType = None
def run( self , paths = [] , t = "class" ) :
builds = HaxeComplete_inst().builds
HaxeCreateType.currentType = t
view = sublime.active_window().active_view()
scopes = view.scope_name(view.sel()[0].end()).split()
fn = view.file_name()
pack = [];
if fn is None :
return
if len(builds) == 0 :
HaxeComplete_inst().extract_build_args(view)
if len(paths) == 0 :
paths.append(fn)
for path in paths :
if os.path.isfile( path ) :
path = os.path.dirname( path )
if HaxeCreateType.classpath is None :
HaxeCreateType.classpath = path
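            # Find which configured classpath contains this path and derive
            # the package parts from the remaining sub-directories.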
for b in builds :
for cp in b.classpaths :
if path.startswith( cp ) :
HaxeCreateType.classpath = path[0:len(cp)]
for p in path[len(cp):].split(os.sep) :
if "." in p :
break
elif p :
pack.append(p)
if HaxeCreateType.classpath is None :
if len(builds) > 0 :
HaxeCreateType.classpath = builds[0].classpaths[0]
# so default text ends with .
if len(pack) > 0 :
pack.append("")
win = sublime.active_window()
sublime.status_message( "Current classpath : " + HaxeCreateType.classpath )
win.show_input_panel("Enter "+t+" name : " , ".".join(pack) , self.on_done , self.on_change , self.on_cancel )
def on_done( self , inp ) :
fn = self.classpath;
parts = inp.split(".")
pack = []
cl = "${2:ClassName}"
while( len(parts) > 0 ):
p = parts.pop(0)
fn = os.path.join( fn , p )
if isType.match( p ) :
cl = p
break;
else :
pack.append(p)
if len(parts) > 0 :
cl = parts[0]
fn += ".hx"
HaxeCreateType.currentFile = fn
t = HaxeCreateType.currentType
src = "\npackage " + ".".join(pack) + ";\n\n"+t+" "+cl+" "
if t == "typedef" :
src += "= "
src += "\n{\n\n\t$1\n\n}"
HaxeCreateType.currentSrc = src
v = sublime.active_window().open_file( fn )
@staticmethod
def on_activated( view ) :
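        # Once the freshly created file is opened and still empty, insert the
        # prepared class/typedef skeleton as a snippet.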
if view.file_name() == HaxeCreateType.currentFile and view.size() == 0 :
view.run_command( "insert_snippet" , {
"contents" : HaxeCreateType.currentSrc
})
def on_change( self , inp ) :
sublime.status_message( "Current classpath : " + HaxeCreateType.classpath )
#print( inp )
def on_cancel( self ) :
        pass
| apache-2.0 | 42,504,940,573,944,810 | 27.890909 | 118 | 0.499056 | false |
uclouvain/osis | base/tests/ddd/test_business_validator.py | 1 | 7504 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.test import SimpleTestCase
from base.ddd.utils.business_validator import BusinessValidator
from base.ddd.utils.validation_message import BusinessValidationMessage, MessageLevel, BusinessValidationMessageList
class ValidatorTest(BusinessValidator):
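    # Test double: both business checks below always fail, so validate()
    # records one error message and one warning message.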
success_messages = [
BusinessValidationMessage('Success msg', MessageLevel.SUCCESS)
]
def validate(self):
if not self._first_business_validation():
self.add_error_message("Error message")
if not self._other_business_validation():
self.add_messages([
BusinessValidationMessage('Warning msg', MessageLevel.WARNING),
])
def _first_business_validation(self):
return False
def _other_business_validation(self):
return False
class TestBusinessValidator(SimpleTestCase):
def test_property_messages(self):
validator = ValidatorTest()
expected_result = [
BusinessValidationMessage("Error message", MessageLevel.ERROR),
BusinessValidationMessage('Warning msg', MessageLevel.WARNING),
]
validator.is_valid()
self.assertEqual(validator.messages, expected_result)
success_message = ValidatorTest.success_messages[0]
self.assertNotIn(success_message, validator.messages)
def test_property_messages_when_no_success_messages_set(self):
validator = ValidatorTest()
validator.success_messages = None
expected_result = [
BusinessValidationMessage("Error message", MessageLevel.ERROR),
BusinessValidationMessage('Warning msg', MessageLevel.WARNING),
]
validator.is_valid()
self.assertEqual(validator.messages, expected_result)
def test_property_error_message(self):
validator = ValidatorTest()
expected_result = [
BusinessValidationMessage("Error message", MessageLevel.ERROR),
]
validator.is_valid()
self.assertEqual(validator.error_messages, expected_result)
def test_property_warning_messages(self):
validator = ValidatorTest()
expected_result = [
BusinessValidationMessage('Warning msg', MessageLevel.WARNING),
]
validator.is_valid()
self.assertEqual(validator.warning_messages, expected_result)
def test_is_valid(self):
validator = ValidatorTest()
self.assertFalse(validator.is_valid())
def test_is_valid_called_twice_on_same_instance(self):
validator = ValidatorTest()
self.assertFalse(validator.is_valid())
self.assertFalse(validator.is_valid()) # Called a second time
expected_result = [
BusinessValidationMessage("Error message", MessageLevel.ERROR),
BusinessValidationMessage('Warning msg', MessageLevel.WARNING),
]
self.assertNotIn("Success msg", validator.messages, "Should only return warnings and success when is valid")
self.assertEqual(validator.messages, expected_result, "Assert the validator doesn't add messages twice")
def test_reset_messages_does_not_reset_success_message(self):
validator = ValidatorTest()
initial_success_messages = list(validator.success_messages)
validator._reset_messages()
self.assertListEqual(
validator.success_messages,
initial_success_messages,
"Success message is an attribute of the class ; it is a static value, it can't be removed."
)
def test_success_message_to_add_should_be_ignored(self):
validator = ValidatorTest()
validator.add_message(BusinessValidationMessage("Success", MessageLevel.SUCCESS))
self.assertNotIn("Success", validator.messages)
def test_add_message_when_arg_is_not_success_message(self):
validator = ValidatorTest()
validator.add_message(BusinessValidationMessage("A message", MessageLevel.WARNING))
self.assertIn("A message", validator.messages)
def test_add_error_message(self):
validator = ValidatorTest()
validator.add_error_message("An error message")
self.assertIn("An error message", validator.messages)
def test_add_success_message(self):
validator = ValidatorTest()
validator.add_success_message("test")
self.assertIn("test", validator.success_messages)
def test_add_warning_message(self):
validator = ValidatorTest()
validator.add_warning_message("warning msg test")
self.assertIn("warning msg test", validator.warning_messages)
class TestBusinessValidationMessageList(SimpleTestCase):
def setUp(self):
self.error_message = BusinessValidationMessage("error message", MessageLevel.ERROR)
self.warning_message = BusinessValidationMessage("warning message", MessageLevel.WARNING)
self.success_message = BusinessValidationMessage("success message", MessageLevel.SUCCESS)
self.messages = [
self.error_message,
self.warning_message,
self.success_message,
]
def test_contains_error_when_has_error(self):
message_list = BusinessValidationMessageList(messages=[self.error_message])
self.assertTrue(message_list.contains_errors())
def test_contains_error_when_has_warning(self):
message_list = BusinessValidationMessageList(messages=[self.warning_message])
self.assertFalse(message_list.contains_errors())
def test_contains_error_when_messages_is_empty(self):
message_list = BusinessValidationMessageList(messages=[])
self.assertFalse(message_list.contains_errors())
def test_errors_property(self):
message_list = BusinessValidationMessageList(messages=self.messages)
self.assertListEqual(message_list.errors, [self.error_message])
def test_warnings_property(self):
message_list = BusinessValidationMessageList(messages=self.messages)
self.assertListEqual(message_list.warnings, [self.warning_message])
def test_success_property(self):
message_list = BusinessValidationMessageList(messages=self.messages)
self.assertListEqual(message_list.success, [self.success_message])
| agpl-3.0 | -1,827,118,415,433,600,000 | 41.630682 | 116 | 0.682927 | false |
abhikeshav/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ipv4_acl_cfg.py | 1 | 80123 | """ Cisco_IOS_XR_ipv4_acl_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR ipv4\-acl package configuration.
This module contains definitions
for the following management objects\:
ipv4\-acl\-and\-prefix\-list\: IPv4 ACL configuration data
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes import Ipv4AclDscpNumberEnum
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes import Ipv4AclGrantEnumEnum
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes import Ipv4AclIcmpTypeCodeEnumEnum
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes import Ipv4AclIgmpNumberEnum
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes import Ipv4AclLoggingEnumEnum
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes import Ipv4AclOperatorEnumEnum
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes import Ipv4AclPortNumberEnum
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes import Ipv4AclPrecedenceNumberEnum
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes import Ipv4AclProtocolNumberEnum
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes import Ipv4AclTcpBitsNumberEnum
from ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes import Ipv4AclTcpMatchOperatorEnumEnum
class NextHopTypeEnum(Enum):
"""
NextHopTypeEnum
Next\-hop type.
.. data:: NONE_NEXT_HOP = 0
None next-hop.
.. data:: REGULAR_NEXT_HOP = 1
Regular next-hop.
.. data:: DEFAULT_NEXT_HOP = 2
Default next-hop.
"""
NONE_NEXT_HOP = 0
REGULAR_NEXT_HOP = 1
DEFAULT_NEXT_HOP = 2
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['NextHopTypeEnum']
class Ipv4AclAndPrefixList(object):
"""
IPv4 ACL configuration data
.. attribute:: accesses
Table of access lists. Entries in this table and the AccessListExistenceTable table must be kept consistent
**type**\: :py:class:`Accesses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses>`
.. attribute:: log_update
Control access lists log updates
**type**\: :py:class:`LogUpdate <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.LogUpdate>`
.. attribute:: prefixes
Table of ACL prefix lists. Entries in this table and the PrefixListExistenceTable table must be kept consistent
**type**\: :py:class:`Prefixes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Prefixes>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.accesses = Ipv4AclAndPrefixList.Accesses()
self.accesses.parent = self
self.log_update = Ipv4AclAndPrefixList.LogUpdate()
self.log_update.parent = self
self.prefixes = Ipv4AclAndPrefixList.Prefixes()
self.prefixes.parent = self
class Accesses(object):
"""
Table of access lists. Entries in this table
and the AccessListExistenceTable table must be
kept consistent
.. attribute:: access
An ACL
**type**\: list of :py:class:`Access <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access = YList()
self.access.parent = self
self.access.name = 'access'
class Access(object):
"""
An ACL
.. attribute:: access_list_name <key>
Access list name \- 64 characters max
**type**\: str
.. attribute:: access_list_entries
ACL entry table; contains list of ACEs
**type**\: :py:class:`AccessListEntries <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access_list_name = None
self.access_list_entries = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries()
self.access_list_entries.parent = self
class AccessListEntries(object):
"""
ACL entry table; contains list of ACEs
.. attribute:: access_list_entry
An ACL entry; either a description (remark) or an ACE to match against
**type**\: list of :py:class:`AccessListEntry <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.access_list_entry = YList()
self.access_list_entry.parent = self
self.access_list_entry.name = 'access_list_entry'
class AccessListEntry(object):
"""
An ACL entry; either a description (remark)
or an ACE to match against
.. attribute:: sequence_number <key>
Sequence number for this entry
**type**\: int
**range:** 1..2147483646
.. attribute:: capture
Enable capture
**type**\: bool
.. attribute:: counter_name
Counter name
**type**\: str
.. attribute:: destination_network
Destination network settings
**type**\: :py:class:`DestinationNetwork <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.DestinationNetwork>`
.. attribute:: destination_port
Destination port settings
**type**\: :py:class:`DestinationPort <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.DestinationPort>`
.. attribute:: destination_port_group
Destination port object group name
**type**\: str
**range:** 1..64
.. attribute:: destination_prefix_group
IPv4 destination network object group name
**type**\: str
**range:** 1..64
.. attribute:: dscp
DSCP settings
**type**\: :py:class:`Dscp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Dscp>`
.. attribute:: fragments
                    Check non\-initial fragments. Item is mutually exclusive with TCP, SCTP, UDP, IGMP and ICMP comparisons and with logging
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: grant
Whether to forward or drop packets matching the ACE
**type**\: :py:class:`Ipv4AclGrantEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclGrantEnumEnum>`
.. attribute:: icmp
ICMP settings
**type**\: :py:class:`Icmp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Icmp>`
.. attribute:: icmp_off
To turn off ICMP generation for deny ACEs
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: igmp_message_type
IGMP message type to match. Leave unspecified if no message type comparison is to be done
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclIgmpNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclIgmpNumberEnum>`
----
**type**\: int
**range:** 0..255
----
.. attribute:: log_option
Whether and how to log matches against this entry
**type**\: :py:class:`Ipv4AclLoggingEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclLoggingEnumEnum>`
.. attribute:: next_hop
Next\-hop settings
**type**\: :py:class:`NextHop <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop>`
.. attribute:: packet_length
Packet length settings
**type**\: :py:class:`PacketLength <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.PacketLength>`
.. attribute:: precedence
                    Precedence value to match (if a protocol was specified), leave unspecified if precedence comparison is not to be performed
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclPrecedenceNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclPrecedenceNumberEnum>`
----
**type**\: int
**range:** 0..7
----
.. attribute:: protocol
Protocol to match
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclProtocolNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclProtocolNumberEnum>`
----
**type**\: int
**range:** 0..255
----
.. attribute:: remark
Comments or a description for the access list
**type**\: str
.. attribute:: source_network
Source network settings
**type**\: :py:class:`SourceNetwork <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.SourceNetwork>`
.. attribute:: source_port
Source port settings
**type**\: :py:class:`SourcePort <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.SourcePort>`
.. attribute:: source_port_group
Source port object group name
**type**\: str
**range:** 1..64
.. attribute:: source_prefix_group
IPv4 source network object group name
**type**\: str
**range:** 1..64
.. attribute:: tcp
TCP settings
**type**\: :py:class:`Tcp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Tcp>`
.. attribute:: time_to_live
TTL settings
**type**\: :py:class:`TimeToLive <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.TimeToLive>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.sequence_number = None
self.capture = None
self.counter_name = None
self.destination_network = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.DestinationNetwork()
self.destination_network.parent = self
self.destination_port = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.DestinationPort()
self.destination_port.parent = self
self.destination_port_group = None
self.destination_prefix_group = None
self.dscp = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Dscp()
self.dscp.parent = self
self.fragments = None
self.grant = None
self.icmp = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Icmp()
self.icmp.parent = self
self.icmp_off = None
self.igmp_message_type = None
self.log_option = None
self.next_hop = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop()
self.next_hop.parent = self
self.packet_length = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.PacketLength()
self.packet_length.parent = self
self.precedence = None
self.protocol = None
self.remark = None
self.source_network = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.SourceNetwork()
self.source_network.parent = self
self.source_port = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.SourcePort()
self.source_port.parent = self
self.source_port_group = None
self.source_prefix_group = None
self.tcp = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Tcp()
self.tcp.parent = self
self.time_to_live = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.TimeToLive()
self.time_to_live.parent = self
class SourceNetwork(object):
"""
Source network settings.
.. attribute:: source_address
Source IPv4 address to match, leave unspecified for any
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: source_wild_card_bits
Wildcard bits to apply to source address (if specified), leave unspecified for no wildcarding
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.source_address = None
self.source_wild_card_bits = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:source-network'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.source_address is not None:
return True
if self.source_wild_card_bits is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.SourceNetwork']['meta_info']
class DestinationNetwork(object):
"""
Destination network settings.
.. attribute:: destination_address
Destination IPv4 address to match (if a protocol was specified), leave unspecified for any
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: destination_wild_card_bits
Wildcard bits to apply to destination address (if specified), leave unspecified for no wildcarding
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.destination_address = None
self.destination_wild_card_bits = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:destination-network'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.destination_address is not None:
return True
if self.destination_wild_card_bits is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.DestinationNetwork']['meta_info']
class SourcePort(object):
"""
Source port settings.
.. attribute:: first_source_port
First source port for comparison, leave unspecified if source port comparison is not to be performed
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclPortNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclPortNumberEnum>`
----
**type**\: int
**range:** 0..65535
----
.. attribute:: second_source_port
Second source port for comparison, leave unspecified if source port comparison is not to be performed
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclPortNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclPortNumberEnum>`
----
**type**\: int
**range:** 0..65535
----
.. attribute:: source_operator
Source comparison operator . Leave unspecified if no source port comparison is to be done
**type**\: :py:class:`Ipv4AclOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclOperatorEnumEnum>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.first_source_port = None
self.second_source_port = None
self.source_operator = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:source-port'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.first_source_port is not None:
return True
if self.second_source_port is not None:
return True
if self.source_operator is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.SourcePort']['meta_info']
class DestinationPort(object):
"""
Destination port settings.
.. attribute:: destination_operator
Destination comparison operator. Leave unspecified if no destination port comparison is to be done
**type**\: :py:class:`Ipv4AclOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclOperatorEnumEnum>`
.. attribute:: first_destination_port
First destination port for comparison, leave unspecified if destination port comparison is not to be performed
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclPortNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclPortNumberEnum>`
----
**type**\: int
**range:** 0..65535
----
.. attribute:: second_destination_port
Second destination port for comparison, leave unspecified if destination port comparison is not to be performed
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclPortNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclPortNumberEnum>`
----
**type**\: int
**range:** 0..65535
----
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.destination_operator = None
self.first_destination_port = None
self.second_destination_port = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:destination-port'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.destination_operator is not None:
return True
if self.first_destination_port is not None:
return True
if self.second_destination_port is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.DestinationPort']['meta_info']
class Icmp(object):
"""
ICMP settings.
.. attribute:: icmp_type_code
Well known ICMP message code types to match, leave unspecified if ICMP message code type comparison is not to be performed
**type**\: :py:class:`Ipv4AclIcmpTypeCodeEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclIcmpTypeCodeEnumEnum>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.icmp_type_code = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:icmp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.icmp_type_code is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Icmp']['meta_info']
class Tcp(object):
"""
TCP settings.
.. attribute:: tcp_bits
TCP bits to match. Leave unspecified if comparison of TCP bits is not required
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclTcpBitsNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclTcpBitsNumberEnum>`
----
**type**\: int
**range:** 0..32
----
.. attribute:: tcp_bits_mask
TCP bits mask to use for flexible TCP matching. Leave unspecified if tcp\-bits\-match\-operator is unspecified
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclTcpBitsNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclTcpBitsNumberEnum>`
----
**type**\: int
**range:** 0..32
----
.. attribute:: tcp_bits_match_operator
TCP Bits match operator. Leave unspecified if flexible comparison of TCP bits is not required
**type**\: :py:class:`Ipv4AclTcpMatchOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclTcpMatchOperatorEnumEnum>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.tcp_bits = None
self.tcp_bits_mask = None
self.tcp_bits_match_operator = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:tcp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.tcp_bits is not None:
return True
if self.tcp_bits_mask is not None:
return True
if self.tcp_bits_match_operator is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Tcp']['meta_info']
class PacketLength(object):
"""
Packet length settings.
.. attribute:: packet_length_max
Maximum packet length for comparison, leave unspecified if packet length comparison is not to be performed or if only the minimum packet length should be considered
**type**\: int
**range:** 0..65535
.. attribute:: packet_length_min
Minimum packet length for comparison, leave unspecified if packet length comparison is not to be performed or if only the maximum packet length should be considered
**type**\: int
**range:** 0..65535
.. attribute:: packet_length_operator
Packet length operator applicable if Packet length is to be compared. Leave unspecified if no packet length comparison is to be done
**type**\: :py:class:`Ipv4AclOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclOperatorEnumEnum>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.packet_length_max = None
self.packet_length_min = None
self.packet_length_operator = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:packet-length'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.packet_length_max is not None:
return True
if self.packet_length_min is not None:
return True
if self.packet_length_operator is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.PacketLength']['meta_info']
class TimeToLive(object):
"""
TTL settings.
.. attribute:: time_to_live_max
Maximum TTL for comparison, leave unspecified if TTL comparison is not to be performed or if only the minimum TTL should be considered
**type**\: int
**range:** 0..255
.. attribute:: time_to_live_min
TTL value for comparison OR Minimum TTL value for TTL range comparison, leave unspecified if TTL classification is not required
**type**\: int
**range:** 0..255
.. attribute:: time_to_live_operator
TTL operator is applicable if TTL is to be compared. Leave unspecified if TTL classification is not required
**type**\: :py:class:`Ipv4AclOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclOperatorEnumEnum>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.time_to_live_max = None
self.time_to_live_min = None
self.time_to_live_operator = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:time-to-live'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.time_to_live_max is not None:
return True
if self.time_to_live_min is not None:
return True
if self.time_to_live_operator is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.TimeToLive']['meta_info']
class NextHop(object):
"""
Next\-hop settings.
.. attribute:: next_hop_1
The first next\-hop settings
**type**\: :py:class:`NextHop1 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop1>`
.. attribute:: next_hop_2
The second next\-hop settings
**type**\: :py:class:`NextHop2 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop2>`
.. attribute:: next_hop_3
The third next\-hop settings
**type**\: :py:class:`NextHop3 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop3>`
.. attribute:: next_hop_type
The nexthop type
**type**\: :py:class:`NextHopTypeEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.NextHopTypeEnum>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.next_hop_1 = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop1()
self.next_hop_1.parent = self
self.next_hop_2 = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop2()
self.next_hop_2.parent = self
self.next_hop_3 = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop3()
self.next_hop_3.parent = self
self.next_hop_type = None
class NextHop1(object):
"""
The first next\-hop settings.
.. attribute:: next_hop
The IPv4 address of the next\-hop
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: track_name
The object tracking name for the next\-hop
**type**\: str
.. attribute:: vrf_name
The VRF name of the next\-hop
**type**\: str
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.next_hop = None
self.track_name = None
self.vrf_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:next-hop-1'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.next_hop is not None:
return True
if self.track_name is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop1']['meta_info']
class NextHop2(object):
"""
The second next\-hop settings.
.. attribute:: next_hop
The IPv4 address of the next\-hop
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: track_name
The object tracking name for the next\-hop
**type**\: str
.. attribute:: vrf_name
The VRF name of the next\-hop
**type**\: str
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.next_hop = None
self.track_name = None
self.vrf_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:next-hop-2'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.next_hop is not None:
return True
if self.track_name is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop2']['meta_info']
class NextHop3(object):
"""
The third next\-hop settings.
.. attribute:: next_hop
The IPv4 address of the next\-hop
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: track_name
The object tracking name for the next\-hop
**type**\: str
.. attribute:: vrf_name
The VRF name of the next\-hop
**type**\: str
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.next_hop = None
self.track_name = None
self.vrf_name = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:next-hop-3'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.next_hop is not None:
return True
if self.track_name is not None:
return True
if self.vrf_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop3']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:next-hop'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.next_hop_1 is not None and self.next_hop_1._has_data():
return True
if self.next_hop_2 is not None and self.next_hop_2._has_data():
return True
if self.next_hop_3 is not None and self.next_hop_3._has_data():
return True
if self.next_hop_type is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop']['meta_info']
class Dscp(object):
"""
DSCP settings.
.. attribute:: dscp_max
Maximum DSCP value for comparison, leave unspecified if DSCP comparison is not to be performed or if only the minimum DSCP should be considered
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclDscpNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclDscpNumberEnum>`
----
**type**\: int
**range:** 0..63
----
.. attribute:: dscp_min
DSCP value to match or minimum DSCP value for DSCP range comparison, leave unspecified if DSCP comparison is not to be performed
**type**\: one of the below types:
**type**\: :py:class:`Ipv4AclDscpNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclDscpNumberEnum>`
----
**type**\: int
**range:** 0..63
----
.. attribute:: dscp_operator
DSCP operator is applicable only when DSCP range is configured. Leave unspecified if DSCP range is not required
**type**\: :py:class:`Ipv4AclOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclOperatorEnumEnum>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.dscp_max = None
self.dscp_min = None
self.dscp_operator = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:dscp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.dscp_max is not None:
return True
if self.dscp_min is not None:
return True
if self.dscp_operator is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Dscp']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.sequence_number is None:
raise YPYModelError('Key property sequence_number is None')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:access-list-entry[Cisco-IOS-XR-ipv4-acl-cfg:sequence-number = ' + str(self.sequence_number) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.sequence_number is not None:
return True
if self.capture is not None:
return True
if self.counter_name is not None:
return True
if self.destination_network is not None and self.destination_network._has_data():
return True
if self.destination_port is not None and self.destination_port._has_data():
return True
if self.destination_port_group is not None:
return True
if self.destination_prefix_group is not None:
return True
if self.dscp is not None and self.dscp._has_data():
return True
if self.fragments is not None:
return True
if self.grant is not None:
return True
if self.icmp is not None and self.icmp._has_data():
return True
if self.icmp_off is not None:
return True
if self.igmp_message_type is not None:
return True
if self.log_option is not None:
return True
if self.next_hop is not None and self.next_hop._has_data():
return True
if self.packet_length is not None and self.packet_length._has_data():
return True
if self.precedence is not None:
return True
if self.protocol is not None:
return True
if self.remark is not None:
return True
if self.source_network is not None and self.source_network._has_data():
return True
if self.source_port is not None and self.source_port._has_data():
return True
if self.source_port_group is not None:
return True
if self.source_prefix_group is not None:
return True
if self.tcp is not None and self.tcp._has_data():
return True
if self.time_to_live is not None and self.time_to_live._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:access-list-entries'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.access_list_entry is not None:
for child_ref in self.access_list_entry:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries']['meta_info']
@property
def _common_path(self):
if self.access_list_name is None:
raise YPYModelError('Key property access_list_name is None')
return '/Cisco-IOS-XR-ipv4-acl-cfg:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-cfg:accesses/Cisco-IOS-XR-ipv4-acl-cfg:access[Cisco-IOS-XR-ipv4-acl-cfg:access-list-name = ' + str(self.access_list_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.access_list_name is not None:
return True
if self.access_list_entries is not None and self.access_list_entries._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-cfg:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-cfg:accesses'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.access is not None:
for child_ref in self.access:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Accesses']['meta_info']
class Prefixes(object):
"""
Table of ACL prefix lists. Entries in this
table and the PrefixListExistenceTable table
must be kept consistent
.. attribute:: prefix
Name of a prefix list
**type**\: list of :py:class:`Prefix <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Prefixes.Prefix>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.prefix = YList()
self.prefix.parent = self
self.prefix.name = 'prefix'
class Prefix(object):
"""
Name of a prefix list
.. attribute:: prefix_list_name <key>
Prefix list name \- max 32 characters
**type**\: str
.. attribute:: prefix_list_entries
Sequence of entries forming a prefix list
**type**\: :py:class:`PrefixListEntries <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Prefixes.Prefix.PrefixListEntries>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.prefix_list_name = None
self.prefix_list_entries = None
class PrefixListEntries(object):
"""
Sequence of entries forming a prefix list
.. attribute:: prefix_list_entry
A prefix list entry; either a description (remark) or a prefix to match against
**type**\: list of :py:class:`PrefixListEntry <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Prefixes.Prefix.PrefixListEntries.PrefixListEntry>`
.. attribute:: _is_presence
Is present if this instance represents presence container else not
**type**\: bool
This class is a :ref:`presence class<presence-class>`
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self._is_presence = True
self.prefix_list_entry = YList()
self.prefix_list_entry.parent = self
self.prefix_list_entry.name = 'prefix_list_entry'
class PrefixListEntry(object):
"""
A prefix list entry; either a description
(remark) or a prefix to match against
.. attribute:: sequence_number <key>
Sequence number of prefix list
**type**\: int
**range:** 1..2147483646
.. attribute:: exact_prefix_length
If exact prefix length matching specified, set the length of prefix to be matched
**type**\: int
**range:** 0..32
.. attribute:: grant
Whether to forward or drop packets matching the prefix list
**type**\: :py:class:`Ipv4AclGrantEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclGrantEnumEnum>`
.. attribute:: match_exact_length
Set to perform an exact prefix length match. Item is mutually exclusive with minimum and maximum length match items
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: match_max_length
Set to perform a maximum length prefix match . Item is mutually exclusive with exact length match item
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: match_min_length
Set to perform a minimum length prefix match . Item is mutually exclusive with exact length match item
**type**\: :py:class:`Empty <ydk.types.Empty>`
.. attribute:: max_prefix_length
If maximum length prefix matching specified, set the maximum length of prefix to be matched
**type**\: int
**range:** 0..32
.. attribute:: min_prefix_length
If minimum length prefix matching specified, set the minimum length of prefix to be matched
**type**\: int
**range:** 0..32
.. attribute:: netmask
Mask of IPv4 address prefix
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: prefix
IPv4 address prefix to match
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: remark
Comments or a description for the prefix list. Item is mutually exclusive with all others in the object
**type**\: str
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.sequence_number = None
self.exact_prefix_length = None
self.grant = None
self.match_exact_length = None
self.match_max_length = None
self.match_min_length = None
self.max_prefix_length = None
self.min_prefix_length = None
self.netmask = None
self.prefix = None
self.remark = None
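# Hedged usage sketch (not part of the generated bindings): one prefix-list
# entry matching 192.0.2.0/24. The enum literal is an assumption; the exact
# names live in Cisco_IOS_XR_ipv4_acl_datatypes.
#
#     entry = Ipv4AclAndPrefixList.Prefixes.Prefix.PrefixListEntries.PrefixListEntry()
#     entry.sequence_number = 5
#     entry.prefix = '192.0.2.0'
#     entry.netmask = '255.255.255.0'
#     entry.grant = Ipv4AclGrantEnumEnum.PERMIT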
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
if self.sequence_number is None:
raise YPYModelError('Key property sequence_number is None')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:prefix-list-entry[Cisco-IOS-XR-ipv4-acl-cfg:sequence-number = ' + str(self.sequence_number) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.sequence_number is not None:
return True
if self.exact_prefix_length is not None:
return True
if self.grant is not None:
return True
if self.match_exact_length is not None:
return True
if self.match_max_length is not None:
return True
if self.match_min_length is not None:
return True
if self.max_prefix_length is not None:
return True
if self.min_prefix_length is not None:
return True
if self.netmask is not None:
return True
if self.prefix is not None:
return True
if self.remark is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Prefixes.Prefix.PrefixListEntries.PrefixListEntry']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:prefix-list-entries'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self._is_presence:
return True
if self.prefix_list_entry is not None:
for child_ref in self.prefix_list_entry:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Prefixes.Prefix.PrefixListEntries']['meta_info']
@property
def _common_path(self):
if self.prefix_list_name is None:
raise YPYModelError('Key property prefix_list_name is None')
return '/Cisco-IOS-XR-ipv4-acl-cfg:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-cfg:prefixes/Cisco-IOS-XR-ipv4-acl-cfg:prefix[Cisco-IOS-XR-ipv4-acl-cfg:prefix-list-name = ' + str(self.prefix_list_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.prefix_list_name is not None:
return True
if self.prefix_list_entries is not None and self.prefix_list_entries._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Prefixes.Prefix']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-cfg:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-cfg:prefixes'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.prefix is not None:
for child_ref in self.prefix:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.Prefixes']['meta_info']
class LogUpdate(object):
"""
Control access lists log updates
.. attribute:: rate
Log update rate (log msgs per second)
**type**\: int
**range:** 1..1000
.. attribute:: threshold
Log update threshold (number of hits)
**type**\: int
**range:** 1..2147483647
"""
_prefix = 'ipv4-acl-cfg'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.rate = None
self.threshold = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-cfg:ipv4-acl-and-prefix-list/Cisco-IOS-XR-ipv4-acl-cfg:log-update'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.rate is not None:
return True
if self.threshold is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList.LogUpdate']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-ipv4-acl-cfg:ipv4-acl-and-prefix-list'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if not self.is_config():
return False
if self.accesses is not None and self.accesses._has_data():
return True
if self.log_update is not None and self.log_update._has_data():
return True
if self.prefixes is not None and self.prefixes._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta
return meta._meta_table['Ipv4AclAndPrefixList']['meta_info']
| apache-2.0 | -9,191,342,410,181,101,000 | 41.938371 | 225 | 0.433109 | false |
hasgeek/funnel | funnel/models/reorder_mixin.py | 1 | 5123 | from __future__ import annotations
from typing import TypeVar, Union
from uuid import UUID
from coaster.sqlalchemy import Query
from . import db
__all__ = ['ReorderMixin']
# Use of TypeVar for subclasses of ReorderMixin as defined in this mypy ticket:
# https://github.com/python/mypy/issues/1212
Reorderable = TypeVar('Reorderable', bound='ReorderMixin')
class ReorderMixin:
"""Adds support for re-ordering sequences within a parent container."""
#: Subclasses must have a created_at column
created_at: db.Column
#: Subclass must have a primary key that is int or uuid
id: Union[int, UUID] # noqa: A003
#: Subclass must declare a parent_id synonym to the parent model fkey column
parent_id: Union[int, UUID]
#: Subclass must declare a seq column or synonym, holding a sequence id. It need not
#: be unique, but reordering is meaningless when both items have the same number
seq: db.Column
#: Subclass must offer a SQLAlchemy query (this is standard from base classes)
query: Query
@property
def parent_scoped_reorder_query_filter(self: Reorderable):
"""
Return a query filter that includes a scope limitation to the parent.
Used alongside the :attr:`seq` column to retrieve a sequence value. Subclasses
may need to override if they have additional criteria relative to the parent,
such as needing to exclude revoked membership records.
"""
cls = self.__class__
return cls.parent_id == self.parent_id
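# Hedged sketch (not part of this module): a subclass whose rows can be
# revoked might narrow the reorder scope beyond the parent id. `revoked_at`
# is a hypothetical column used only for illustration.
#
#     @property
#     def parent_scoped_reorder_query_filter(self):
#         cls = self.__class__
#         return db.and_(cls.parent_id == self.parent_id, cls.revoked_at.is_(None))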
def reorder_item(self: Reorderable, other: Reorderable, before: bool) -> None:
"""Reorder self before or after other item."""
cls = self.__class__
# Safety checks
if other.__class__ is not cls:
raise TypeError("Other must be of the same type")
if other.parent_id != self.parent_id:
raise ValueError("Other must have the same parent")
if self.seq is None or other.seq is None:
raise ValueError("Sequence numbers must be pre-assigned to reorder")
if before:
if self.seq <= other.seq:
# We're already before or equal. Nothing to do.
return
order_columns = (cls.seq.desc(), cls.created_at.desc())
else:
if self.seq >= other.seq:
# We're already after or equal. Nothing to do.
return
order_columns = (cls.seq.asc(), cls.created_at.asc())
# Get all sequence numbers between self and other inclusive. Use:
# descending order if moving up (before other),
# ascending order if moving down (after other)
items_to_reorder = (
cls.query.filter(
self.parent_scoped_reorder_query_filter,
cls.seq >= min(self.seq, other.seq),
cls.seq <= max(self.seq, other.seq),
)
.options(db.load_only(cls.id, cls.seq))
.order_by(*order_columns)
.all()
)
# Pop-off items that share a sequence number and don't need to be moved
while items_to_reorder[0].id != self.id:
items_to_reorder.pop(0)
# Reordering! Move down the list (reversed if `before`), reassigning numbers.
# This list will always start with `self` and end with `other` (with a possible
# tail of items that share the same sequence number as `other`). We assign
# self's sequence number to the next item in the list, and that one's to the
# next and so on until we reach `other`. Then we assign other's sequence
# number to self and we're done.
new_seq_number = self.seq
# Temporarily give self an out-of-bounds number
self.seq = (
db.select([db.func.coalesce(db.func.max(cls.seq) + 1, 1)])
.where(self.parent_scoped_reorder_query_filter)
.scalar_subquery()
)
# Flush it so the db doesn't complain when there's a unique constraint
db.session.flush()
# Reassign all remaining sequence numbers
for reorderable_item in items_to_reorder[1:]: # Skip 0, which is self
reorderable_item.seq, new_seq_number = new_seq_number, reorderable_item.seq
# Flush to force execution order. This does not expunge SQLAlchemy cache as
# of SQLAlchemy 1.3.x. Should that behaviour change, a switch to
# bulk_update_mappings will be required
db.session.flush()
if reorderable_item.id == other.id:
# Don't bother reordering anything after `other`
break
# Assign other's previous sequence number to self
self.seq = new_seq_number
db.session.flush()
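# Illustrative usage sketch (assumptions, not from this codebase): a model
# mixing this in needs `parent_id`, `seq` and `created_at`; `Ticket` and
# `project_id` below are invented names for the example only.
#
#     class Ticket(ReorderMixin, db.Model):
#         id = db.Column(db.Integer, primary_key=True)
#         project_id = db.Column(db.Integer, db.ForeignKey('project.id'), nullable=False)
#         parent_id = db.synonym('project_id')
#         seq = db.Column(db.Integer, nullable=False)
#         created_at = db.Column(db.DateTime, nullable=False)
#
#     # Move ticket_a immediately before ticket_b within the same project
#     ticket_a.reorder_before(ticket_b)
#     db.session.commit()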
def reorder_before(self: Reorderable, other: Reorderable) -> None:
"""Reorder to be before another item's sequence number."""
self.reorder_item(other, True)
def reorder_after(self: Reorderable, other: Reorderable) -> None:
"""Reorder to be after another item's sequence number."""
self.reorder_item(other, False)
| agpl-3.0 | 1,974,439,064,453,346,300 | 40.314516 | 88 | 0.626586 | false |
csparkresearch/ExpEYES17-Qt | SPARK17/textManual/MarkdownPP/Processor.py | 1 | 2387 | # Copyright 2015 John Reese
# Licensed under the MIT license
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys
if sys.version_info[0] != 2:
basestring = str
class Processor:
"""
Framework for allowing modules to modify the input data as a set of
transforms. Once the original input data is loaded, the preprocessor
iteratively allows Modules to inspect the data and generate a list of
Transforms against the data. The Transforms are applied in descending
order by line number, and the resulting data is used for the next pass.
Once all modules have transformed the data, it is ready for writing out
to a file.
"""
data = []
transforms = {}
modules = []
def register(self, module):
"""
This method registers an individual module to be called when processing
"""
self.modules.append(module)
def input(self, file):
"""
This method reads the original data from an object following
the file interface.
"""
self.data = file.readlines()
def process(self):
"""
This method handles the actual processing of Modules and Transforms
"""
self.modules.sort(key=lambda x: x.priority)
for module in self.modules:
transforms = module.transform(self.data)
transforms.sort(key=lambda x: x.linenum, reverse=True)
for transform in transforms:
linenum = transform.linenum
if isinstance(transform.data, basestring):
transform.data = [transform.data]
if transform.oper == "prepend":
self.data[linenum:linenum] = transform.data
elif transform.oper == "append":
self.data[linenum+1:linenum+1] = transform.data
elif transform.oper == "swap":
self.data[linenum:linenum+1] = transform.data
elif transform.oper == "drop":
self.data[linenum:linenum+1] = []
elif transform.oper == "noop":
pass
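# Hedged sketch of a module this framework could drive (not part of this
# file). The Transform objects only need the linenum/oper/data attributes
# consumed by the loop above; the constructor shown is an assumption.
#
#     class TodoStripper(Module):
#         priority = 1
#         def transform(self, data):
#             return [Transform(linenum=i, oper="drop", data=None)
#                     for i, line in enumerate(data)
#                     if line.startswith("TODO")]
#
#     processor = Processor()
#     processor.register(TodoStripper())
#     with open("in.mdpp") as src, open("out.md", "w") as dst:
#         processor.input(src)
#         processor.process()
#         processor.output(dst)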
def output(self, file):
"""
This method writes the resulting data to an object following
the file interface.
"""
file.writelines(self.data)
| mit | -5,170,128,148,573,232,000 | 29.602564 | 79 | 0.600335 | false |
rocky/python3-trepan | trepan/processor/command/ipython.py | 1 | 6554 | # -*- coding: utf-8 -*-
# Copyright (C) 2009-2010, 2013, 2015, 2017, 2020 Rocky Bernstein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import code, sys
# Our local modules
from trepan.processor.command.base_cmd import DebuggerCommand
from traitlets.config.loader import Config
class IPythonCommand(DebuggerCommand):
"""**ipython** [**-d**]
Run IPython as a command subshell.
If *-d* is passed, you can access debugger state via local variable *debugger*.
To issue a debugger command use function *dbgr()*. For example:
dbgr('info program')
See also:
---------
`python`, `bpython`
"""
short_help = "Run IPython as a command subshell"
DebuggerCommand.setup(locals(), category="support", max_args=1)
def dbgr(self, string):
"""Invoke a debugger command from inside a IPython shell called
inside the debugger.
"""
self.proc.cmd_queue.append(string)
self.proc.process_command()
return
def run(self, args):
# See if python's code module is around
# Python does its own history thing.
# Make sure it doesn't damage ours.
have_line_edit = self.debugger.intf[-1].input.line_edit
if have_line_edit:
try:
self.proc.write_history_file()
except IOError:
pass
pass
cfg = Config()
banner_tmpl = """IPython trepan3k shell%s
Use dbgr(*string*) to issue non-continuing debugger command: *string*"""
debug = len(args) > 1 and args[1] == "-d"
if debug:
banner_tmpl += "\nVariable 'debugger' contains a trepan " "debugger object."
pass
try:
from IPython.terminal.embed import InteractiveShellEmbed
except ImportError:
from IPython.frontend.terminal.embed import InteractiveShellEmbed
# Now create an instance of the embeddable shell. The first
# argument is a string with options exactly as you would type them
# if you were starting IPython at the system command line. Any
# parameters you want to define for configuration can thus be
# specified here.
# Add common classes and methods to our namespace here so that
# inside the ipython shell users don't have to run imports
my_locals = {}
my_globals = None
if self.proc.curframe:
my_globals = self.proc.curframe.f_globals
if self.proc.curframe.f_locals:
my_locals = self.proc.curframe.f_locals
pass
pass
# Give IPython and the user a way to get access to the debugger.
if debug:
my_locals["debugger"] = self.debugger
my_locals["dbgr"] = self.dbgr
cfg.TerminalInteractiveShell.confirm_exit = False
# sys.ps1 = 'trepan3 >>> '
if len(my_locals):
banner = banner_tmpl % " with locals"
else:
banner = banner_tmpl % ""
pass
InteractiveShellEmbed(
config=cfg,
banner1=banner,
user_ns=my_locals,
module=my_globals,
exit_msg="IPython exiting to trepan3k...",
)()
# restore completion and our history if we can do so.
if hasattr(self.proc.intf[-1], "complete"):
try:
from readline import set_completer, parse_and_bind
parse_and_bind("tab: complete")
set_completer(self.proc.intf[-1].complete)
except ImportError:
pass
pass
if have_line_edit:
self.proc.read_history_file()
pass
return
pass
# Monkey-patched from code.py
# FIXME: get changes into Python.
def interact(banner=None, readfunc=None, my_locals=None, my_globals=None):
"""Almost a copy of code.interact
Closely emulate the interactive Python interpreter.
This is a backwards compatible interface to the InteractiveConsole
class. When readfunc is not specified, it attempts to import the
readline module to enable GNU readline if it is available.
Arguments (all optional, all default to None):
banner -- passed to InteractiveConsole.interact()
readfunc -- if not None, replaces InteractiveConsole.raw_input()
local -- passed to InteractiveInterpreter.__init__()
"""
console = code.InteractiveConsole(my_locals, filename="<trepan>")
console.runcode = lambda code_obj: runcode(console, code_obj)
setattr(console, "globals", my_globals)
if readfunc is not None:
console.raw_input = readfunc
else:
try:
import readline
except ImportError:
pass
console.interact(banner)
pass
# Also monkey-patched from code.py
# FIXME: get changes into Python.
def runcode(obj, code_obj):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to
display a traceback. All exceptions are caught except
SystemExit, which is reraised.
A note about KeyboardInterrupt: this exception may occur
elsewhere in this code, and may not always be caught. The
caller should be prepared to deal with it.
"""
try:
exec(code_obj, obj.locals, obj.globals)
except SystemExit:
raise
except:
obj.showtraceback()
else:
if hasattr(code, "softspace") and code.softspace(sys.stdout, 0):
print()
pass
pass
return
if __name__ == "__main__":
from trepan.debugger import Trepan
d = Trepan()
command = IPythonCommand(d.core.processor)
command.proc.frame = sys._getframe()
command.proc.setup()
if len(sys.argv) > 1:
print("Type Python commands and exit to quit.")
print(sys.argv[1])
if sys.argv[1] == "-d":
print(command.run(["bpython", "-d"]))
else:
print(command.run(["bpython"]))
pass
pass
pass
| gpl-3.0 | 5,708,448,502,736,158,000 | 29.769953 | 88 | 0.623741 | false |
alcides/rdflib | rdflib/syntax/parsers/rdfa/state.py | 1 | 20868 | # -*- coding: utf-8 -*-
"""
Parser's execution context (a.k.a. state) object and handling. The state includes:
- dictionary for namespaces. Keys are the namespace prefixes, values are RDFLib Namespace instances
- language, retrieved from C{@xml:lang}
- URI base, determined by <base> (or set explicitly). This is a little bit superfluous, because the current RDFa syntax does not make use of C{@xml:base}; ie, this could be a global value. But the structure is prepared to add C{@xml:base} easily, if needed.
- options, in the form of an L{Options<pyRdfa.Options>} instance
The execution context object is also used to turn relative URI-s and CURIES into real URI references.
@summary: RDFa core parser processing step
@requires: U{RDFLib package<http://rdflib.net>}
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{Ivan Herman<a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
@var XHTML_PREFIX: prefix for the XHTML vocabulary namespace
@var XHTML_URI: URI prefix of the XHTML vocabulary
@var RDFa_PROFILE: the official RDFa profile URI
@var RDFa_VERSION: the official version string of RDFa
@var usual_protocols: list of "usual" protocols (used to generate warnings when CURIES are not protected)
@var _predefined_rel: list of predefined C{@rev} and C{@rel} values that should be mapped onto the XHTML vocabulary URI-s.
@var _predefined_property: list of predefined C{@property} values that should be mapped onto the XHTML vocabulary URI-s. (At present, this list is empty, but this has been an ongoing question in the group, so the I{mechanism} of checking is still there.)
@var __bnodes: dictionary of blank node names to real blank node
@var __empty_bnode: I{The} Bnode to be associated with the CURIE of the form "C{_:}".
"""
from rdflib.namespace import Namespace, RDF, RDFS
from rdflib.term import BNode, URIRef
from rdflib.syntax.parsers.rdfa.options import Options, GENERIC_XML, XHTML_RDFA, HTML5_RDFA
import re
import random
import urlparse
RDFa_PROFILE = "http://www.w3.org/1999/xhtml/vocab"
RDFa_VERSION = "XHTML+RDFa 1.0"
RDFa_PublicID = "-//W3C//DTD XHTML+RDFa 1.0//EN"
RDFa_SystemID = "http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd"
usual_protocols = ["http", "https", "mailto", "ftp", "urn", "gopher", "tel", "ldap", "doi", "news"]
####Predefined @rel/@rev/@property values
# predefined values for the @rel and @rev values. These are considered to be part of a specific
# namespace, defined by the RDFa document.
# At the moment, there are no predefined @property values, but the code is there in case
# some will be defined
XHTML_PREFIX = "xhv"
XHTML_URI = "http://www.w3.org/1999/xhtml/vocab#"
_predefined_rel = ['alternate', 'appendix', 'cite', 'bookmark', 'chapter', 'contents',
'copyright', 'glossary', 'help', 'icon', 'index', 'meta', 'next', 'p3pv1', 'prev',
'role', 'section', 'subsection', 'start', 'license', 'up', 'last', 'stylesheet', 'first', 'top']
_predefined_property = []
#### Managing blank nodes for CURIE-s
__bnodes = {}
__empty_bnode = BNode()
def _get_bnode_from_Curie(var):
"""
'Var' gives the string after the colon in a CURIE of the form C{_:XXX}. If this variable has been used
before, then the corresponding BNode is returned; otherwise a new BNode is created and
associated to that value.
@param var: CURIE BNode identifier
@return: BNode
"""
if len(var) == 0:
return __empty_bnode
if var in __bnodes:
return __bnodes[var]
else:
retval = BNode()
__bnodes[var] = retval
return retval
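# Behaviour sketch (not in the original source): repeated use of the same
# blank node identifier yields the same BNode, and the bare "_:" CURIE always
# maps to the single shared empty BNode.
#
#     _get_bnode_from_Curie("a") is _get_bnode_from_Curie("a")  # True
#     _get_bnode_from_Curie("") is _get_bnode_from_Curie("")    # True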
#### Quote URI-s
import urllib
# 'safe' characters for the URI quoting, ie, characters that can safely stay as they are. Other
# special characters are converted to their %.. equivalents for namespace prefixes
_unquotedChars = ':/\?=#'
_warnChars = [' ', '\n', '\r', '\t']
def _quote(uri, options):
"""
'quote' a URI, i.e., exchange special characters for their '%..' equivalents. Some of the characters
may stay as they are (listed in L{_unquotedChars}). If one of the characters listed in L{_warnChars}
is also in the uri, an extra warning is generated.
@param uri: URI
@param options:
@type options: L{Options<pyRdfa.Options>}
"""
suri = uri.strip()
for c in _warnChars:
if suri.find(c) != -1:
if options != None:
options.comment_graph.add_warning('Unusual character in uri:%s; possible error?' % suri)
break
return urllib.quote(suri, _unquotedChars)
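# Behaviour sketch (not in the original source): characters in _unquotedChars
# survive, other unsafe characters are percent-encoded, e.g.
#
#     _quote("http://example.org/a b", None)  # -> 'http://example.org/a%20b'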
#### Core Class definition
class ExecutionContext(object):
"""State at a specific node, including the current set
of namespaces in the RDFLib sense, the
current language, and the base. The class is also used to interpret URI-s and CURIE-s to produce
URI references for RDFLib.
@ivar options: reference to the overall options
@type ivar: L{Options.Options}
@ivar base: the 'base' URI
@ivar defaultNS: default namespace
@ivar lang: language tag (possibly None)
@ivar ns: dictionary of namespaces
@type ns: dictionary, each value is an RDFLib Namespace object
"""
def __init__(self, node, graph, inherited_state=None, base="", options=None):
"""
@param node: the current DOM Node
@param graph: the RDFLib Graph
@keyword inherited_state: the state as inherited
from upper layers. This inherited_state is mixed with the state information
retrieved from the current node.
@type inherited_state: L{State.ExecutionContext}
@keyword base: string denoting the base URI for the specific node. This overrides the possible
base inherited from the upper layers. The
current XHTML+RDFa syntax does not allow the usage of C{@xml:base}, but SVG1.2 does, so this is
necessary for SVG (and other possible XML dialects that accept C{@xml:base})
@keyword options: invocation option
@type options: L{Options<pyRdfa.Options>}
"""
#-----------------------------------------------------------------
# settling the base
# note that, strictly speaking, it is not necessary to add the base to the
# context, because there is only one place to set it (<base> element of the <header>).
# It is done because it is prepared for a possible future change in direction of
# accepting xml:base on each element.
# At the moment, it is invoked with a 'None' at the top level of parsing, that is
# when the <base> element is looked for.
if inherited_state:
self.base = inherited_state.base
self.options = inherited_state.options
# for generic XML versions the xml:base attribute should be handled
if self.options.host_language == GENERIC_XML and node.hasAttribute("xml:base"):
self.base = node.getAttribute("xml:base")
else:
# this is the branch called from the very top
self.base = ""
for bases in node.getElementsByTagName("base"):
if bases.hasAttribute("href"):
self.base = bases.getAttribute("href")
continue
if self.base == "":
self.base = base
# this is just to play safe. I believe this branch should actually not happen...
if options == None:
from pyRdfa import Options
self.options = Options()
else:
self.options = options
# xml:base is not part of XHTML+RDFa, but it is a valid setting for, say, SVG1.2
if self.options.host_language == GENERIC_XML and node.hasAttribute("xml:base"):
self.base = node.getAttribute("xml:base")
self.options.comment_graph.set_base_URI(URIRef(_quote(base, self.options)))
# check the the presense of the @profile and or @version attribute for the RDFa profile...
# This whole branch is, however, irrelevant if the host language is a generic XML one (eg, SVG)
if self.options.host_language != GENERIC_XML:
doctype = None
try:
# I am not 100% sure the HTML5 minidom implementation has this, so let us just be
# cautious here...
doctype = node.ownerDocument.doctype
except:
pass
if doctype == None or not( doctype.publicId == RDFa_PublicID and doctype.systemId == RDFa_SystemID ):
# next level: check the version
html = node.ownerDocument.documentElement
if not( html.hasAttribute("version") and RDFa_VERSION == html.getAttribute("version") ):
# see if least the profile has been set
# Find the <head> element
head = None
for index in range(0, html.childNodes.length):
if html.childNodes.item(index).nodeName == "head":
head = html.childNodes.item(index)
break
if not( head != None and head.hasAttribute("profile") and RDFa_PROFILE in head.getAttribute("profile").strip().split() ):
if self.options.host_language == HTML5_RDFA:
self.options.comment_graph.add_info("RDFa profile or RFDa version has not been set (for a correct identification of RDFa). This is not a requirement for RDFa, but it is advised to use one of those nevertheless. Note that in the case of HTML5, the DOCTYPE setting may not work...")
else:
self.options.comment_graph.add_info("None of the RDFa DOCTYPE, RDFa profile, or RFDa version has been set (for a correct identification of RDFa). This is not a requirement for RDFa, but it is advised to use one of those nevertheless.")
#-----------------------------------------------------------------
# Settling the language tags
# check first the lang or xml:lang attribute
# RDFa does not allow the lang attribute. HTML5 relies :-( on @lang;
# I just want to be prepared here...
if options != None and options.host_language == HTML5_RDFA and node.hasAttribute("lang"):
self.lang = node.getAttribute("lang")
if len(self.lang) == 0 : self.lang = None
elif node.hasAttribute("xml:lang"):
self.lang = node.getAttribute("xml:lang")
if len(self.lang) == 0 : self.lang = None
elif inherited_state:
self.lang = inherited_state.lang
else:
self.lang = None
#-----------------------------------------------------------------
# Handling namespaces
# First get the local xmlns declarations/namespaces stuff.
dict = {}
for i in range(0, node.attributes.length):
attr = node.attributes.item(i)
if attr.name.find('xmlns:') == 0 :
# yep, there is a namespace setting
key = attr.localName
if key != "" : # exclude the top level xmlns setting...
if key == "_":
if warning: self.options.comment_graph.add_error("The '_' local CURIE prefix is reserved for blank nodes, and cannot be changed" )
elif key.find(':') != -1:
if warning: self.options.comment_graph.add_error("The character ':' is not valid in a CURIE Prefix" )
else :
# quote the URI, ie, convert special characters into %.. This is
# true, for example, for spaces
uri = _quote(attr.value, self.options)
# 1. create a new Namespace entry
ns = Namespace(uri)
# 2. 'bind' it in the current graph to
# get a nicer output
graph.bind(key, uri)
# 3. Add an entry to the dictionary
dict[key] = ns
# See if anything has been collected at all.
# If not, the namespaces of the incoming state is
# taken over
self.ns = {}
if len(dict) == 0 and inherited_state:
self.ns = inherited_state.ns
else:
if inherited_state:
for k in inherited_state.ns : self.ns[k] = inherited_state.ns[k]
# copying the newly found namespace, possibly overwriting
# incoming values
for k in dict : self.ns[k] = dict[k]
else:
self.ns = dict
# see if the xhtml core vocabulary has been set
self.xhtml_prefix = None
for key in self.ns.keys():
if XHTML_URI == str(self.ns[key]):
self.xhtml_prefix = key
break
if self.xhtml_prefix == None:
if XHTML_PREFIX not in self.ns:
self.ns[XHTML_PREFIX] = Namespace(XHTML_URI)
self.xhtml_prefix = XHTML_PREFIX
else:
# the most disagreeable thing, the user has used
# the prefix for something else...
self.xhtml_prefix = XHTML_PREFIX + '_' + ("%d" % random.randint(1, 1000))
self.ns[self.xhtml_prefix] = Namespace(XHTML_URI)
graph.bind(self.xhtml_prefix, XHTML_URI)
# extra tricks for unusual usages...
# if the 'rdf' prefix is not used, it is artificially added...
if "rdf" not in self.ns:
self.ns["rdf"] = RDF
if "rdfs" not in self.ns:
self.ns["rdfs"] = RDFS
# Final touch: setting the default namespace...
if node.hasAttribute("xmlns"):
self.defaultNS = node.getAttribute("xmlns")
elif inherited_state and inherited_state.defaultNS != None:
self.defaultNS = inherited_state.defaultNS
else:
self.defaultNS = None
def _get_predefined_rels(self, val, warning):
"""Get the predefined URI value for the C{@rel/@rev} attribute.
@param val: attribute value
@param warning: whether a warning should be generated or not
@type warning: boolean
@return: URIRef for the predefined URI (or None)
"""
vv = val.strip().lower()
if vv in _predefined_rel:
return self.ns[self.xhtml_prefix][vv]
else:
if warning: self.options.comment_graph.add_warning("invalid @rel/@rev value: '%s'" % val)
return None
def _get_predefined_properties(self, val, warning):
"""Get the predefined value for the C{@property} attribute.
@param val: attribute value
@param warning: whether a warning should be generated or not
@type warning: boolean
@return: URIRef for the predefined URI (or None)
"""
vv = val.strip().lower()
if vv in _predefined_property:
return self.ns[self.xhtml_prefix][vv]
else:
if warning: self.options.comment_graph.add_warning("invalid @property value: '%s'" % val)
return None
def get_resource(self, val, rel=False, prop=False, warning=True):
"""Get a resource for a CURIE.
The input argument is a CURIE; this is interpreted
via the current namespaces and the corresponding URI Reference is returned
@param val: string of the form "prefix:lname"
@keyword rel: whether the predefined C{@rel/@rev} values should also be interpreted
@keyword prop: whether the predefined C{@property} values should also be interpreted
@return: an RDFLib URIRef instance (or None)
"""
if val == "":
return None
elif val.find(":") != -1:
key = val.split(":", 1)[0]
lname = val.split(":", 1)[1]
if key == "_":
# A possible error: this method is invoked for property URI-s, which
# should not refer to a blank node. This case is checked and a possible
# error condition is handled
self.options.comment_graph.add_error("Blank node CURIE cannot be used in property position: _:%s" % lname)
return None
if key == "":
# This is the ":blabla" case
key = self.xhtml_prefix
else:
# if the resources correspond to a @rel or @rev or @property, then there
# may be one more possibility here, namely that it is one of the
# predefined values
if rel:
return self._get_predefined_rels(val, warning)
elif prop:
return self._get_predefined_properties(val, warning)
else:
self.options.comment_graph.add_warning("Invalid CURIE (without prefix): '%s'" % val)
return None
if key not in self.ns:
self.options.comment_graph.add_error("CURIE used with non declared prefix: %s" % key)
return None
else:
if lname == "":
return URIRef(str(self.ns[key]))
else:
return self.ns[key][lname]
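# Illustrative sketch of get_resource (hypothetical values, not part of the
# original source), assuming the prefix 'foaf' is bound to
# 'http://xmlns.com/foaf/0.1/' in self.ns:
#   get_resource("foaf:name")    -> URIRef("http://xmlns.com/foaf/0.1/name")
#   get_resource(":width")       -> resolved against the XHTML vocabulary prefix
#   get_resource("_:b0")         -> None, and an error is recorded (blank node
#                                   CURIEs are rejected in property position)
#   get_resource("undeclared:x") -> None, and an error is recorded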
def get_resources(self, val, rel=False, prop=False):
"""Get a series of resources encoded in CURIE-s.
The input argument is a list of CURIE-s; these are interpreted
via the current namespaces and the corresponding URI References are returned.
@param val: strings of the form prefix':'lname, separated by space
@keyword rel: whether the predefined C{@rel/@rev} values should also be interpreted
@keyword prop: whether the predefined C{@property} values should also be interpreted
@return: a list of RDFLib URIRef instances (possibly empty)
"""
val = val.strip()
resources = [ self.get_resource(v, rel, prop) for v in val.split() if v != None ]
return [ r for r in resources if r != None ]
def get_URI_ref(self, val):
"""Create a URI RDFLib resource for a URI.
The input argument is a URI. It is checked whether it is a local
reference (e.g., starting with '#') or not; if it is, it is combined with the
stored base value. In both cases a URIRef for the full URI is created
and returned
@param val: URI string
@return: an RDFLib URIRef instance
"""
if val == "":
return URIRef(self.base)
elif val[0] == '[' and val[-1] == ']':
self.options.comment_graph.add_error("Illegal usage of CURIE: %s" % val)
return None
else:
return URIRef(urlparse.urljoin(self.base, val))
def get_Curie_ref(self, val):
"""Create a URI RDFLib resource for a CURIE.
The input argument is a CURIE. This means that it is
- either of the form [a:b] where a:b should be resolved as an 'unprotected' CURIE, or
- it is a traditional URI (relative or absolute)
In the second case the URI value is also compared to 'usual' URI protocols ('http', 'https', 'ftp', etc)
(see L{usual_protocols}).
If there is no match, a warning is generated (indeed, a frequent mistake in authoring RDFa is to forget
the '[' and ']' characters to "protect" CURIE-s.)
@param val: CURIE string
@return: an RDFLib URIRef instance
"""
if len(val) == 0:
return URIRef(self.base)
elif val[0] == "[":
if val[-1] == "]":
curie = val[1:-1]
# A possible Blank node reference should be separated here:
if len(curie) >= 2 and curie[0] == "_" and curie[1] == ":":
return _get_bnode_from_Curie(curie[2:])
else:
return self.get_resource(val[1:-1])
else:
# illegal CURIE...
self.options.comment_graph.add_error("Illegal CURIE: %s" % val)
return None
else:
# check the value, to see if an error may have been made...
# Usual protocol values in the URI
v = val.strip().lower()
protocol = urlparse.urlparse(val)[0]
if protocol != "" and protocol not in usual_protocols:
err = "Possible URI error with '%s'; the intention may have been to use a protected CURIE" % val
self.options.comment_graph.add_warning(err)
return self.get_URI_ref(val)
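# Illustrative sketch of get_Curie_ref (hypothetical values, not from the
# original source): protected CURIEs are bracketed, anything else is a URI.
#   get_Curie_ref("[foaf:name]") -> resolved through the 'foaf' namespace
#   get_Curie_ref("[_:x]")       -> a blank node for the label 'x'
#   get_Curie_ref("")            -> URIRef of the current base
#   get_Curie_ref("foaf:name")   -> treated as a URI; 'foaf' is not a usual
#                                   protocol, so a warning is also emitted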
| bsd-3-clause | 117,368,649,494,514,380 | 47.983568 | 312 | 0.592658 | false |
Connexions/openstax-cms | openstax/settings/base.py | 1 | 11195 | # Django settings for openstax project.
import os
import sys
import raven
import logging.config
from django.utils.log import DEFAULT_LOGGING
PROJECT_ROOT = os.path.join(os.path.dirname(__file__), '..', '..')
BASE_DIR = PROJECT_ROOT
# check if running local dev server - else default to DEBUG=False
if len(sys.argv) > 1:
DEBUG = (sys.argv[1] == 'runserver')
else:
DEBUG = False
# These should both be set to true. The openstax.middleware will handle resolving the URL
# without a redirect if needed.
APPEND_SLASH = True
WAGTAIL_APPEND_SLASH = True
# urls.W002 warns about slashes at the start of URLs. But we need those so
# we don't have to have slashes at the end of URLs. So ignore.
SILENCED_SYSTEM_CHECKS = ['urls.W002']
ADMINS = (
('Michael Harrison', '[email protected]'),
)
# Default to the console email backend. Configure a dev/production/local backend as needed
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'oscms_prodcms',
}
}
# Local time zone for this installation.
TIME_ZONE = 'America/Chicago'
# Language code for this installation.
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
# Note that with this set to True, Wagtail will fall back on using numeric dates
# in date fields, as opposed to 'friendly' dates like "24 Sep 2013", because
# Python's strptime doesn't support localised month names: https://code.djangoproject.com/ticket/13339
USE_L10N = False
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
DATE_FORMAT = 'j F Y'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
]
# ** You would never normally put the SECRET_KEY in a public repository,
# ** however this is a demo app so we're using the default settings.
# ** Don't use this key in any non-demo usage!
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'wq21wtjo3@d_qfjvd-#td!%7gfy2updj2z+nev^k$iy%=m4_tr'
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'openstax.middleware.CommonMiddlewareAppendSlashWithoutRedirect',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.contrib.redirects.middleware.RedirectMiddleware',
]
AUTHENTICATION_BACKENDS = (
'oxauth.backend.OpenStax',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
#'social_core.pipeline.social_auth.social_user',
'oxauth.pipelines.social_user',
'social_core.pipeline.user.create_user',
'oxauth.pipelines.save_profile',
'oxauth.pipelines.update_email',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details',
)
IMPORT_USER_PIPELINE = (
'social_django.pipeline.social_auth.social_user',
'social_django.pipeline.user.create_user',
'oxauth.pipelines.save_profile',
'social_django.pipeline.social_auth.associate_user',
'social_django.pipeline.user.user_details',
)
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
ROOT_URLCONF = 'openstax.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'openstax.wsgi.application'
INSTALLED_APPS = [
'scout_apm.django',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.postgres',
'django.contrib.admin',
'django.contrib.sitemaps',
# contrib
'compressor',
'taggit',
'modelcluster',
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'raven.contrib.django.raven_compat',
'django_filters',
'social_django',
'storages',
'django_ses',
'import_export',
'django_extensions',
'inline_actions',
# custom
'accounts',
'admin_templates', # this overrides the admin templates
'api',
'pages',
'books',
'news',
'allies',
'snippets',
'salesforce',
'mail',
'global_settings',
'errata',
'extraadminfilters',
'rangefilter',
'reversion',
'redirects',
'oxauth',
'events',
'webinars',
# wagtail
'wagtail.core',
'wagtail.admin',
'wagtail.documents',
'wagtail.snippets',
'wagtail.users',
'wagtail.images',
'wagtail.embeds',
'wagtail.search',
'wagtail.contrib.redirects',
'wagtail.contrib.forms',
'wagtail.sites',
#'wagtail.contrib.wagtailapi',
'wagtail.api.v2',
'wagtail.contrib.settings',
'wagtail.contrib.modeladmin',
'wagtailimportexport',
'flags',
'duplicatebooks'
]
EMAIL_SUBJECT_PREFIX = '[openstax] '
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2')
# django-compressor settings
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'django_libsass.SassCompiler'),
)
#django rest framework settings
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.AllowAny',
),
# Schools API is timing out, use this to paginate the results
#'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
#'PAGE_SIZE': 100
}
LOGGING_CONFIG = None
LOGLEVEL = os.environ.get('LOGLEVEL', 'error').upper()
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
# exact format is not important, this is the minimum information
'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
},
'django.server': DEFAULT_LOGGING['formatters']['django.server'],
},
'handlers': {
# logs routed to the null handler are discarded (effectively disabled)
'null': {
'class': 'logging.NullHandler',
},
# console logs to stderr
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default',
},
'django.server': DEFAULT_LOGGING['handlers']['django.server'],
},
'loggers': {
# default for all undefined Python modules
'': {
'level': 'ERROR',
'handlers': ['console'],
},
# Our application code
'openstax': {
'level': LOGLEVEL,
'handlers': ['console'],
'propagate': False,
},
'django.security.DisallowedHost': {
'handlers': ['null'],
'propagate': False,
},
'django.request': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
# Default runserver request logging
'django.server': DEFAULT_LOGGING['loggers']['django.server'],
},
})
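# Example (an assumption, not part of the original settings): the level of the
# 'openstax' logger can be raised per deployment via the environment, e.g.
#   LOGLEVEL=debug ./manage.py runserver
# Everything else stays at ERROR through the root logger configured above.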
# FLAGS
FLAGS = {
'hide_faculty_resources': [],
}
# WAGTAIL SETTINGS
WAGTAIL_SITE_NAME = 'openstax'
# Wagtail API number of results
WAGTAILAPI_LIMIT_MAX = None
WAGTAILUSERS_PASSWORD_ENABLED = False
WAGTAIL_USAGE_COUNT_ENABLED = False
# used in page.models to retrieve book information
CNX_ARCHIVE_URL = 'https://archive.cnx.org'
# Server host (used to populate links in the email)
HOST_LINK = 'https://openstax.org'
WAGTAIL_GRAVATAR_PROVIDER_URL = '//www.gravatar.com/avatar'
MAPBOX_TOKEN = '' # should be the sk from mapbox, put in the appropriate settings file
# Openstax Accounts
ACCOUNTS_URL = 'https://accounts.openstax.org'
AUTHORIZATION_URL = 'https://accounts.openstax.org/oauth/authorize'
ACCESS_TOKEN_URL = 'https://accounts.openstax.org/oauth/token'
USER_QUERY = 'https://accounts.openstax.org/api/user?'
USERS_QUERY = 'https://accounts.openstax.org/api/users?'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = 'https://openstax.org'
SOCIAL_AUTH_SANITIZE_REDIRECTS = False
SSO_COOKIE_NAME = 'oxa'
BYPASS_SSO_COOKIE_CHECK = False
SIGNATURE_PUBLIC_KEY = "-----BEGIN PUBLIC KEY-----\nMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDjvO/E8lO+ZJ7JMglbJyiF5/Ae\nIIS2NKbIAMLBMPVBQY7mSqo6j/yxdVNKZCzYAMDWc/VvEfXQQJ2ipIUuDvO+SOwz\nMewQ70hC71hC4s3dmOSLnixDJlnsVpcnKPEFXloObk/fcpK2Vw27e+yY+kIFmV2X\nzrvTnmm9UJERp6tVTQIDAQAB\n-----END PUBLIC KEY-----\n"
ENCRYPTION_PRIVATE_KEY = "c6d9b8683fddce8f2a39ac0565cf18ee"
ENCRYPTION_METHOD = 'A256GCM'
SIGNATURE_ALGORITHM = 'RS256'
DATA_UPLOAD_MAX_NUMBER_FIELDS = 10240
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
STATIC_HOST = 'https://d3bxy9euw4e147.cloudfront.net' if not DEBUG else ''
STATIC_URL = STATIC_HOST + '/static/'
AWS_HEADERS = {
'Access-Control-Allow-Origin': '*'
}
# to override any of the above settings use a local.py file in this directory
try:
from .local import *
except ImportError:
pass
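# A minimal sketch of what such a local.py override could contain (hypothetical
# values only; the file is intentionally not shipped with the project):
#   DEBUG = True
#   DATABASES['default']['NAME'] = 'oscms_devcms'
#   MAPBOX_TOKEN = 'sk.example-token'
#   EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'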
| agpl-3.0 | -222,336,772,868,262,720 | 30.183844 | 303 | 0.677177 | false |
LTD-Beget/sprutio-rpc | lib/FileManager/workers/local/moveToWebDav.py | 1 | 5842 | import os
import shutil
import threading
import time
import traceback
from lib.FileManager.FM import REQUEST_DELAY
from lib.FileManager.WebDavConnection import WebDavConnection
from lib.FileManager.workers.baseWorkerCustomer import BaseWorkerCustomer
class MoveToWebDav(BaseWorkerCustomer):
def __init__(self, source, target, paths, overwrite, *args, **kwargs):
super(MoveToWebDav, self).__init__(*args, **kwargs)
self.source = source
self.target = target
self.paths = paths
self.overwrite = overwrite
self.operation_progress = {
"total_done": False,
"total": 0,
"operation_done": False,
"processed": 0,
"previous_percent": 0
}
def run(self):
try:
self.preload()
success_paths = []
error_paths = []
source_path = self.source.get('path')
target_path = self.target.get('path')
if source_path is None:
raise Exception("Source path empty")
if target_path is None:
raise Exception("Target path empty")
source_path = self.get_abs_path(source_path)
webdav = WebDavConnection.create(self.login, self.target.get('server_id'), self.logger)
self.logger.info("MoveToWebDav process run source = %s , target = %s" % (source_path, target_path))
t_total = threading.Thread(target=self.get_total, args=(self.operation_progress, self.paths))
t_total.start()
for path in self.paths:
try:
abs_path = self.get_abs_path(path)
file_basename = os.path.basename(abs_path)
uploading_path = abs_path
if os.path.isdir(abs_path):
uploading_path += '/'
file_basename += '/'
result_upload = webdav.upload(uploading_path, target_path, self.overwrite, file_basename,
self.uploading_progress)
if result_upload['success']:
success_paths.append(path)
if os.path.isfile(abs_path):
os.remove(abs_path)
elif os.path.islink(abs_path):
os.unlink(abs_path)
elif os.path.isdir(abs_path):
shutil.rmtree(abs_path)
else:
error_paths.append(abs_path)
break
except Exception as e:
self.logger.error(
"Error copy %s , error %s , %s" % (str(path), str(e), traceback.format_exc()))
error_paths.append(path)
self.operation_progress["operation_done"] = True
result = {
"success": success_paths,
"errors": error_paths
}
# otherwise users get the impression that the copy did not complete fully
progress = {
'percent': round(float(len(success_paths)) / float(len(self.paths)), 2),
'text': str(int(round(float(len(success_paths)) / float(len(self.paths)), 2) * 100)) + '%'
}
time.sleep(REQUEST_DELAY)
self.on_success(self.status_id, data=result, progress=progress, pid=self.pid, pname=self.name)
except Exception as e:
result = {
"error": True,
"message": str(e),
"traceback": traceback.format_exc()
}
self.on_error(self.status_id, result, pid=self.pid, pname=self.name)
def get_total(self, progress_object, paths, count_files=True):
self.logger.debug("start get_total() files = %s" % count_files)
for path in paths:
try:
abs_path = self.get_abs_path(path)
for current, dirs, files in os.walk(abs_path):
if count_files:
progress_object["total"] += len(files)
if os.path.isfile(abs_path):
progress_object["total"] += 1
except Exception as e:
self.logger.error("Error get_total file %s , error %s" % (str(path), str(e)))
continue
progress_object["total_done"] = True
self.logger.debug("done get_total(), found %s objects" % progress_object.get("total"))
return
def uploading_progress(self, download_t, download_d, upload_t, upload_d):
try:
percent_upload = 0
if upload_t != 0:
percent_upload = round(float(upload_d) / float(upload_t), 2)
if percent_upload != self.operation_progress.get("previous_percent"):
if percent_upload == 0 and self.operation_progress.get("previous_percent") != 0:
self.operation_progress["processed"] += 1
self.operation_progress["previous_percent"] = percent_upload
total_percent = percent_upload + self.operation_progress.get("processed")
percent = round(float(total_percent) /
float(self.operation_progress.get("total")), 2)
progress = {
'percent': percent,
'text': str(int(percent * 100)) + '%'
}
self.on_running(self.status_id, progress=progress, pid=self.pid, pname=self.name)
except Exception as ex:
self.logger.error("Error in MoveToWebDav uploading_progress(): %s, traceback = %s" %
(str(ex), traceback.format_exc()))
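# Worked example of the progress math above (illustrative numbers only):
# with total == 5 files, processed == 2 already finished and the current file
# at upload_d / upload_t == 0.5, total_percent == 2.5 and the reported
# progress is round(2.5 / 5, 2) == 0.5, shown to the client as '50%'.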
| gpl-3.0 | -9,210,427,962,494,208,000 | 37.85906 | 111 | 0.518135 | false |
dhylands/upy-examples | rotary.py | 1 | 2290 | import pyb
class rotary():
def __init__(self,Apin='X21',Bpin='X22'):
self.B = pyb.Pin(Bpin)
self.A = pyb.Pin(Apin)
self.prevA = self.A.value()
self.prevB = self.B.value()
self.CWcount = 0
self.CCWcount = 0
self.position = 0
self.Bint = pyb.ExtInt(self.B,pyb.ExtInt.IRQ_RISING_FALLING,pyb.Pin.PULL_UP,self.callback)
self.Aint = pyb.ExtInt(self.A,pyb.ExtInt.IRQ_RISING_FALLING,pyb.Pin.PULL_UP,self.callback)
def callback(self,line):
# self.Bint.disable()
# self.Aint.disable()
A = self.A.value()
B = self.B.value()
#previous state 11
if self.prevA==1 and self.prevB==1:
if A==1 and B==0:
#print( "CCW 11 to 10")
self.CCWcount += 1
self.prevA = A
self.prevB = B
elif A==0 and B==0:
#print ("CW 11 to 00")
self.CWcount += 1
self.prevA = A
self.prevB = B
#previous state 10
elif self.prevA==1 and self.prevB==0:
if A==1 and B==1:
#print ("CW 10 to 11")
self.CWcount += 1
self.prevA = A
self.prevB = B
elif A==0 and B==0:
#print ("CCW 10 to 00")
self.CCWcount += 1
self.prevA = A
self.prevB = B
#previous state 00
elif self.prevA==0 and self.prevB==0:
if A==1 and B==1:
#print ("CCW 00 to 11")
self.CCWcount += 1
self.prevA = A
self.prevB = B
elif A==1 and B==0:
#print ("CW 00 to 10")
self.CWcount+=1
self.prevA = A
self.prevB = B
# self.Bint.enable()
# self.Aint.enable()
if A==1 and B==1:
if self.CWcount>=3 and self.CWcount>self.CCWcount:
self.position+=1
print (self.position)
if self.CCWcount>=3 and self.CCWcount>self.CWcount:
self.position-=1
print(self.position)
self.CCWcount = 0
self.CWcount = 0
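# Example usage (a sketch; the default pins X21/X22 assume a pyboard wiring):
#   enc = rotary()
# Turning the knob fires the pin interrupts, and the callback updates and
# prints enc.position, incrementing on clockwise steps and decrementing on
# counter-clockwise ones.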
| mit | 4,238,425,825,374,999,600 | 26.926829 | 98 | 0.448472 | false |
mozvip/Sick-Beard | sickbeard/versionChecker.py | 1 | 18840 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import sickbeard
from sickbeard import version, ui
from sickbeard import logger
from sickbeard import scene_exceptions
from sickbeard.exceptions import ex
import os, platform, shutil
import subprocess, re
import urllib, urllib2
import zipfile, tarfile
from urllib2 import URLError
import gh_api as github
class CheckVersion():
"""
Version check class meant to run as a thread object with the SB scheduler.
"""
def __init__(self):
self.install_type = self.find_install_type()
if self.install_type == 'win':
self.updater = WindowsUpdateManager()
elif self.install_type == 'git':
self.updater = GitUpdateManager()
elif self.install_type == 'source':
self.updater = SourceUpdateManager()
else:
self.updater = None
def run(self):
self.check_for_new_version()
# refresh scene exceptions too
scene_exceptions.retrieve_exceptions()
def find_install_type(self):
"""
Determines how this copy of SB was installed.
returns: type of installation. Possible values are:
'win': any compiled windows build
'git': running from source using git
'source': running from source without git
"""
# check if we're a windows build
if version.SICKBEARD_VERSION.startswith('build '):
install_type = 'win'
elif os.path.isdir(os.path.join(sickbeard.PROG_DIR, '.git')):
install_type = 'git'
else:
install_type = 'source'
return install_type
def check_for_new_version(self, force=False):
"""
Checks the internet for a newer version.
returns: bool, True for new version or False for no new version.
force: if true the VERSION_NOTIFY setting will be ignored and a check will be forced
"""
if not sickbeard.VERSION_NOTIFY and not force:
logger.log(u"Version checking is disabled, not checking for the newest version")
return False
logger.log(u"Checking if "+self.install_type+" needs an update")
if not self.updater.need_update():
logger.log(u"No update needed")
if force:
ui.notifications.message('No update needed')
return False
self.updater.set_newest_text()
return True
def update(self):
if self.updater.need_update():
return self.updater.update()
class UpdateManager():
def get_update_url(self):
return sickbeard.WEB_ROOT+"/home/update/?pid="+str(sickbeard.PID)
class WindowsUpdateManager(UpdateManager):
def __init__(self):
self._cur_version = None
self._cur_commit_hash = None
self._newest_version = None
self.gc_url = 'http://code.google.com/p/sickbeard/downloads/list'
self.version_url = 'https://raw.github.com/sarakha63/Sick-Beard/windows_binaries/updates.txt'
def _find_installed_version(self):
return int(sickbeard.version.SICKBEARD_VERSION[6:])
def _find_newest_version(self, whole_link=False):
"""
Checks git for the newest Windows binary build. Returns either the
build number or the entire build URL depending on whole_link's value.
whole_link: If True, returns the entire URL to the release. If False, it returns
only the build number. default: False
"""
regex = ".*SickBeard\-win32\-alpha\-build(\d+)(?:\.\d+)?\.zip"
svnFile = urllib.urlopen(self.version_url)
for curLine in svnFile.readlines():
logger.log(u"checking line "+curLine, logger.DEBUG)
match = re.match(regex, curLine)
if match:
logger.log(u"found a match", logger.DEBUG)
if whole_link:
return curLine.strip()
else:
return int(match.group(1))
return None
def need_update(self):
self._cur_version = self._find_installed_version()
self._newest_version = self._find_newest_version()
logger.log(u"newest version: "+repr(self._newest_version), logger.DEBUG)
if self._newest_version and self._newest_version > self._cur_version:
return True
def set_newest_text(self):
new_str = 'There is a <a href="'+self.gc_url+'" onclick="window.open(this.href); return false;">newer version available</a> (build '+str(self._newest_version)+')'
new_str += "— <a href=\""+self.get_update_url()+"\">Update Now</a>"
sickbeard.NEWEST_VERSION_STRING = new_str
def update(self):
new_link = self._find_newest_version(True)
logger.log(u"new_link: " + repr(new_link), logger.DEBUG)
if not new_link:
logger.log(u"Unable to find a new version link on google code, not updating")
return False
# download the zip
try:
logger.log(u"Downloading update file from "+str(new_link))
(filename, headers) = urllib.urlretrieve(new_link) #@UnusedVariable
# prepare the update dir
sb_update_dir = os.path.join(sickbeard.PROG_DIR, 'sb-update')
logger.log(u"Clearing out update folder "+sb_update_dir+" before unzipping")
if os.path.isdir(sb_update_dir):
shutil.rmtree(sb_update_dir)
# unzip it to sb-update
logger.log(u"Unzipping from "+str(filename)+" to "+sb_update_dir)
update_zip = zipfile.ZipFile(filename, 'r')
update_zip.extractall(sb_update_dir)
update_zip.close()
# find update dir name
update_dir_contents = os.listdir(sb_update_dir)
if len(update_dir_contents) != 1:
logger.log("Invalid update data, update failed. Maybe try deleting your sb-update folder?", logger.ERROR)
return False
content_dir = os.path.join(sb_update_dir, update_dir_contents[0])
old_update_path = os.path.join(content_dir, 'updater.exe')
new_update_path = os.path.join(sickbeard.PROG_DIR, 'updater.exe')
logger.log(u"Copying new update.exe file from "+old_update_path+" to "+new_update_path)
shutil.move(old_update_path, new_update_path)
# delete the zip
logger.log(u"Deleting zip file from "+str(filename))
os.remove(filename)
except Exception, e:
logger.log(u"Error while trying to update: "+ex(e), logger.ERROR)
return False
return True
class GitUpdateManager(UpdateManager):
def __init__(self):
self._cur_commit_hash = None
self._newest_commit_hash = None
self._num_commits_behind = 0
self.git_url = 'http://code.google.com/p/sickbeard/downloads/list'
self.branch = self._find_git_branch()
def _git_error(self):
error_message = 'Unable to find your git executable - either delete your .git folder and run from source OR <a href="http://code.google.com/p/sickbeard/wiki/AdvancedSettings" onclick="window.open(this.href); return false;">set git_path in your config.ini</a> to enable updates.'
sickbeard.NEWEST_VERSION_STRING = error_message
return None
def _run_git(self, args):
if sickbeard.GIT_PATH:
git_locations = ['"'+sickbeard.GIT_PATH+'"']
else:
git_locations = ['git']
# osx people who start SB from launchd have a broken path, so try a hail-mary attempt for them
if platform.system().lower() == 'darwin':
git_locations.append('/usr/local/git/bin/git')
output = err = None
for cur_git in git_locations:
cmd = cur_git+' '+args
try:
logger.log(u"Executing "+cmd+" with your shell in "+sickbeard.PROG_DIR, logger.DEBUG)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, cwd=sickbeard.PROG_DIR)
output, err = p.communicate()
logger.log(u"git output: "+output, logger.DEBUG)
except OSError:
logger.log(u"Command "+cmd+" didn't work, couldn't find git.")
continue
if p.returncode != 0 or 'not found' in output or "not recognized as an internal or external command" in output:
logger.log(u"Unable to find git with command "+cmd, logger.DEBUG)
output = None
elif 'fatal:' in output or err:
logger.log(u"Git returned bad info, are you sure this is a git installation?", logger.ERROR)
output = None
elif output:
break
return (output, err)
def _find_installed_version(self):
"""
Attempts to find the currently installed version of Sick Beard.
Uses git show to get commit version.
Returns: True for success or False for failure
"""
output, err = self._run_git('rev-parse HEAD') #@UnusedVariable
if not output:
return self._git_error()
logger.log(u"Git output: "+str(output), logger.DEBUG)
cur_commit_hash = output.strip()
if not re.match('^[a-z0-9]+$', cur_commit_hash):
logger.log(u"Output doesn't look like a hash, not using it", logger.ERROR)
return self._git_error()
self._cur_commit_hash = cur_commit_hash
return True
def _find_git_branch(self):
branch_info = self._run_git('symbolic-ref -q HEAD')
if not branch_info or not branch_info[0]:
return 'master'
branch = branch_info[0].strip().replace('refs/heads/', '', 1)
return branch or 'master'
def _check_github_for_update(self):
"""
Uses pygithub to ask github if there is a newer version that the provided
commit hash. If there is a newer version it sets Sick Beard's version text.
commit_hash: hash that we're checking against
"""
self._num_commits_behind = 0
self._newest_commit_hash = None
gh = github.GitHub()
# find newest commit
for curCommit in gh.commits('sarakha63', 'Sick-Beard', self.branch):
if not self._newest_commit_hash:
self._newest_commit_hash = curCommit['sha']
if not self._cur_commit_hash:
break
if curCommit['sha'] == self._cur_commit_hash:
break
self._num_commits_behind += 1
logger.log(u"newest: "+str(self._newest_commit_hash)+" and current: "+str(self._cur_commit_hash)+" and num_commits: "+str(self._num_commits_behind), logger.DEBUG)
def set_newest_text(self):
# if we're up to date then don't set this
if self._num_commits_behind == 100:
message = "or else you're ahead of master"
elif self._num_commits_behind > 0:
message = "you're %d commit" % self._num_commits_behind
if self._num_commits_behind > 1: message += 's'
message += ' behind'
else:
return
if self._newest_commit_hash:
url = 'http://github.com/sarakha63/Sick-Beard/compare/'+self._cur_commit_hash+'...'+self._newest_commit_hash
else:
url = 'http://github.com/sarakha63/Sick-Beard/commits/'
new_str = 'There is a <a href="'+url+'" onclick="window.open(this.href); return false;">newer version available</a> ('+message+')'
new_str += "— <a href=\""+self.get_update_url()+"\">Update Now</a>"
sickbeard.NEWEST_VERSION_STRING = new_str
def need_update(self):
self._find_installed_version()
try:
self._check_github_for_update()
except Exception, e:
logger.log(u"Unable to contact github, can't check for update: "+repr(e), logger.ERROR)
return False
logger.log(u"After checking, cur_commit = "+str(self._cur_commit_hash)+", newest_commit = "+str(self._newest_commit_hash)+", num_commits_behind = "+str(self._num_commits_behind), logger.DEBUG)
if self._num_commits_behind > 0:
return True
return False
def update(self):
"""
Calls git pull for the current branch of the upstream GitHub repository in order to update Sick Beard. Returns a bool depending
on the call's success.
"""
self._run_git('config remote.origin.url git://github.com/sarakha63/Sick-Beard.git')
self._run_git('stash')
output, err = self._run_git('pull git://github.com/sarakha63/Sick-Beard.git '+self.branch) #@UnusedVariable
if not output:
return self._git_error()
pull_regex = '(\d+) .+,.+(\d+).+\(\+\),.+(\d+) .+\(\-\)'
(files, insertions, deletions) = (None, None, None)
for line in output.split('\n'):
if 'Already up-to-date.' in line:
logger.log(u"No update available, not updating")
logger.log(u"Output: "+str(output))
return False
elif line.endswith('Aborting.'):
logger.log(u"Unable to update from git: "+line, logger.ERROR)
logger.log(u"Output: "+str(output))
return False
match = re.search(pull_regex, line)
if match:
(files, insertions, deletions) = match.groups()
break
if None in (files, insertions, deletions):
logger.log(u"Didn't find indication of success in output, assuming git pull failed", logger.ERROR)
logger.log(u"Output: "+str(output))
return False
return True
class SourceUpdateManager(GitUpdateManager):
def _find_installed_version(self):
version_file = os.path.join(sickbeard.PROG_DIR, 'version.txt')
if not os.path.isfile(version_file):
self._cur_commit_hash = None
return
fp = open(version_file, 'r')
self._cur_commit_hash = fp.read().strip(' \n\r')
fp.close()
if not self._cur_commit_hash:
self._cur_commit_hash = None
def need_update(self):
parent_result = GitUpdateManager.need_update(self)
if not self._cur_commit_hash:
return True
else:
return parent_result
def set_newest_text(self):
if not self._cur_commit_hash:
logger.log(u"Unknown current version, don't know if we should update or not", logger.DEBUG)
new_str = "Unknown version: If you've never used the Sick Beard upgrade system then I don't know what version you have."
new_str += "— <a href=\""+self.get_update_url()+"\">Update Now</a>"
sickbeard.NEWEST_VERSION_STRING = new_str
else:
GitUpdateManager.set_newest_text(self)
def update(self):
"""
Downloads the latest source tarball from github and installs it over the existing version.
"""
tar_download_url = 'https://github.com/sarakha63/Sick-Beard/tarball/'+version.SICKBEARD_VERSION
sb_update_dir = os.path.join(sickbeard.PROG_DIR, 'sb-update')
version_path = os.path.join(sickbeard.PROG_DIR, 'version.txt')
# retrieve file
try:
logger.log(u"Downloading update from "+tar_download_url)
data = urllib2.urlopen(tar_download_url)
except (IOError, URLError):
logger.log(u"Unable to retrieve new version from "+tar_download_url+", can't update", logger.ERROR)
return False
download_name = data.geturl().split('/')[-1].split('?')[0]
tar_download_path = os.path.join(sickbeard.PROG_DIR, download_name)
# save to disk
f = open(tar_download_path, 'wb')
f.write(data.read())
f.close()
# extract to temp folder
logger.log(u"Extracting file "+tar_download_path)
tar = tarfile.open(tar_download_path)
tar.extractall(sb_update_dir)
tar.close()
# delete .tar.gz
logger.log(u"Deleting file "+tar_download_path)
os.remove(tar_download_path)
# find update dir name
update_dir_contents = [x for x in os.listdir(sb_update_dir) if os.path.isdir(os.path.join(sb_update_dir, x))]
if len(update_dir_contents) != 1:
logger.log(u"Invalid update data, update failed: "+str(update_dir_contents), logger.ERROR)
return False
content_dir = os.path.join(sb_update_dir, update_dir_contents[0])
# walk temp folder and move files to main folder
for dirname, dirnames, filenames in os.walk(content_dir): #@UnusedVariable
dirname = dirname[len(content_dir)+1:]
for curfile in filenames:
old_path = os.path.join(content_dir, dirname, curfile)
new_path = os.path.join(sickbeard.PROG_DIR, dirname, curfile)
if os.path.isfile(new_path):
os.remove(new_path)
os.renames(old_path, new_path)
# update version.txt with commit hash
try:
ver_file = open(version_path, 'w')
ver_file.write(self._newest_commit_hash)
ver_file.close()
except IOError, e:
logger.log(u"Unable to write version file, update not complete: "+ex(e), logger.ERROR)
return False
return True
| gpl-3.0 | -4,442,054,767,377,110,000 | 34.941176 | 286 | 0.578556 | false |
Transkribus/TranskribusDU | TranskribusDU/gcn/avg_checkpoints.py | 1 | 4652 | # Taken from https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/avg_checkpoints.py
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to average values of variables in a list of checkpoint files."""
from __future__ import division
from __future__ import print_function
import os
# Dependency imports
import numpy as np
import six
from six.moves import zip # pylint: disable=redefined-builtin
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("checkpoints", "",
"Comma-separated list of checkpoints to average.")
flags.DEFINE_integer("num_last_checkpoints", 0,
"Averages the last N saved checkpoints."
" If the checkpoints flag is set, this is ignored.")
flags.DEFINE_string("prefix", "",
"Prefix (e.g., directory) to append to each checkpoint.")
flags.DEFINE_string("output_path", "/tmp/averaged.ckpt",
"Path to output the averaged checkpoint to.")
def checkpoint_exists(path):
return (tf.gfile.Exists(path) or tf.gfile.Exists(path + ".meta") or
tf.gfile.Exists(path + ".index"))
def main(_):
if FLAGS.checkpoints:
# Get the checkpoints list from flags and run some basic checks.
checkpoints = [c.strip() for c in FLAGS.checkpoints.split(",")]
checkpoints = [c for c in checkpoints if c]
if not checkpoints:
raise ValueError("No checkpoints provided for averaging.")
if FLAGS.prefix:
checkpoints = [FLAGS.prefix + c for c in checkpoints]
else:
assert FLAGS.num_last_checkpoints >= 1, "Must average at least one model"
assert FLAGS.prefix, ("Prefix must be provided when averaging last"
" N checkpoints")
checkpoint_state = tf.train.get_checkpoint_state(
os.path.dirname(FLAGS.prefix))
# Checkpoints are ordered from oldest to newest.
checkpoints = checkpoint_state.all_model_checkpoint_paths[
-FLAGS.num_last_checkpoints:]
checkpoints = [c for c in checkpoints if checkpoint_exists(c)]
if not checkpoints:
if FLAGS.checkpoints:
raise ValueError(
"None of the provided checkpoints exist. %s" % FLAGS.checkpoints)
else:
raise ValueError("Could not find checkpoints at %s" %
os.path.dirname(FLAGS.prefix))
# Read variables from all checkpoints and average them.
tf.logging.info("Reading variables and averaging checkpoints:")
for c in checkpoints:
tf.logging.info("%s ", c)
var_list = tf.contrib.framework.list_variables(checkpoints[0])
var_values, var_dtypes = {}, {}
for (name, shape) in var_list:
if not name.startswith("global_step"):
var_values[name] = np.zeros(shape)
for checkpoint in checkpoints:
reader = tf.contrib.framework.load_checkpoint(checkpoint)
for name in var_values:
tensor = reader.get_tensor(name)
var_dtypes[name] = tensor.dtype
var_values[name] += tensor
tf.logging.info("Read from checkpoint %s", checkpoint)
for name in var_values: # Average.
var_values[name] /= len(checkpoints)
tf_vars = [
tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v])
for v in var_values
]
placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars]
assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]
global_step = tf.Variable(
0, name="global_step", trainable=False, dtype=tf.int64)
saver = tf.train.Saver(tf.all_variables())
# Build a model consisting only of variables, set them to the average values.
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for p, assign_op, (name, value) in zip(placeholders, assign_ops,
six.iteritems(var_values)):
sess.run(assign_op, {p: value})
# Use the built saver to save the averaged checkpoint.
saver.save(sess, FLAGS.output_path, global_step=global_step)
tf.logging.info("Averaged checkpoints saved in %s", FLAGS.output_path)
if __name__ == "__main__":
tf.app.run() | bsd-3-clause | -3,850,041,144,974,980,000 | 37.775 | 107 | 0.678848 | false |
necozay/tulip-control | tulip/transys/export/graph2dot.py | 1 | 17106 | # Copyright (c) 2013-2014 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""Convert labeled graph to dot using
pydot and custom filtering
"""
from __future__ import division
import logging
import re
from collections import Iterable
from textwrap import fill
from cStringIO import StringIO
import numpy as np
import networkx as nx
from networkx.utils import make_str
import pydot
# inline:
#
# import webcolors
logger = logging.getLogger(__name__)
def _states2dot_str(graph, to_pydot_graph, wrap=10,
tikz=False, rankdir='TB'):
"""Copy nodes to given Pydot graph, with attributes for dot export."""
# TODO generate LaTeX legend table for edge labels
states = graph.states
# get labeling def
if hasattr(graph, '_state_label_def'):
label_def = graph._state_label_def
if hasattr(graph, '_state_dot_label_format'):
label_format = graph._state_dot_label_format
else:
label_format = {'type?label': '', 'separator': '\n'}
for u, d in graph.nodes_iter(data=True):
# initial state ?
is_initial = u in states.initial
is_accepting = _is_accepting(graph, u)
# state annotation
node_dot_label = _form_node_label(
u, d, label_def,
label_format, wrap, tikz=tikz
)
# node_dot_label = fill(str(state), width=wrap)
rim_color = d.get('color', 'black')
if tikz:
_state2tikz(graph, to_pydot_graph, u,
is_initial, is_accepting, rankdir,
rim_color, d, node_dot_label)
else:
_state2dot(graph, to_pydot_graph, u,
is_initial, is_accepting,
rim_color, d, node_dot_label)
def _state2dot(graph, to_pydot_graph, state,
is_initial, is_accepting,
rim_color, d, node_dot_label):
if is_initial:
_add_incoming_edge(to_pydot_graph, state)
normal_shape = graph.dot_node_shape['normal']
accept_shape = graph.dot_node_shape.get('accepting', '')
shape = accept_shape if is_accepting else normal_shape
corners = 'rounded' if shape == 'rectangle' else ''
rim_color = '"' + _format_color(rim_color, 'dot') + '"'
fc = d.get('fillcolor', 'none')
filled = '' if fc == 'none' else 'filled'
if fc == 'gradient':
# top/bottom colors not supported for dot
lc = d.get('left_color', d['top_color'])
rc = d.get('right_color', d['bottom_color'])
if isinstance(lc, basestring):
fillcolor = lc
elif isinstance(lc, dict):
fillcolor = lc.keys()[0]
else:
raise TypeError('left_color must be str or dict.')
if isinstance(rc, basestring):
fillcolor += ':' + rc
elif isinstance(rc, dict):
fillcolor += ':' + rc.keys()[0]
else:
raise TypeError('right_color must be str or dict.')
else:
fillcolor = _format_color(fc, 'dot')
if corners and filled:
node_style = '"' + corners + ', ' + filled + '"'
elif corners:
node_style = '"' + corners + '"'
else:
node_style = '"' + filled + '"'
to_pydot_graph.add_node(
state,
label=node_dot_label,
shape=shape,
style=node_style,
color=rim_color,
fillcolor='"' + fillcolor + '"')
def _state2tikz(graph, to_pydot_graph, state,
is_initial, is_accepting, rankdir,
rim_color, d, node_dot_label):
style = 'state'
if rankdir == 'LR':
init_dir = 'initial left'
elif rankdir == 'RL':
init_dir = 'initial right'
elif rankdir == 'TB':
init_dir = 'initial above'
elif rankdir == 'BT':
init_dir = 'initial below'
else:
raise ValueError('Unknown rankdir')
if is_initial:
style += ', initial by arrow, ' + init_dir + ', initial text='
if is_accepting:
style += ', accepting'
if graph.dot_node_shape['normal'] == 'rectangle':
style += ', shape = rectangle, rounded corners'
# darken the rim
if 'black' in rim_color:
c = _format_color(rim_color, 'tikz')
else:
c = _format_color(rim_color, 'tikz') + '!black!30'
style += ', draw = ' + c
fill = d.get('fillcolor')
if fill == 'gradient':
s = {'top_color', 'bottom_color',
'left_color', 'right_color'}
for x in s:
if x in d:
style += ', ' + x + ' = ' + _format_color(d[x], 'tikz')
elif fill is not None:
# not gradient
style += ', fill = ' + _format_color(fill, 'tikz')
else:
logger.debug('fillcolor is None')
to_pydot_graph.add_node(
state,
texlbl=node_dot_label,
style=style)
def _format_color(color, prog='tikz'):
"""Encode color in syntax for given program.
@type color:
- C{str} for single color or
- C{dict} for weighted color mix
@type prog: 'tikz' or 'dot'
"""
if isinstance(color, basestring):
return color
if not isinstance(color, dict):
raise Exception('color must be str or dict')
if prog == 'tikz':
s = '!'.join([k + '!' + str(v) for k, v in color.iteritems()])
elif prog == 'dot':
t = sum(color.itervalues())
try:
import webcolors
# mix them
result = np.array((0.0, 0.0, 0.0))
for c, w in color.iteritems():
result += w/t * np.array(webcolors.name_to_rgb(c))
s = webcolors.rgb_to_hex(result)
except:
logger.warn('failed to import webcolors')
s = ':'.join([k + ';' + str(v/t) for k, v in color.iteritems()])
else:
raise ValueError('Unknown program: ' + str(prog) + '. '
"Available options are: 'dot' or 'tikz'.")
return s
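# Illustrative behaviour of _format_color (hypothetical inputs):
#   _format_color('red', 'tikz')                    -> 'red'
#   _format_color({'red': 30, 'white': 70}, 'tikz') -> 'red!30!white!70'
#   _format_color({'red': 1, 'blue': 1}, 'dot')     -> a mixed hex value such as
#       '#7f007f' when webcolors is available, otherwise 'red;0.5:blue;0.5'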
def _place_initial_states(trs_graph, pd_graph, tikz):
init_subg = pydot.Subgraph('initial')
init_subg.set_rank('source')
for node in trs_graph.states.initial:
pd_node = pydot.Node(make_str(node))
init_subg.add_node(pd_node)
phantom_node = 'phantominit' + str(node)
pd_node = pydot.Node(make_str(phantom_node))
init_subg.add_node(pd_node)
pd_graph.add_subgraph(init_subg)
def _add_incoming_edge(g, state):
phantom_node = 'phantominit' + str(state)
g.add_node(phantom_node, label='""', shape='none', width='0')
g.add_edge(phantom_node, state)
def _form_node_label(state, state_data, label_def,
label_format, width=10, tikz=False):
# node itself
state_str = str(state)
state_str = state_str.replace("'", "")
# rm parentheses to reduce size of states in fig
if tikz:
state_str = state_str.replace('(', '')
state_str = state_str.replace(')', '')
# make indices subscripts
if tikz:
pattern = '([a-zA-Z]\d+)'
make_subscript = lambda x: x.group(0)[0] + '_' + x.group(0)[1:]
state_str = re.sub(pattern, make_subscript, state_str)
# SVG requires breaking the math environment into
# one math env per line. Just make 1st line math env
# if latex:
# state_str = '$' + state_str + '$'
# state_str = fill(state_str, width=width)
node_dot_label = state_str
# newline between state name and label, only if state is labeled
if len(state_data) != 0:
node_dot_label += r'\n'
# add node annotations from action, AP sets etc
# other key,values in state attr_dict ignored
pieces = list()
for (label_type, label_value) in state_data.iteritems():
if label_type not in label_def:
continue
# label formatting
type_name = label_format[label_type]
sep_type_value = label_format['type?label']
# avoid turning strings to lists,
# or non-iterables to lists
if isinstance(label_value, str):
label_str = fill(label_value, width=width)
elif isinstance(label_value, Iterable): # and not str
s = ', '.join([str(x) for x in label_value])
label_str = r'\\{' + fill(s, width=width) + r'\\}'
else:
label_str = fill(str(label_value), width=width)
pieces.append(type_name + sep_type_value + label_str)
sep_label_sets = label_format['separator']
node_dot_label += sep_label_sets.join(pieces)
if tikz:
# replace LF by latex newline
node_dot_label = node_dot_label.replace(r'\n', r'\\\\ ')
# dot2tex math mode doesn't handle newlines properly
node_dot_label = (
r'$\\begin{matrix} ' + node_dot_label +
r'\\end{matrix}$'
)
return node_dot_label
def _is_accepting(graph, state):
"""accepting state ?"""
# no accepting states defined ?
if not hasattr(graph.states, 'accepting'):
return False
return state in graph.states.accepting
def _transitions2dot_str(trans, to_pydot_graph, tikz=False):
"""Convert transitions to dot str.
@rtype: str
"""
if not hasattr(trans.graph, '_transition_label_def'):
return
if not hasattr(trans.graph, '_transition_dot_label_format'):
return
if not hasattr(trans.graph, '_transition_dot_mask'):
return
# get labeling def
label_def = trans.graph._transition_label_def
label_format = trans.graph._transition_dot_label_format
label_mask = trans.graph._transition_dot_mask
for (u, v, key, edge_data) in trans.graph.edges_iter(
data=True, keys=True
):
edge_dot_label = _form_edge_label(
edge_data, label_def,
label_format, label_mask, tikz
)
edge_color = edge_data.get('color', 'black')
to_pydot_graph.add_edge(u, v, key=key,
label=edge_dot_label,
color=edge_color)
def _form_edge_label(edge_data, label_def,
label_format, label_mask, tikz):
label = '' # dot label for edge
sep_label_sets = label_format['separator']
for label_type, label_value in edge_data.iteritems():
if label_type not in label_def:
continue
# masking defined ?
# custom filter hiding based on value
if label_type in label_mask:
# not show ?
if not label_mask[label_type](label_value):
continue
# label formatting
if label_type in label_format:
type_name = label_format[label_type]
sep_type_value = label_format['type?label']
else:
type_name = ':'
sep_type_value = r',\n'
# format iterable containers using
# mathematical set notation: {...}
if isinstance(label_value, basestring):
# str is Iterable: avoid turning it to list
label_str = label_value
elif isinstance(label_value, Iterable):
s = ', '.join([str(x) for x in label_value])
label_str = r'\\{' + fill(s) + r'\\}'
else:
label_str = str(label_value)
if tikz:
type_name = r'\mathrm' + '{' + type_name + '}'
label += (type_name + sep_type_value +
label_str + sep_label_sets)
if tikz:
label = r'\\begin{matrix}' + label + r'\\end{matrix}'
label = '"' + label + '"'
return label
def _graph2pydot(graph, wrap=10, tikz=False,
rankdir='TB'):
"""Convert (possibly labeled) state graph to dot str.
@type graph: L{LabeledDiGraph}
@rtype: str
"""
dummy_nx_graph = nx.MultiDiGraph()
_states2dot_str(graph, dummy_nx_graph, wrap=wrap, tikz=tikz,
rankdir=rankdir)
_transitions2dot_str(graph.transitions, dummy_nx_graph, tikz=tikz)
pydot_graph = nx.drawing.nx_pydot.to_pydot(dummy_nx_graph)
_place_initial_states(graph, pydot_graph, tikz)
pydot_graph.set_overlap('false')
# pydot_graph.set_size('"0.25,1"')
# pydot_graph.set_ratio('"compress"')
pydot_graph.set_nodesep(0.5)
pydot_graph.set_ranksep(0.1)
return pydot_graph
def graph2dot_str(graph, wrap=10, tikz=False):
"""Convert graph to dot string.
Requires pydot.
@type graph: L{LabeledDiGraph}
@param wrap: textwrap width
@rtype: str
"""
pydot_graph = _graph2pydot(graph, wrap=wrap, tikz=tikz)
return pydot_graph.to_string()
def save_dot(graph, path, fileformat, rankdir, prog, wrap, tikz=False):
"""Save state graph to dot file.
@type graph: L{LabeledDiGraph}
@return: True upon success
@rtype: bool
"""
pydot_graph = _graph2pydot(graph, wrap=wrap, tikz=tikz,
rankdir=rankdir)
if pydot_graph is None:
# graph2dot must have printed warning already
return False
pydot_graph.set_rankdir(rankdir)
pydot_graph.set_splines('true')
# turn off graphviz warnings caused by tikz labels
if tikz:
prog = [prog, '-q 1']
pydot_graph.write(path, format=fileformat, prog=prog)
return True
def plot_pydot(graph, prog='dot', rankdir='LR', wrap=10, ax=None):
"""Plot a networkx or pydot graph using dot.
No files written or deleted from the disk.
Note that all networkx graph classes are inherited
from networkx.Graph
See Also
========
dot & pydot documentation
@param graph: to plot
@type graph: networkx.Graph | pydot.Graph
@param prog: GraphViz program to use
@type prog: 'dot' | 'neato' | 'circo' | 'twopi'
| 'fdp' | 'sfdp' | etc
@param rankdir: direction to layout nodes
@type rankdir: 'LR' | 'TB'
@param ax: axes
"""
try:
pydot_graph = _graph2pydot(graph, wrap=wrap)
except:
if isinstance(graph, nx.Graph):
pydot_graph = nx.drawing.nx_pydot.to_pydot(graph)
else:
raise TypeError(
'graph not networkx or pydot class.' +
'Got instead: ' + str(type(graph)))
pydot_graph.set_rankdir(rankdir)
pydot_graph.set_splines('true')
pydot_graph.set_bgcolor('gray')
png_str = pydot_graph.create_png(prog=prog)
# installed ?
try:
from IPython.display import display, Image
logger.debug('IPython installed.')
# called by IPython ?
try:
cfg = get_ipython().config
logger.debug('Script called by IPython.')
# Caution!!! : not ordinary dict,
# but IPython.config.loader.Config
# qtconsole ?
if cfg['IPKernelApp']:
logger.debug('Within IPython QtConsole.')
display(Image(data=png_str))
return True
except:
print('IPython installed, but not called from it.')
except ImportError:
logger.warn('IPython not found.\nSo loaded dot images not inline.')
# not called from IPython QtConsole, try Matplotlib...
# installed ?
try:
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
except:
logger.debug('Matplotlib not installed.')
logger.warn('Neither IPython QtConsole nor Matplotlib available.')
return None
logger.debug('Matplotlib installed.')
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
sio = StringIO()
sio.write(png_str)
sio.seek(0)
img = mpimg.imread(sio)
ax.imshow(img, aspect='equal')
plt.show(block=False)
return ax
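# Example usage (a sketch, assuming matplotlib is installed):
#   import networkx as nx
#   g = nx.DiGraph([(1, 2), (2, 3)])
#   ax = plot_pydot(g, prog='dot', rankdir='LR')
# Inside an IPython QtConsole the rendered PNG is displayed inline instead
# and True is returned.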
| bsd-3-clause | -4,901,562,265,867,219,000 | 29.169312 | 76 | 0.592365 | false |
moto-timo/robotframework | src/robot/utils/robotpath.py | 1 | 5342 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path
import sys
try:
from urllib import pathname2url
except ImportError:
from urllib.request import pathname2url
from robot.errors import DataError
from .encoding import decode_from_system
from .platform import WINDOWS
from .robottypes import is_unicode
if sys.version_info < (2,7):
_abspath = lambda path: os.path.join(os.getcwdu(), path)
else:
_abspath = os.path.abspath
if WINDOWS:
CASE_INSENSITIVE_FILESYSTEM = True
else:
try:
CASE_INSENSITIVE_FILESYSTEM = os.listdir('/tmp') == os.listdir('/TMP')
except OSError:
CASE_INSENSITIVE_FILESYSTEM = False
def normpath(path, case_normalize=False):
"""Replacement for os.path.normpath with some enhancements.
1. Non-Unicode paths are converted to Unicode using file system encoding.
2. Optionally lower-case paths on case-insensitive file systems.
That includes Windows and also OSX in default configuration.
3. Turn ``c:`` into ``c:\\`` on Windows instead of keeping it as ``c:``.
"""
if not is_unicode(path):
path = decode_from_system(path)
path = os.path.normpath(path)
if case_normalize and CASE_INSENSITIVE_FILESYSTEM:
path = path.lower()
if WINDOWS and len(path) == 2 and path[1] == ':':
return path + '\\'
return path
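# Illustrative examples (results depend on the platform):
#   normpath('foo//bar/../baz')          -> 'foo/baz' ('foo\\baz' on Windows)
#   normpath('C:', case_normalize=True)  -> 'c:\\' on Windows
#   normpath(b'\xc3\xa4.txt')            -> u'\xe4.txt', assuming the file
#                                           system encoding is UTF-8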
def abspath(path, case_normalize=False):
"""Replacement for os.path.abspath with some enhancements and bug fixes.
1. Non-Unicode paths are converted to Unicode using file system encoding.
2. Optionally lower-case paths on case-insensitive file systems.
That includes Windows and also OSX in default configuration.
3. Turn ``c:`` into ``c:\\`` on Windows instead of ``c:\\current\\path``.
4. Handle non-ASCII characters on working directory with Python < 2.6.5:
http://bugs.python.org/issue3426
"""
path = normpath(path, case_normalize)
if os.path.isabs(path):
return path
return normpath(_abspath(path), case_normalize)
# TODO: Investigate whether this could be replaced with os.path.relpath in RF 2.9.
def get_link_path(target, base):
"""Returns a relative path to a target from a base.
If base is an existing file, then its parent directory is considered.
Otherwise, base is assumed to be a directory.
Rationale: os.path.relpath is not available before Python 2.6
"""
path = _get_pathname(target, base)
url = pathname2url(path.encode('UTF-8'))
if os.path.isabs(path):
url = 'file:' + url
# At least Jython seems to use 'C|/Path' and not 'C:/Path'
if os.sep == '\\' and '|/' in url:
url = url.replace('|/', ':/', 1)
return url.replace('%5C', '/').replace('%3A', ':').replace('|', ':')
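# Illustrative example (POSIX-style, hypothetical paths): a log file linked
# relative to its own output directory:
#   get_link_path('/home/user/output/log.html', '/home/user/output')
#   -> 'log.html'
# A target on a different Windows drive cannot be expressed relatively, so it
# comes back as an absolute 'file:' URL instead.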
def _get_pathname(target, base):
target = abspath(target)
base = abspath(base)
if os.path.isfile(base):
base = os.path.dirname(base)
if base == target:
return os.path.basename(target)
base_drive, base_path = os.path.splitdrive(base)
# if in Windows and base and link on different drives
if os.path.splitdrive(target)[0] != base_drive:
return target
common_len = len(_common_path(base, target))
if base_path == os.sep:
return target[common_len:]
if common_len == len(base_drive) + len(os.sep):
common_len -= len(os.sep)
dirs_up = os.sep.join([os.pardir] * base[common_len:].count(os.sep))
return os.path.join(dirs_up, target[common_len + len(os.sep):])
def _common_path(p1, p2):
"""Returns the longest path common to p1 and p2.
Rationale: as os.path.commonprefix is character based, it doesn't consider
path separators as such, so it may return invalid paths:
commonprefix(('/foo/bar/', '/foo/baz.txt')) -> '/foo/ba' (instead of /foo)
"""
while p1 and p2:
if p1 == p2:
return p1
if len(p1) > len(p2):
p1 = os.path.dirname(p1)
else:
p2 = os.path.dirname(p2)
return ''
def find_file(path, basedir='.', file_type=None):
path = os.path.normpath(path.replace('/', os.sep))
for base in [basedir] + sys.path:
if not (base and os.path.isdir(base)):
continue
if not is_unicode(base):
base = decode_from_system(base)
ret = os.path.abspath(os.path.join(base, path))
if os.path.isfile(ret):
return ret
if os.path.isdir(ret) and os.path.isfile(os.path.join(ret, '__init__.py')):
return ret
default = file_type or 'File'
file_type = {'Library': 'Test library',
'Variables': 'Variable file',
'Resource': 'Resource file'}.get(file_type, default)
raise DataError("%s '%s' does not exist." % (file_type, path))
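# Illustrative call (hypothetical paths): resolving a resource file first
# against the suite directory and then against sys.path entries:
#   find_file('common/keywords.robot', basedir='/opt/suites',
#             file_type='Resource')
# returns the absolute path of the first match; if nothing matches it raises
# DataError("Resource file '...' does not exist.").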
| apache-2.0 | 160,849,017,757,947,400 | 35.094595 | 83 | 0.647885 | false |
Edraak/edraak-platform | lms/djangoapps/course_api/tests/test_forms.py | 1 | 4312 | """
Tests for Course API forms.
"""
from itertools import product
from urllib import urlencode
import ddt
from django.contrib.auth.models import AnonymousUser
from django.http import QueryDict
from openedx.core.djangoapps.util.test_forms import FormTestMixin
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..forms import CourseDetailGetForm, CourseListGetForm
class UsernameTestMixin(object):
"""
Tests the username Form field.
"""
shard = 4
def test_no_user_param_anonymous_access(self):
self.set_up_data(AnonymousUser())
self.form_data.pop('username')
self.assert_valid(self.cleaned_data)
def test_no_user_param(self):
self.set_up_data(AnonymousUser())
self.form_data.pop('username')
self.assert_valid(self.cleaned_data)
@ddt.ddt
class TestCourseListGetForm(FormTestMixin, UsernameTestMixin, SharedModuleStoreTestCase):
"""
Tests for CourseListGetForm
"""
shard = 4
FORM_CLASS = CourseListGetForm
@classmethod
def setUpClass(cls):
super(TestCourseListGetForm, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestCourseListGetForm, self).setUp()
self.student = UserFactory.create()
self.set_up_data(self.student)
def set_up_data(self, user):
"""
Sets up the initial form data and the expected clean data.
"""
self.initial = {'requesting_user': user}
self.form_data = QueryDict(
urlencode({
'username': user.username,
}),
mutable=True,
)
self.cleaned_data = {
'username': user.username,
'org': '',
'mobile': None,
'search_term': '',
'filter_': None,
'ids': None,
}
def test_basic(self):
self.assert_valid(self.cleaned_data)
def test_org(self):
org_value = 'test org name'
self.form_data['org'] = org_value
self.cleaned_data['org'] = org_value
self.assert_valid(self.cleaned_data)
@ddt.data(
*product(
[('mobile', 'mobile_available')],
[(True, True), (False, False), ('1', True), ('0', False), (None, None)],
)
)
@ddt.unpack
def test_filter(self, param_field_name, param_field_value):
param_name, field_name = param_field_name
param_value, field_value = param_field_value
self.form_data[param_name] = param_value
self.cleaned_data[param_name] = field_value
if field_value is not None:
self.cleaned_data['filter_'] = {field_name: field_value}
self.assert_valid(self.cleaned_data)
class TestCourseDetailGetForm(FormTestMixin, UsernameTestMixin, SharedModuleStoreTestCase):
"""
Tests for CourseDetailGetForm
"""
shard = 4
FORM_CLASS = CourseDetailGetForm
@classmethod
def setUpClass(cls):
super(TestCourseDetailGetForm, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestCourseDetailGetForm, self).setUp()
self.student = UserFactory.create()
self.set_up_data(self.student)
def set_up_data(self, user):
"""
Sets up the initial form data and the expected clean data.
"""
self.initial = {'requesting_user': user}
self.form_data = QueryDict(
urlencode({
'username': user.username,
'course_key': unicode(self.course.id),
}),
mutable=True,
)
self.cleaned_data = {
'username': user.username,
'course_key': self.course.id,
}
def test_basic(self):
self.assert_valid(self.cleaned_data)
#-- course key --#
def test_no_course_key_param(self):
self.form_data.pop('course_key')
self.assert_error('course_key', "This field is required.")
def test_invalid_course_key(self):
self.form_data['course_key'] = 'invalid_course_key'
self.assert_error('course_key', "'invalid_course_key' is not a valid course key.")
| agpl-3.0 | -8,221,865,995,090,685,000 | 27.368421 | 91 | 0.612013 | false |
tastyproject/tasty | tasty/tastyc/tastyc.py | 1 | 9949 | # -*- coding: utf-8 -*-
"""tastyc configuration module"""
import copy
import sys
import os.path
from ast import *
import gc
from tasty import state
from tasty.exc import TastySyntaxError
from tasty.types import *
from tasty.tastyc import bases
from tasty.tastyc.codegen import to_source
from tasty.tastyc.analyzation import Parenter
from tasty.tastyc.analyzation import Qualificator
from tasty.tastyc.analyzation import Symbolizer
from tasty.tastyc.analyzation import AttributePropagator
from tasty.tastyc.analyzation import ConstantSymbolizer
from tasty.tastyc.pass_dispatching import OnlinePassDispatcher, SetupPassDispatcher, OnlinePruner, SetupPruner
from tasty.tastyc.transformation import DriverParameterPropagator
from tasty.tastyc.transformation import OnlineTransformer, SetupTransformer
from tasty.tastyc.transformation import KwargsPropagator
from tasty.tastyc.transformation import SimpleEvaluator
from tasty.tastyc.transformation import PlainTypeConverter
from tasty.tastyc.transformation import TypeCompletionTransformer
from tasty.tastyc.transformation import ConstantPropagator
from tasty.tastyc.transformation import DanglingGarbledBinder
from tasty.tastyc.analyze_costs import analyze_costs
__all__ = ["compiler_start", "compiler_start_driver_mode"]
state.my_globals = globals()
def compile_protocol():
"""creates custom protocol versions tailored for setup
and online phase"""
config = state.config
full_ast = bases.TastyCBase.full_ast
setup_ast = copy.deepcopy(full_ast)
online_ast = copy.deepcopy(full_ast)
setup_symbol_table = copy.deepcopy(bases.TastyCBase.symbol_table)
online_symbol_table = copy.deepcopy(bases.TastyCBase.symbol_table)
if "types" not in bases.TastyCBase.imports:
types_import = ImportFrom(module='tasty.types',
names=[alias(name='*', asname=None)], level=0)
setup_ast.body.insert(0, types_import)
online_ast.body.insert(0, types_import)
if "conversions" not in bases.TastyCBase.imports:
con_import = ImportFrom(module='tasty.types',
names=[alias(name='conversions', asname=None)], level=0)
setup_ast.body.insert(0, con_import)
online_ast.body.insert(0, con_import)
if __debug__:
state.log.info("\ncompiling setup protocol version...")
setup_ast = SetupTransformer(setup_symbol_table).visit(setup_ast)
SetupPassDispatcher(setup_symbol_table).visit(setup_ast)
setup_ast = SetupPruner(setup_symbol_table).visit(setup_ast)
setup_ast = TypeCompletionTransformer(setup_symbol_table).visit(setup_ast)
fix_missing_locations(setup_ast)
setup_filename = protocol_path("{0}.py".format(config.final_setup_protocol))
f = open(setup_filename, "w")
f.write(to_source(setup_ast))
f.close()
if __debug__:
state.log.info("\ncompiling online protocol version...")
OnlineTransformer(online_symbol_table).visit(online_ast)
OnlinePassDispatcher(online_symbol_table).visit(online_ast)
OnlinePruner(online_symbol_table).visit(online_ast)
TypeCompletionTransformer(online_symbol_table).visit(online_ast)
fix_missing_locations(online_ast)
online_filename = protocol_path("{0}.py".format(config.final_online_protocol))
f = open(online_filename, "w")
f.write(to_source(online_ast))
f.close()
return setup_ast, online_ast
def propagate_constants(ast):
p = ConstantPropagator()
ast = p.visit(ast)
p.cleanup_symbol_table()
p.visit_Assign = p.visit_Assign_2nd_pass
p.visit_Name = p.visit_Name_2nd_pass
ast = p.visit(ast)
return ast
def bind_dangling_garbleds(ast):
p = DanglingGarbledBinder()
full_ast = p.visit(ast)
p.finish()
return full_ast
def do_driver_selection(original_ast):
log = state.log
config = state.config
num_drivers = len(state.driver_classes)
if num_drivers > 1:
if config.driver_name in state.driver_classes:
state.driver_class = config.driver_name
else:
while 1:
chosen_driver = int(raw_input("Found %d different 'Driver' implementations.\nPlease select intended driver via -D <DriverName> flag, or choose from the following list:\n%s\n:" %
(num_drivers,
"\n".join("%d - %s" % (ix, cname)
for ix,cname in enumerate(state.driver_classes)))
))
if 0 <= chosen_driver < len(state.driver_classes):
state.driver_class = state.driver_classes[chosen_driver]
break
elif num_drivers == 1:
state.driver_class = state.driver_classes[0]
if config.test_mode:
config.driver_mode = True
bases.assign_driver(original_ast, "TestDriver")
state.use_driver = True
if "TestDriver" not in bases.TastyCBase.imports:
driver_import = ImportFrom(module='tasty.types.driver',
names=[alias(name='TestDriver', asname=None)], level=0)
bases.TastyCBase.imports.add("TestDriver")
original_ast.body.insert(0, driver_import)
elif config.use_driver:
if not state.driver_class:
state.log.error("You selected driver mode without implementing a test driver.\nPlease provide one by subclassing from 'Driver' in the protocol!")
sys.exit(-1)
if not bases.check_driver_assignment(state.driver_class):
bases.assign_driver(original_ast, state.driver_class)
if not state.protocol_instrumentated:
state.log.error("Error: You requested driver mode, but provided a protocol without the 3rd formal parameter 'params'.\nPlease provide a protocol with the signature 'protocol(client, server, params)'")
sys.exit(-1)
elif state.driver_class or state.protocol_instrumentated:
if not bases.check_driver_assignment("IODriver"):
bases.assign_driver(original_ast, "IODriver", True)
if "IODriver" not in bases.TastyCBase.imports:
driver_import = ImportFrom(module='tasty.types.driver',
names=[alias(name='IODriver', asname=None)], level=0)
bases.TastyCBase.imports.add("IODriver")
original_ast.body.insert(0, driver_import)
def clean_protocol_environment():
"""cleaning possibly created modules and memory"""
bases.TastyCBase.symbol_table.clear()
try:
del sys.modules[state.config.final_setup_protocol]
except KeyError:
pass
try:
del sys.modules[state.config.final_online_protocol]
except KeyError:
pass
gc.collect()
def compiler_start():
"""analyzes protocol structure, runs several optimization technics,
retrieves abstract costs and transforms tasty protocols into
internal representation.
For now we have implemented constant propagation, partial evaluation
and dead code elimination."""
log = state.log
config = state.config
#if config.exclude_compiler:
#return
if __debug__:
log.info("starting tasty compiler...")
# this can be important if there are defined and registered new tasty
# primitives get available at analyzation time in tasty protocols
old_path = sys.path
sys.path = [config.protocol_dir, ] + sys.path
g = globals()
protocol = __import__("protocol", g,
g, [])
sys.path = old_path
state.my_globals.update(protocol.__dict__)
bases.TastyCBase.symbol_table.clear()
text = open(config.protocol_file_path).read().replace("\r\n", "\n")
bases.TastyCBase.original_ast = original_ast = compile(
text, config.protocol_file_path, "exec", PyCF_ONLY_AST)
Parenter().visit(original_ast)
Qualificator().visit(original_ast)
do_driver_selection(original_ast)
AttributePropagator().visit(original_ast)
fix_missing_locations(original_ast)
f = open(protocol_path("protocol_final.py"), "w")
f.write(to_source(original_ast))
f.close()
protocol = __import__("protocol_final", g, g, [])
full_ast = bases.TastyCBase.full_ast = original_ast
if not config.use_driver:
if state.assigned_driver_node:
bases.TastyCBase.full_ast = full_ast = copy.deepcopy(
original_ast)
bases.TastyCBase.full_ast = full_ast = DriverParameterPropagator(
protocol.driver.next_params().next()).visit(full_ast)
ConstantSymbolizer().visit(full_ast)
full_ast = propagate_constants(full_ast)
full_ast = SimpleEvaluator().visit(full_ast)
full_ast = PlainTypeConverter().visit(full_ast)
fix_missing_locations(full_ast)
symbolizer = Symbolizer(state.my_globals)
symbolizer.visit(full_ast)
try:
symbolizer.check()
except Exception, e:
state.log.exception(e)
sys.exit(-1)
full_ast = bind_dangling_garbleds(full_ast)
setup_ast, online_ast = compile_protocol()
analyze_costs(setup_ast, online_ast)
if __debug__:
log.info("tasty compiler done")
def compiler_start_driver_mode(kwargs):
"""called before each driver run iteration"""
# cleanup
clean_protocol_environment()
bases.TastyCBase.full_ast = full_ast = copy.deepcopy(
bases.TastyCBase.original_ast)
DriverParameterPropagator(kwargs).visit(full_ast)
ConstantSymbolizer().visit(full_ast)
full_ast = propagate_constants(full_ast)
full_ast = SimpleEvaluator().visit(full_ast)
full_ast = PlainTypeConverter().visit(full_ast)
fix_missing_locations(full_ast)
symbolizer = Symbolizer(state.my_globals)
symbolizer.visit(full_ast)
symbolizer.check()
full_ast = bind_dangling_garbleds(full_ast)
# compile ast into internal representation (actually real python code)
setup_ast, online_ast = compile_protocol()
# static cost analyzation
analyze_costs(setup_ast, online_ast)
| gpl-3.0 | -1,735,557,266,108,462,600 | 34.280142 | 212 | 0.679666 | false |
mitsei/dlkit | dlkit/json_/authorization/searches.py | 1 | 11337 | """JSON implementations of authorization searches."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods,too-few-public-methods
# Number of methods are defined in specification
# pylint: disable=protected-access
# Access to protected methods allowed in package json package scope
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from . import objects
from . import queries
from .. import utilities
from ..osid import searches as osid_searches
from ..primitives import Id
from ..utilities import get_registry
from dlkit.abstract_osid.authorization import searches as abc_authorization_searches
from dlkit.abstract_osid.osid import errors
class AuthorizationSearch(abc_authorization_searches.AuthorizationSearch, osid_searches.OsidSearch):
"""``AuthorizationSearch`` defines the interface for specifying authorization search options."""
def __init__(self, runtime):
self._namespace = 'authorization.Authorization'
self._runtime = runtime
record_type_data_sets = get_registry('RESOURCE_RECORD_TYPES', runtime)
self._record_type_data_sets = record_type_data_sets
self._all_supported_record_type_data_sets = record_type_data_sets
self._all_supported_record_type_ids = []
self._id_list = None
for data_set in record_type_data_sets:
self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
osid_searches.OsidSearch.__init__(self, runtime)
@utilities.arguments_not_none
def search_among_authorizations(self, authorization_ids):
"""Execute this search among the given list of authorizations.
arg: authorization_ids (osid.id.IdList): list of
authorizations
raise: NullArgument - ``authorization_ids`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
self._id_list = authorization_ids
@utilities.arguments_not_none
def order_authorization_results(self, authorization_search_order):
"""Specify an ordering to the search results.
arg: authorization_search_order
(osid.authorization.AuthorizationSearchOrder):
authorization search order
raise: NullArgument - ``authorization_search_order`` is
``null``
raise: Unsupported - ``authorization_search_order`` is not of
this service
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_authorization_search_record(self, authorization_search_record_type):
"""Gets the authorization search record corresponding to the given authorization search record ``Type``.
This method is used to retrieve an object implementing the
requested record.
arg: authorization_search_record_type (osid.type.Type): an
authorization search record type
return: (osid.authorization.records.AuthorizationSearchRecord) -
the authorization search record
raise: NullArgument - ``authorization_search_record_type`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(authorization_search_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class AuthorizationSearchResults(abc_authorization_searches.AuthorizationSearchResults, osid_searches.OsidSearchResults):
"""This interface provides a means to capture results of a search."""
def __init__(self, results, query_terms, runtime):
# if you don't iterate, then .count() on the cursor is an inaccurate representation of limit / skip
# self._results = [r for r in results]
self._namespace = 'authorization.Authorization'
self._results = results
self._query_terms = query_terms
self._runtime = runtime
self.retrieved = False
def get_authorizations(self):
"""Gets the authorization list resulting from the search.
return: (osid.authorization.AuthorizationList) - the
authorization list
raise: IllegalState - list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.AuthorizationList(self._results, runtime=self._runtime)
authorizations = property(fget=get_authorizations)
def get_authorization_query_inspector(self):
"""Gets the inspector for the query to examine the terms used in the search.
return: (osid.authorization.AuthorizationQueryInspector) - the
query inspector
*compliance: mandatory -- This method must be implemented.*
"""
return queries.AuthorizationQueryInspector(self._query_terms, runtime=self._runtime)
authorization_query_inspector = property(fget=get_authorization_query_inspector)
@utilities.arguments_not_none
def get_authorization_search_results_record(self, authorization_search_record_type):
"""Gets the authorization search results record corresponding to the given authorization search record ``Type``.
This method is used to retrieve an object implementing the
requested record.
arg: authorization_search_record_type (osid.type.Type): an
authorization search record type
return:
(osid.authorization.records.AuthorizationSearchResultsRe
cord) - the authorization search results record
raise: NullArgument - ``authorization_search_record_type`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(authorization_search_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class VaultSearch(abc_authorization_searches.VaultSearch, osid_searches.OsidSearch):
"""The interface for governing vault searches."""
def __init__(self, runtime):
self._namespace = 'authorization.Vault'
self._runtime = runtime
record_type_data_sets = get_registry('RESOURCE_RECORD_TYPES', runtime)
self._record_type_data_sets = record_type_data_sets
self._all_supported_record_type_data_sets = record_type_data_sets
self._all_supported_record_type_ids = []
self._id_list = None
for data_set in record_type_data_sets:
self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
osid_searches.OsidSearch.__init__(self, runtime)
@utilities.arguments_not_none
def search_among_vaults(self, vault_ids):
"""Execute this search among the given list of vaults.
arg: vault_ids (osid.id.IdList): list of vaults
raise: NullArgument - ``vault_ids`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
self._id_list = vault_ids
@utilities.arguments_not_none
def order_vault_results(self, vault_search_order):
"""Specify an ordering to the search results.
arg: vault_search_order
(osid.authorization.VaultSearchOrder): vault search
order
raise: NullArgument - ``vault_search_order`` is ``null``
raise: Unsupported - ``vault_search_order`` is not of this
service
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_vault_search_record(self, vault_search_record_type):
"""Gets the vault search record corresponding to the given vault search record ``Type``.
This method is used to retrieve an object implementing the
requested record.
arg: vault_search_record_type (osid.type.Type): a vault
search record type
return: (osid.authorization.records.VaultSearchRecord) - the
vault search record
raise: NullArgument - ``vault_search_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(vault_search_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class VaultSearchResults(abc_authorization_searches.VaultSearchResults, osid_searches.OsidSearchResults):
"""This interface provides a means to capture results of a search."""
def __init__(self, results, query_terms, runtime):
# if you don't iterate, then .count() on the cursor is an inaccurate representation of limit / skip
# self._results = [r for r in results]
self._namespace = 'authorization.Vault'
self._results = results
self._query_terms = query_terms
self._runtime = runtime
self.retrieved = False
def get_vaults(self):
"""Gets the vault list resulting from the search.
return: (osid.authorization.VaultList) - the vault list
raise: IllegalState - list has already been retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.VaultList(self._results, runtime=self._runtime)
vaults = property(fget=get_vaults)
def get_vault_query_inspector(self):
"""Gets the inspector for the query to examine the terms used in the search.
return: (osid.authorization.VaultQueryInspector) - the vault
query inspector
*compliance: mandatory -- This method must be implemented.*
"""
return queries.VaultQueryInspector(self._query_terms, runtime=self._runtime)
vault_query_inspector = property(fget=get_vault_query_inspector)
@utilities.arguments_not_none
def get_vault_search_results_record(self, vault_search_record_type):
"""Gets the vault search results record corresponding to the given vault search record ``Type``.
This method is used to retrieve an object implementing the
requested record.
arg: vault_search_record_type (osid.type.Type): a vault
search record type
return: (osid.authorization.records.VaultSearchResultsRecord) -
the vault search results record
raise: NullArgument - ``vault_search_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(vault_search_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
| mit | -2,990,941,319,276,016,000 | 40.988889 | 121 | 0.659963 | false |
chrisguiney/mudpy | mudpy/character/__init__.py | 1 | 1901 | from functools import partial
from .. import races
from .. import classes
from ..magic import SpellBook
class Character(object):
def __init__(self, name, description, equipment, inventory, attributes, level=1, race="Human", base_class="Fighter",
spellbook=None):
"""
@param name: Name of Character
@type name: str
@param description: Description of Character
@type description: str
@param equipment: Dictionary of item slot to equipped item
@type equipment: dict
@param inventory: List of items
@type inventory: list of item
@param attributes: Dictionary of attributes to values
@type attributes: dict
@param level: Integer level number
@type level: int
@param race: Name of race as string
@type race: str
@param base_class: Name of base character class as string
@type base_class: str
@param spellbook: Spellbook object of memorized spells
@type spellbook: SpellBook
@return: None
"""
self.name = name
self.level = level
self.description = description
self.attributes = attributes
self.equipment = equipment
self.inventory = inventory
self.race = getattr(races, race, partial(races.raise_invalid_race, race))()
self.base_class = getattr(classes, base_class)()
self.spellbook = spellbook
if self.base_class.is_caster and spellbook is None:
self.spellbook = SpellBook()
def cast(self, spell, on_target):
return (spell.base_damage.roll() + spell.level_modifier(self.level)) - on_target.resistance(spell.damage_type)
def resistance(self, damage_type):
pass
def equip(self, item):
assert(item in self.inventory)
        self.equipment[item.slot] = item
| apache-2.0 | -1,466,427,402,006,687,000 | 27.818182 | 120 | 0.624934 | false |
kevinkahn/softconsole | screens/specificscreens/weatherscreen.py | 1 | 6148 | from collections import OrderedDict
import debug
import logsupport
from screens import screen
import screens.__screens as screens
from keyspecs import toucharea
from utils import utilities, fonts, hw
from stores import valuestore
from utils.utilfuncs import wc, fmt
from utils.weatherfromatting import CreateWeathBlock
fsizes = ((20, False, False), (30, True, False), (45, True, True))
class WeatherScreenDesc(screen.ScreenDesc):
def __init__(self, screensection, screenname):
debug.debugPrint('Screen', "New WeatherScreenDesc ", screenname)
super().__init__(screensection, screenname)
butsize = self.ButSize(1, 1, 0)
self.Keys = OrderedDict({'condorfcst': toucharea.TouchPoint('condorfcst', (
self.HorizBorder + .5 * butsize[0], self.TopBorder + .5 * butsize[1]), butsize,
proc=self.CondOrFcst)})
self.currentconditions = True # show conditions or forecast
screen.AddUndefaultedParams(self, screensection, location='',
LocationSize=0) # default to no location now that screen title in use
self.SetScreenTitle(screen.FlatenScreenLabel(self.label), 50, self.CharColor)
self.condformat = u"{d[0]} {d[1]}\u00B0F", u" Feels like: {d[2]}\u00B0", "Wind {d[3]}@{d[4]}"
self.condfields = list(((self.location, 'Cond', x) for x in ('Sky', 'Temp', 'Feels', 'WindDir', 'WindMPH')))
# self.dayformat = "Sunrise: {d[0]:02d}:{d[1]:02d}","Sunset: {d[2]:02d}:{d[3]:02d}","Moon rise: {d[4]} set: {d[5]}","{d[6]}% illuminated"
# self.dayfields = list(((self.location, 'Cond', x) for x in ('SunriseH','SunriseM','SunsetH','SunsetM','Moonrise','Moonset','MoonPct')))
self.dayformat = "Sunrise: {d[0]}", "Sunset: {d[1]}" # , "Moon rise: {d[2]} set: {d[3]}"
self.dayfields = list(((self.location, 'Cond', x) for x in ('Sunrise', 'Sunset'))) # , 'Moonrise', 'Moonset')))
self.footformat = "Readings as of {d[0]}",
self.footfields = ((self.location, 'Cond', 'Age'),)
self.fcstformat = u"{d[0]} {d[1]}\u00B0/{d[2]}\u00B0 {d[3]}", "Wind: {d[4]}"
self.fcstfields = list(((self.location, 'Fcst', x) for x in ('Day', 'High', 'Low', 'Sky', 'WindSpd')))
try:
self.store = valuestore.ValueStores[self.location]
except KeyError:
logsupport.Logs.Log("Weather screen {} using non-existent location {}".format(screenname, self.location),
severity=logsupport.ConsoleWarning)
raise ValueError
utilities.register_example("WeatherScreenDesc", self)
# noinspection PyUnusedLocal
def CondOrFcst(self):
self.currentconditions = not self.currentconditions
self.ReInitDisplay()
def ScreenContentRepaint(self):
# todo given the useable vert space change should check for overflow or auto size font
vert_off = self.startvertspace
if not self.store.ValidWeather:
renderedlines = [
fonts.fonts.Font(45, "").render(x, 0, wc(self.CharColor)) for x in self.store.Status]
for l in renderedlines:
hw.screen.blit(l, ((hw.screenwidth - l.get_width()) / 2, vert_off))
vert_off = vert_off + 60 # todo use useable space stuff and vert start
else:
renderedlines = []
if self.LocationSize != 0:
locblk = fonts.fonts.Font(self.LocationSize, "").render(
fmt.format("{d}", d=self.store.GetVal(('Cond', 'Location'))), 0,
wc(self.CharColor))
hw.screen.blit(locblk, ((hw.screenwidth - locblk.get_width()) / 2, vert_off))
vert_off = vert_off + locblk.get_height() + 10 # todo gap of 10 pixels is arbitrary
h = vert_off
if self.currentconditions: # todo add max width and wrap
renderedlines.append(
CreateWeathBlock(self.condformat, self.condfields, "", [45, 25, 35], self.CharColor,
(self.location, 'Cond', 'Icon'), False))
h = h + renderedlines[-1].get_height()
renderedlines.append(
CreateWeathBlock(self.dayformat, self.dayfields, "", [30], self.CharColor, None, True))
h = h + renderedlines[-1].get_height()
renderedlines.append(
CreateWeathBlock(self.footformat, self.footfields, "", [25], self.CharColor, None, True))
h = h + renderedlines[-1].get_height()
s = (self.useablevertspace - h) / (len(renderedlines) - 1) if len(renderedlines) > 1 else 0
for l in renderedlines:
hw.screen.blit(l, ((hw.screenwidth - l.get_width()) / 2, vert_off))
vert_off = vert_off + l.get_height() + s
else:
fcstlines = 0
if hw.screenwidth > 350:
screenmaxfcstwidth = self.useablehorizspace // 2 - 10
else:
screenmaxfcstwidth = self.useablehorizspace
fcstdays = min(valuestore.GetVal((self.location, 'FcstDays')), 14) # cap at 2 weeks
maxfcstwidth = 0
maxfcstheight = 0
if fcstdays > 0:
for i in range(fcstdays):
renderedlines.append(
CreateWeathBlock(self.fcstformat, self.fcstfields, "", [25], self.CharColor,
# todo compute font size based on useable
(self.location, 'Fcst', 'Icon'), False, day=i,
maxhorizwidth=screenmaxfcstwidth))
if renderedlines[-1].get_width() > maxfcstwidth: maxfcstwidth = renderedlines[-1].get_width()
if renderedlines[-1].get_height() > maxfcstheight: maxfcstheight = renderedlines[
-1].get_height()
fcstlines += 1
else:
renderedlines.append(fonts.fonts.Font(35, "").render("No Forecast Available", 0,
wc(self.CharColor)))
if hw.screenwidth > 350:
h = h + renderedlines[-1].get_height() * 5
fcstlines = 2 + (fcstlines + 1) / 2
usewidth = hw.screenwidth / 2
else:
h = h + renderedlines[-1].get_height() * 5
fcstlines = 5
usewidth = hw.screenwidth
s = (self.useablevertspace - h) / (fcstlines + 1)
startvert = vert_off
horiz_off = (usewidth - maxfcstwidth) / 2
swcol = -int(-fcstdays // 2) - 1
for dy, fcst in enumerate(renderedlines):
hw.screen.blit(fcst, (horiz_off, vert_off))
vert_off = vert_off + s + maxfcstheight
if (dy == swcol) and (hw.screenwidth > 350):
horiz_off = horiz_off + usewidth
vert_off = startvert
def InitDisplay(self, nav):
self.currentconditions = True
super().InitDisplay(nav)
def ReInitDisplay(self):
super().ReInitDisplay()
screens.screentypes["Weather"] = WeatherScreenDesc
| apache-2.0 | 1,973,004,741,115,924,700 | 40.540541 | 141 | 0.658913 | false |
TotalVerb/territory | territory/recurser.py | 1 | 3793 | # ------------------------------------------------------------------------
#
# This file is part of Territory.
#
# Territory is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Territory is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Territory. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright Territory Development Team
# <https://github.com/TotalVerb/territory>
# Copyright Conquer Development Team (http://code.google.com/p/pyconquer/)
#
# ------------------------------------------------------------------------
class Recurser:
def __init__(self, board):
self.board = board
def count_dumps_on_island(self, x, y):
dumps_coord_list = []
player = self.board.data[x, y]
# Crawl island from (x, y)
land_area = self.crawl(x, y, [player])
# Let's iterate through crawled places
for coordinate in land_area:
# Check if current coordinate has a dump
# (data can take the coordinate-string)
actor = self.board.actor_at(coordinate)
if actor and actor.dump:
assert actor.side == player
dumps_coord_list.append(coordinate)
return [dumps_coord_list, land_area]
def find_land(self):
"""Find a square with land."""
for x in range(self.board.width):
for y in range(self.board.height):
if self.board.data[x, y] > 0:
return x, y
def iscontiguous(self):
"""Return true if every land is connected to every other."""
# Check if there's at least one land. No point handling vacuous truth.
land_area = self.board.count_world_area()
assert land_area > 0
x, y = self.find_land()
return len(self.crawl(x, y, [1, 2, 3, 4, 5, 6])) == land_area
def get_island_border_lands(self, x, y):
land_area_set = set()
island_owner = self.board.data[x, y]
self.crawl(x, y, [island_owner], land_area_set)
border_area_set = set()
for xy in land_area_set:
x1, y1 = xy
for nx, ny in self.board.neighbours(x1, y1):
if self.board.isvalid(nx, ny) \
and self.board.data[nx, ny] != island_owner \
and self.board.data[nx, ny] != 0:
# This works because set can't have duplicates
border_area_set.add((nx, ny))
return border_area_set
def island_size(self, x, y):
"""Count the amount of land of the specified island."""
return len(self.crawl(x, y, [self.board.data[x, y]]))
def crawl(self, x, y, find_list, crawled=None):
"""
x,y -> coordinates to start "crawling"
recursion_set -> set to hold already "crawled" coordinates
find_list -> list of players whose lands are to be searched
"""
crawled = crawled if crawled is not None else set()
if self.board.isvalid(x, y) and \
self.board.data[x, y] in find_list and \
(x, y) not in crawled:
crawled.add((x, y))
# Crawl neighbours
for nx, ny in self.board.neighbours(x, y):
self.crawl(nx, ny, find_list, crawled)
return crawled # places crawled
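# Illustrative sketch (hypothetical board with simple 4-connected
# board.neighbours and data mapping (x, y) -> owner id): given
#   board.data = {(0, 0): 1, (1, 0): 1, (0, 1): 2, (1, 1): 1}
# crawl(0, 0, [1]) flood-fills player 1's island from (0, 0) and returns
# {(0, 0), (1, 0), (1, 1)}, so island_size(0, 0) == 3 and
# get_island_border_lands(0, 0) contains (0, 1).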
| gpl-3.0 | -9,116,925,893,547,967,000 | 39.351064 | 78 | 0.561033 | false |
CERNDocumentServer/invenio | modules/weblinkback/lib/weblinkback_dblayer.py | 1 | 13993 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebLinkback - Database Layer"""
from invenio.dbquery import run_sql
from invenio.weblinkback_config import CFG_WEBLINKBACK_STATUS, \
CFG_WEBLINKBACK_ORDER_BY_INSERTION_TIME, \
CFG_WEBLINKBACK_DEFAULT_USER, \
CFG_WEBLINKBACK_PAGE_TITLE_STATUS
from invenio.textutils import xml_entities_to_utf8
def get_all_linkbacks(recid=None,
status=None,
order=CFG_WEBLINKBACK_ORDER_BY_INSERTION_TIME["ASC"],
linkback_type=None,
limit=None,
full_count_only=False):
"""
Get all linkbacks
@param recid: of one record, of all if None
@param status: with a certain status, of all if None
@param order: order by insertion time either "ASC" or "DESC"
@param linkback_type: of a certain type, of all if None
@param limit: maximum result count, all if None
@param full_count_only: return only full result count (does not consider "limit"), result set if False
@return [(linkback_id,
origin_url,
recid,
additional_properties,
linkback_type,
linkback_status,
insert_time)]
in order by id, up to "limited" results
OR integer if count_only
"""
header_sql = """SELECT id,
origin_url,
id_bibrec,
additional_properties,
type,
status,
insert_time
FROM lnkENTRY"""
if full_count_only:
header_sql = 'SELECT count(id) FROM lnkENTRY'
conditions = []
params = []
def add_condition(column, value):
if value:
if not conditions:
conditions.append('WHERE %s=%%s' % column)
else:
conditions.append('AND %s=%%s' % column)
params.append(value)
add_condition('id_bibrec', recid)
add_condition('status', status)
add_condition('type', linkback_type)
order_sql = 'ORDER by id %s' % order
limit_sql = ''
if limit:
limit_sql = 'LIMIT %s' % limit
res = run_sql('%s %s %s %s' % (header_sql, ' '.join(conditions), order_sql, limit_sql), tuple(params))
if full_count_only:
return int(res[0][0])
else:
return res
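# Illustrative call (constants imported above): the ten oldest pending
# linkbacks attached to record 42:
#   get_all_linkbacks(recid=42,
#                     status=CFG_WEBLINKBACK_STATUS['PENDING'],
#                     limit=10)
# Passing full_count_only=True with the same filters returns only the total
# number of matching rows as an int.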
def approve_linkback(linkbackid, user_info):
"""
Approve linkback
@param linkbackid: linkback id
@param user_info: user info
"""
update_linkback_status(linkbackid, CFG_WEBLINKBACK_STATUS['APPROVED'], user_info)
def reject_linkback(linkbackid, user_info):
"""
Reject linkback
@param linkbackid: linkback id
@param user_info: user info
"""
update_linkback_status(linkbackid, CFG_WEBLINKBACK_STATUS['REJECTED'], user_info)
def update_linkback_status(linkbackid, new_status, user_info = None):
"""
Update status of a linkback
@param linkbackid: linkback id
@param new_status: new status
@param user_info: user info
"""
if user_info == None:
user_info = {}
user_info['uid'] = CFG_WEBLINKBACK_DEFAULT_USER
run_sql("""UPDATE lnkENTRY
SET status=%s
WHERE id=%s
""", (new_status, linkbackid))
logid = run_sql("""INSERT INTO lnkLOG (id_user, action, log_time)
VALUES
(%s, %s, NOW());
SELECT LAST_INSERT_ID();
""", (user_info['uid'], new_status))
run_sql("""INSERT INTO lnkENTRYLOG (id_lnkENTRY , id_lnkLOG)
VALUES
(%s, %s);
""", (linkbackid, logid))
def create_linkback(origin_url, recid, additional_properties, linkback_type, user_info):
"""
Create linkback
@param origin_url: origin URL,
@param recid: recid
@param additional_properties: additional properties
@param linkback_type: linkback type
@param user_info: user info
@return id of the created linkback
"""
linkbackid = run_sql("""INSERT INTO lnkENTRY (origin_url, id_bibrec, additional_properties, type, status, insert_time)
VALUES
(%s, %s, %s, %s, %s, NOW());
SELECT LAST_INSERT_ID();
""", (origin_url, recid, str(additional_properties), linkback_type, CFG_WEBLINKBACK_STATUS['PENDING']))
logid = run_sql("""INSERT INTO lnkLOG (id_user, action, log_time)
VALUES
(%s, %s, NOW());
SELECT LAST_INSERT_ID();
""", (user_info['uid'], CFG_WEBLINKBACK_STATUS['INSERTED']))
run_sql("""INSERT INTO lnkENTRYLOG (id_lnkENTRY, id_lnkLOG)
VALUES
(%s, %s);
""", (linkbackid, logid))
# add url title entry if necessary
if len(run_sql("""SELECT url
FROM lnkENTRYURLTITLE
WHERE url=%s
""", (origin_url, ))) == 0:
manual_set_title = 0
title = ""
if additional_properties != "" and 'title' in additional_properties.keys():
manual_set_title = 1
title = additional_properties['title']
run_sql("""INSERT INTO lnkENTRYURLTITLE (url, title, manual_set)
VALUES
(%s, %s, %s)
""", (origin_url, title, manual_set_title))
return linkbackid
def get_approved_latest_added_linkbacks(count):
"""
Get approved latest added linkbacks
@param count: count of the linkbacks
@return [(linkback_id,
origin_url,
recid,
additional_properties,
type,
status,
insert_time)]
in descending order by insert_time
"""
return run_sql("""SELECT id,
origin_url,
id_bibrec,
additional_properties,
type,
status,
insert_time
FROM lnkENTRY
WHERE status=%s
ORDER BY insert_time DESC
LIMIT %s
""", (CFG_WEBLINKBACK_STATUS['APPROVED'], count))
def get_url_list(list_type):
"""
@param list_type: of CFG_WEBLINKBACK_LIST_TYPE
@return (url0, ..., urln) in ascending order by url
"""
result = run_sql("""SELECT url
FROM lnkADMINURL
WHERE list=%s
ORDER by url ASC
""", (list_type, ))
return tuple(url[0] for (url) in result)
def get_urls():
"""
Get all URLs and the corresponding listType
@return ((url, CFG_WEBLINKBACK_LIST_TYPE), ..., (url, CFG_WEBLINKBACK_LIST_TYPE)) in ascending order by url
"""
return run_sql("""SELECT url, list
FROM lnkADMINURL
ORDER by url ASC
""")
def url_exists(url, list_type=None):
"""
Check if url exists
@param url
@param list_type: specific list of CFG_WEBLINKBACK_LIST_TYPE, all if None
@return True or False
"""
header_sql = """SELECT url
FROM lnkADMINURL
WHERE url=%s
"""
optional_sql = " AND list=%s"
result = None
if list_type:
result = run_sql(header_sql + optional_sql, (url, list_type))
else:
result = run_sql(header_sql, (url, ))
if result != ():
return True
else:
return False
def add_url_to_list(url, list_type, user_info):
"""
Add a URL to a list
@param url: unique URL string for all lists
@param list_type: of CFG_WEBLINKBACK_LIST_TYPE
@param user_info: user info
@return id of the created url
"""
urlid = run_sql("""INSERT INTO lnkADMINURL (url, list)
VALUES
(%s, %s);
SELECT LAST_INSERT_ID();
""", (url, list_type))
logid = run_sql("""INSERT INTO lnkLOG (id_user, action, log_time)
VALUES
(%s, %s, NOW());
SELECT LAST_INSERT_ID();
""", (user_info['uid'], CFG_WEBLINKBACK_STATUS['INSERTED']))
run_sql("""INSERT INTO lnkADMINURLLOG (id_lnkADMINURL, id_lnkLOG)
VALUES
(%s, %s);
""", (urlid, logid))
return urlid
def remove_url(url):
"""
Remove a URL from list
@param url: unique URL string for all lists
"""
# get ids
urlid = run_sql("""SELECT id
FROM lnkADMINURL
WHERE url=%s
""", (url, ))[0][0]
logids = run_sql("""SELECT log.id
FROM lnkLOG log
JOIN lnkADMINURLLOG url_log
ON log.id=url_log.id_lnkLOG
WHERE url_log.id_lnkADMINURL=%s
""", (urlid, ))
# delete url and url log
run_sql("""DELETE FROM lnkADMINURL
WHERE id=%s;
DELETE FROM lnkADMINURLLOG
WHERE id_lnkADMINURL=%s
""", (urlid, urlid))
# delete log
for logid in logids:
run_sql("""DELETE FROM lnkLOG
WHERE id=%s
""", (logid[0], ))
def get_urls_and_titles(title_status=None):
"""
Get URLs and their corresponding title
    @param title_status: of CFG_WEBLINKBACK_PAGE_TITLE_STATUS or None
@return ((url, title, manual_set),...), all rows of the table if None
"""
top_query = """SELECT url, title, manual_set, broken_count
FROM lnkENTRYURLTITLE
WHERE
"""
where_sql = ""
if title_status == CFG_WEBLINKBACK_PAGE_TITLE_STATUS['NEW']:
where_sql = " title='' AND manual_set=0 AND"
elif title_status == CFG_WEBLINKBACK_PAGE_TITLE_STATUS['OLD']:
where_sql = " title<>'' AND manual_set=0 AND"
elif title_status == CFG_WEBLINKBACK_PAGE_TITLE_STATUS['MANUALLY_SET']:
where_sql = " manual_set=1 AND"
where_sql += " broken=0"
return run_sql(top_query + where_sql)
def update_url_title(url, title):
"""
Update the corresponding title of a URL
@param url: URL
@param title: new title
"""
run_sql("""UPDATE lnkENTRYURLTITLE
SET title=%s,
manual_set=0,
broken_count=0,
broken=0
WHERE url=%s
""", (title, url))
def remove_url_title(url):
"""
Remove URL title
@param url: URL
"""
run_sql("""DELETE FROM lnkENTRYURLTITLE
WHERE url=%s
""", (url, ))
def set_url_broken(url):
"""
Set URL broken
@param url: URL
"""
linkbackids = run_sql("""SELECT id
FROM lnkENTRY
WHERE origin_url=%s
""", (url, ))
run_sql("""UPDATE lnkENTRYURLTITLE
SET title=%s,
broken=1
WHERE url=%s
""", (CFG_WEBLINKBACK_STATUS['BROKEN'], url))
# update all linkbacks
for linkbackid in linkbackids:
update_linkback_status(linkbackid[0], CFG_WEBLINKBACK_STATUS['BROKEN'])
def get_url_title(url):
"""
Get URL title or URL if title does not exist (empty string)
@param url: URL
@return title or URL if titles does not exist (empty string)
"""
title = run_sql("""SELECT title
FROM lnkENTRYURLTITLE
WHERE url=%s and title<>"" and broken=0
""", (url, ))
res = url
if len(title) != 0:
res = title[0][0]
return xml_entities_to_utf8(res)
def increment_broken_count(url):
"""
Increment broken count a URL
@param url: URL
"""
run_sql("""UPDATE lnkENTRYURLTITLE
SET broken_count=broken_count+1
WHERE url=%s
""", (url, ))
def remove_linkback(linkbackid):
"""
Remove a linkback database
@param linkbackid: unique URL string for all lists
"""
# get ids
logids = run_sql("""SELECT log.id
FROM lnkLOG log
JOIN lnkENTRYLOG entry_log
ON log.id=entry_log.id_lnkLOG
WHERE entry_log.id_lnkENTRY=%s
""", (linkbackid, ))
# delete linkback entry and entry log
run_sql("""DELETE FROM lnkENTRY
WHERE id=%s;
DELETE FROM lnkENTRYLOG
WHERE id_lnkENTRY=%s
""", (linkbackid, linkbackid))
# delete log
for logid in logids:
run_sql("""DELETE FROM lnkLOG
WHERE id=%s
""", (logid[0], ))
| gpl-2.0 | -5,230,928,770,171,948,000 | 30.874715 | 128 | 0.517616 | false |
mburakergenc/Malware-Detection-using-Machine-Learning | cuckoo/analyzer/windows/analyzer.py | 1 | 30957 | # Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import datetime
import os
import sys
import socket
import struct
import pkgutil
import logging
import hashlib
import threading
import traceback
import urllib
import urllib2
import xmlrpclib
from lib.api.process import Process
from lib.common.abstracts import Package, Auxiliary
from lib.common.constants import SHUTDOWN_MUTEX
from lib.common.defines import KERNEL32
from lib.common.exceptions import CuckooError, CuckooPackageError
from lib.common.exceptions import CuckooDisableModule
from lib.common.hashing import hash_file
from lib.common.rand import random_string
from lib.common.results import upload_to_host
from lib.core.config import Config
from lib.core.packages import choose_package
from lib.core.pipe import PipeServer, PipeForwarder, PipeDispatcher
from lib.core.privileges import grant_debug_privilege
from lib.core.startup import init_logging, set_clock
from modules import auxiliary
log = logging.getLogger("analyzer")
class Files(object):
PROTECTED_NAMES = ()
def __init__(self):
self.files = {}
self.files_orig = {}
self.dumped = []
def is_protected_filename(self, file_name):
"""Do we want to inject into a process with this name?"""
return file_name.lower() in self.PROTECTED_NAMES
def add_pid(self, filepath, pid, verbose=True):
"""Tracks a process identifier for this file."""
if not pid or filepath.lower() not in self.files:
return
if pid not in self.files[filepath.lower()]:
self.files[filepath.lower()].append(pid)
verbose and log.info("Added pid %s for %r", pid, filepath)
def add_file(self, filepath, pid=None):
"""Add filepath to the list of files and track the pid."""
if filepath.lower() not in self.files:
log.info(
"Added new file to list with pid %s and path %s",
pid, filepath
)
self.files[filepath.lower()] = []
self.files_orig[filepath.lower()] = filepath
self.add_pid(filepath, pid, verbose=False)
def dump_file(self, filepath):
"""Dump a file to the host."""
if not os.path.isfile(filepath):
log.warning("File at path \"%r\" does not exist, skip.", filepath)
return False
# Check whether we've already dumped this file - in that case skip it.
try:
sha256 = hash_file(hashlib.sha256, filepath)
if sha256 in self.dumped:
return
except IOError as e:
log.info("Error dumping file from path \"%s\": %s", filepath, e)
return
filename = "%s_%s" % (sha256[:16], os.path.basename(filepath))
upload_path = os.path.join("files", filename)
try:
upload_to_host(
# If available use the original filepath, the one that is
# not lowercased.
self.files_orig.get(filepath.lower(), filepath),
upload_path, self.files.get(filepath.lower(), [])
)
self.dumped.append(sha256)
except (IOError, socket.error) as e:
log.error(
"Unable to upload dropped file at path \"%s\": %s",
filepath, e
)
def delete_file(self, filepath, pid=None):
"""A file is about to removed and thus should be dumped right away."""
self.add_pid(filepath, pid)
self.dump_file(filepath)
# Remove the filepath from the files list.
self.files.pop(filepath.lower(), None)
self.files_orig.pop(filepath.lower(), None)
def move_file(self, oldfilepath, newfilepath, pid=None):
"""A file will be moved - track this change."""
self.add_pid(oldfilepath, pid)
if oldfilepath.lower() in self.files:
# Replace the entry with the new filepath.
self.files[newfilepath.lower()] = \
self.files.pop(oldfilepath.lower(), [])
def dump_files(self):
"""Dump all pending files."""
while self.files:
self.delete_file(self.files.keys()[0])
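# Illustrative lifecycle (hypothetical paths and pids): the monitor reports a
# dropped file, another process later touches it, and the file is uploaded to
# the host when it is deleted or when the analysis ends:
#   files = Files()
#   files.add_file("C:\\Users\\victim\\AppData\\evil.tmp", pid=2044)
#   files.add_pid("C:\\Users\\victim\\AppData\\evil.tmp", 2512)
#   files.delete_file("C:\\Users\\victim\\AppData\\evil.tmp", pid=2512)
#   files.dump_files()   # flushes anything still being tracked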
class ProcessList(object):
def __init__(self):
self.pids = []
self.pids_notrack = []
def add_pid(self, pid, track=True):
"""Add a process identifier to the process list.
Track determines whether the analyzer should be monitoring this
process, i.e., whether Cuckoo should wait for this process to finish.
"""
if int(pid) not in self.pids and int(pid) not in self.pids_notrack:
if track:
self.pids.append(int(pid))
else:
self.pids_notrack.append(int(pid))
def add_pids(self, pids):
"""Add one or more process identifiers to the process list."""
if isinstance(pids, (tuple, list)):
for pid in pids:
self.add_pid(pid)
else:
self.add_pid(pids)
def has_pid(self, pid, notrack=True):
"""Is this process identifier being tracked?"""
if int(pid) in self.pids:
return True
if notrack and int(pid) in self.pids_notrack:
return True
return False
def remove_pid(self, pid):
"""Remove a process identifier from being tracked."""
if pid in self.pids:
self.pids.remove(pid)
if pid in self.pids_notrack:
self.pids_notrack.remove(pid)
class CommandPipeHandler(object):
"""Pipe Handler.
This class handles the notifications received through the Pipe Server and
decides what to do with them.
"""
ignore_list = dict(pid=[])
def __init__(self, analyzer):
self.analyzer = analyzer
self.tracked = {}
def _handle_debug(self, data):
"""Debug message from the monitor."""
log.debug(data)
def _handle_info(self, data):
"""Regular message from the monitor."""
log.info(data)
def _handle_warning(self, data):
"""Warning message from the monitor."""
log.warning(data)
def _handle_critical(self, data):
"""Critical message from the monitor."""
log.critical(data)
def _handle_loaded(self, data):
"""The monitor has loaded into a particular process."""
if not data or data.count(",") != 1:
log.warning("Received loaded command with incorrect parameters, "
"skipping it.")
return
pid, track = data.split(",")
if not pid.isdigit() or not track.isdigit():
log.warning("Received loaded command with incorrect parameters, "
"skipping it.")
return
self.analyzer.process_lock.acquire()
self.analyzer.process_list.add_pid(int(pid), track=int(track))
self.analyzer.process_lock.release()
log.debug("Loaded monitor into process with pid %s", pid)
def _handle_getpids(self, data):
"""Return the process identifiers of the agent and its parent
process."""
return struct.pack("II", self.analyzer.pid, self.analyzer.ppid)
def _inject_process(self, process_id, thread_id, mode):
"""Helper function for injecting the monitor into a process."""
# We acquire the process lock in order to prevent the analyzer to
# terminate the analysis while we are operating on the new process.
self.analyzer.process_lock.acquire()
# Set the current DLL to the default one provided at submission.
dll = self.analyzer.default_dll
if process_id in (self.analyzer.pid, self.analyzer.ppid):
if process_id not in self.ignore_list["pid"]:
log.warning("Received request to inject Cuckoo processes, "
"skipping it.")
self.ignore_list["pid"].append(process_id)
self.analyzer.process_lock.release()
return
# We inject the process only if it's not being monitored already,
        # otherwise we would generate polluted logs (if it wouldn't crash
# horribly to start with).
if self.analyzer.process_list.has_pid(process_id):
# This pid is already on the notrack list, move it to the
# list of tracked pids.
if not self.analyzer.process_list.has_pid(process_id, notrack=False):
log.debug("Received request to inject pid=%d. It was already "
"on our notrack list, moving it to the track list.")
self.analyzer.process_list.remove_pid(process_id)
self.analyzer.process_list.add_pid(process_id)
self.ignore_list["pid"].append(process_id)
# Spit out an error once and just ignore it further on.
elif process_id not in self.ignore_list["pid"]:
log.debug("Received request to inject pid=%d, but we are "
"already injected there.", process_id)
self.ignore_list["pid"].append(process_id)
# We're done operating on the processes list, release the lock.
self.analyzer.process_lock.release()
return
# Open the process and inject the DLL. Hope it enjoys it.
proc = Process(pid=process_id, tid=thread_id)
filename = os.path.basename(proc.get_filepath())
if not self.analyzer.files.is_protected_filename(filename):
# Add the new process ID to the list of monitored processes.
self.analyzer.process_list.add_pid(process_id)
# We're done operating on the processes list,
# release the lock. Let the injection do its thing.
self.analyzer.process_lock.release()
# If we have both pid and tid, then we can use APC to inject.
if process_id and thread_id:
proc.inject(dll, apc=True, mode="%s" % mode)
else:
proc.inject(dll, apc=False, mode="%s" % mode)
log.info("Injected into process with pid %s and name %r",
proc.pid, filename)
def _handle_process(self, data):
"""Request for injection into a process."""
# Parse the process identifier.
if not data or not data.isdigit():
log.warning("Received PROCESS command from monitor with an "
"incorrect argument.")
return
return self._inject_process(int(data), None, 0)
def _handle_process2(self, data):
"""Request for injection into a process using APC."""
# Parse the process and thread identifier.
if not data or data.count(",") != 2:
log.warning("Received PROCESS2 command from monitor with an "
"incorrect argument.")
return
pid, tid, mode = data.split(",")
if not pid.isdigit() or not tid.isdigit() or not mode.isdigit():
log.warning("Received PROCESS2 command from monitor with an "
"incorrect argument.")
return
return self._inject_process(int(pid), int(tid), int(mode))
def _handle_file_new(self, data):
"""Notification of a new dropped file."""
self.analyzer.files.add_file(data.decode("utf8"), self.pid)
def _handle_file_del(self, data):
"""Notification of a file being removed - we have to dump it before
it's being removed."""
self.analyzer.files.delete_file(data.decode("utf8"), self.pid)
def _handle_file_move(self, data):
"""A file is being moved - track these changes."""
if "::" not in data:
log.warning("Received FILE_MOVE command from monitor with an "
"incorrect argument.")
return
old_filepath, new_filepath = data.split("::", 1)
self.analyzer.files.move_file(
old_filepath.decode("utf8"), new_filepath.decode("utf8"), self.pid
)
def _handle_kill(self, data):
"""A process is being killed."""
if not data.isdigit():
log.warning("Received KILL command with an incorrect argument.")
return
if self.analyzer.config.options.get("procmemdump"):
Process(pid=int(data)).dump_memory()
def _handle_dumpmem(self, data):
"""Dump the memory of a process as it is right now."""
if not data.isdigit():
log.warning("Received DUMPMEM command with an incorrect argument.")
return
Process(pid=int(data)).dump_memory()
def _handle_dumpreqs(self, data):
if not data.isdigit():
log.warning("Received DUMPREQS command with an incorrect argument %r.", data)
return
pid = int(data)
if pid not in self.tracked:
log.warning("Received DUMPREQS command but there are no reqs for pid %d.", pid)
return
dumpreqs = self.tracked[pid].get("dumpreq", [])
for addr, length in dumpreqs:
log.debug("tracked dump req (%r, %r, %r)", pid, addr, length)
if not addr or not length:
continue
Process(pid=pid).dump_memory_block(int(addr), int(length))
def _handle_track(self, data):
if not data.count(":") == 2:
log.warning("Received TRACK command with an incorrect argument %r.", data)
return
pid, scope, params = data.split(":", 2)
pid = int(pid)
paramtuple = params.split(",")
if pid not in self.tracked:
self.tracked[pid] = {}
if scope not in self.tracked[pid]:
self.tracked[pid][scope] = []
self.tracked[pid][scope].append(paramtuple)
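    # Sketch of the TRACK bookkeeping with made-up values: an argument such as
    # "1234:dumpreq:4194304,4096" is stored as
    #
    #   self.tracked[1234]["dumpreq"] == [["4194304", "4096"]]
    #
    # and is later consumed by _handle_dumpreqs(), which unpacks each entry as
    # (addr, length) before calling dump_memory_block().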
def dispatch(self, data):
response = "NOPE"
if not data or ":" not in data:
log.critical("Unknown command received from the monitor: %r",
data.strip())
else:
# Backwards compatibility (old syntax is, e.g., "FILE_NEW:" vs the
# new syntax, e.g., "1234:FILE_NEW:").
if data[0].isupper():
command, arguments = data.strip().split(":", 1)
self.pid = None
else:
self.pid, command, arguments = data.strip().split(":", 2)
fn = getattr(self, "_handle_%s" % command.lower(), None)
if not fn:
log.critical("Unknown command received from the monitor: %r",
data.strip())
else:
try:
response = fn(arguments)
except:
log.exception(
"Pipe command handler exception occurred (command "
"%s args %r).", command, arguments
)
return response
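    # Rough sketch of both accepted wire formats (values are made up):
    #
    #   "FILE_NEW:C:\\dropped.bin"        -> old syntax, no sender pid
    #   "1234:FILE_NEW:C:\\dropped.bin"   -> new syntax, pid 1234 prepended
    #
    # The first character decides the branch: an uppercase letter means the
    # old, pid-less form.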
class Analyzer(object):
"""Cuckoo Windows Analyzer.
This class handles the initialization and execution of the analysis
procedure, including handling of the pipe server, the auxiliary modules and
the analysis packages.
"""
def __init__(self):
self.config = None
self.target = None
self.do_run = True
self.time_counter = 0
self.process_lock = threading.Lock()
self.default_dll = None
self.pid = os.getpid()
self.ppid = Process(pid=self.pid).get_parent_pid()
self.files = Files()
self.process_list = ProcessList()
self.package = None
self.reboot = []
def prepare(self):
"""Prepare env for analysis."""
# Get SeDebugPrivilege for the Python process. It will be needed in
# order to perform the injections.
grant_debug_privilege()
# Initialize logging.
init_logging()
# Parse the analysis configuration file generated by the agent.
self.config = Config(cfg="analysis.conf")
# Pass the configuration through to the Process class.
Process.set_config(self.config)
# Set virtual machine clock.
set_clock(datetime.datetime.strptime(
self.config.clock, "%Y%m%dT%H:%M:%S"
))
# Set the default DLL to be used for this analysis.
self.default_dll = self.config.options.get("dll")
        # If a pipe name has not been set, then generate a random one.
if "pipe" in self.config.options:
self.config.pipe = "\\\\.\\PIPE\\%s" % self.config.options["pipe"]
else:
self.config.pipe = "\\\\.\\PIPE\\%s" % random_string(16, 32)
# Generate a random name for the logging pipe server.
self.config.logpipe = "\\\\.\\PIPE\\%s" % random_string(16, 32)
# Initialize and start the Command Handler pipe server. This is going
# to be used for communicating with the monitored processes.
self.command_pipe = PipeServer(PipeDispatcher, self.config.pipe,
message=True,
dispatcher=CommandPipeHandler(self))
self.command_pipe.daemon = True
self.command_pipe.start()
# Initialize and start the Log Pipe Server - the log pipe server will
# open up a pipe that monitored processes will use to send logs to
# before they head off to the host machine.
destination = self.config.ip, self.config.port
self.log_pipe_server = PipeServer(PipeForwarder, self.config.logpipe,
destination=destination)
self.log_pipe_server.daemon = True
self.log_pipe_server.start()
# We update the target according to its category. If it's a file, then
# we store the target path.
if self.config.category == "file":
self.target = os.path.join(os.environ["TEMP"] + os.sep,
self.config.file_name)
# If it's a URL, well.. we store the URL.
else:
self.target = self.config.target
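    # For orientation only: prepare() expects analysis.conf (generated by the
    # agent) to provide at least the keys used above. A purely illustrative
    # excerpt, with made-up values and the exact file layout left to the agent:
    #
    #   clock = 20240101T12:00:00
    #   category = file
    #   file_name = sample.exe
    #   ip = 192.168.56.1
    #   port = 2042
    #   options = dll=monitor-x86.dll,pipe=cuckoo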
def stop(self):
"""Allows an auxiliary module to stop the analysis."""
self.do_run = False
def complete(self):
"""End analysis."""
# Stop the Pipe Servers.
self.command_pipe.stop()
self.log_pipe_server.stop()
# Dump all the notified files.
self.files.dump_files()
# Hell yeah.
log.info("Analysis completed.")
def run(self):
"""Run analysis.
@return: operation status.
"""
self.prepare()
self.path = os.getcwd()
log.debug("Starting analyzer from: %s", self.path)
log.debug("Pipe server name: %s", self.config.pipe)
log.debug("Log pipe server name: %s", self.config.logpipe)
# If no analysis package was specified at submission, we try to select
# one automatically.
if not self.config.package:
log.debug("No analysis package specified, trying to detect "
"it automagically.")
# If the analysis target is a file, we choose the package according
# to the file format.
if self.config.category == "file":
package = choose_package(self.config.file_type,
self.config.file_name,
self.config.pe_exports.split(","))
# If it's an URL, we'll just use the default Internet Explorer
# package.
else:
package = "ie"
# If we weren't able to automatically determine the proper package,
# we need to abort the analysis.
if not package:
raise CuckooError("No valid package available for file "
"type: {0}".format(self.config.file_type))
log.info("Automatically selected analysis package \"%s\"", package)
# Otherwise just select the specified package.
else:
package = self.config.package
# Generate the package path.
package_name = "modules.packages.%s" % package
# Try to import the analysis package.
try:
__import__(package_name, globals(), locals(), ["dummy"], -1)
# If it fails, we need to abort the analysis.
except ImportError:
raise CuckooError("Unable to import package \"{0}\", does "
"not exist.".format(package_name))
# Initialize the package parent abstract.
Package()
# Enumerate the abstract subclasses.
try:
package_class = Package.__subclasses__()[0]
except IndexError as e:
raise CuckooError("Unable to select package class "
"(package={0}): {1}".format(package_name, e))
# Initialize the analysis package.
self.package = package_class(self.config.options, analyzer=self)
# Move the sample to the current working directory as provided by the
# task - one is able to override the starting path of the sample.
# E.g., for some samples it might be useful to run from %APPDATA%
# instead of %TEMP%.
if self.config.category == "file":
self.target = self.package.move_curdir(self.target)
# Initialize Auxiliary modules
Auxiliary()
prefix = auxiliary.__name__ + "."
for loader, name, ispkg in pkgutil.iter_modules(auxiliary.__path__, prefix):
if ispkg:
continue
# Import the auxiliary module.
try:
__import__(name, globals(), locals(), ["dummy"], -1)
except ImportError as e:
log.warning("Unable to import the auxiliary module "
"\"%s\": %s", name, e)
# Walk through the available auxiliary modules.
aux_enabled, aux_avail = [], []
for module in Auxiliary.__subclasses__():
# Try to start the auxiliary module.
try:
aux = module(options=self.config.options, analyzer=self)
aux_avail.append(aux)
aux.start()
except (NotImplementedError, AttributeError):
log.warning("Auxiliary module %s was not implemented",
module.__name__)
except CuckooDisableModule:
continue
except Exception as e:
log.warning("Cannot execute auxiliary module %s: %s",
module.__name__, e)
else:
log.debug("Started auxiliary module %s",
module.__name__)
aux_enabled.append(aux)
# Start analysis package. If for any reason, the execution of the
# analysis package fails, we have to abort the analysis.
try:
pids = self.package.start(self.target)
except NotImplementedError:
raise CuckooError(
"The package \"%s\" doesn't contain a run function." %
package_name
)
except CuckooPackageError as e:
raise CuckooError(
"The package \"%s\" start function raised an error: %s" %
(package_name, e)
)
except Exception as e:
raise CuckooError(
"The package \"%s\" start function encountered an unhandled "
"exception: %s" % (package_name, e)
)
# If the analysis package returned a list of process identifiers, we
# add them to the list of monitored processes and enable the process monitor.
if pids:
self.process_list.add_pids(pids)
pid_check = True
# If the package didn't return any process ID (for example in the case
# where the package isn't enabling any behavioral analysis), we don't
# enable the process monitor.
else:
log.info("No process IDs returned by the package, running "
"for the full timeout.")
pid_check = False
        # Check in the options whether the user enabled timeout enforcement.
        # If so, we need to override pid_check and disable the process monitor.
        if self.config.enforce_timeout:
            log.info("Timeout enforcement enabled, running for the full timeout.")
pid_check = False
while self.do_run:
self.time_counter += 1
if self.time_counter == int(self.config.timeout):
log.info("Analysis timeout hit, terminating analysis.")
break
# If the process lock is locked, it means that something is
# operating on the list of monitored processes. Therefore we
# cannot proceed with the checks until the lock is released.
if self.process_lock.locked():
KERNEL32.Sleep(1000)
continue
try:
# If the process monitor is enabled we start checking whether
# the monitored processes are still alive.
if pid_check:
for pid in self.process_list.pids:
if not Process(pid=pid).is_alive():
log.info("Process with pid %s has terminated", pid)
self.process_list.remove_pid(pid)
# If none of the monitored processes are still alive, we
# can terminate the analysis.
if not self.process_list.pids:
log.info("Process list is empty, "
"terminating analysis.")
break
# Update the list of monitored processes available to the
# analysis package. It could be used for internal
# operations within the module.
self.package.set_pids(self.process_list.pids)
try:
# The analysis packages are provided with a function that
# is executed at every loop's iteration. If such function
# returns False, it means that it requested the analysis
                    # to be terminated.
if not self.package.check():
log.info("The analysis package requested the "
"termination of the analysis.")
break
# If the check() function of the package raised some exception
# we don't care, we can still proceed with the analysis but we
# throw a warning.
except Exception as e:
log.warning("The package \"%s\" check function raised "
"an exception: %s", package_name, e)
finally:
# Zzz.
KERNEL32.Sleep(1000)
if not self.do_run:
log.debug("The analyzer has been stopped on request by an "
"auxiliary module.")
# Create the shutdown mutex.
KERNEL32.CreateMutexA(None, False, SHUTDOWN_MUTEX)
try:
# Before shutting down the analysis, the package can perform some
# final operations through the finish() function.
self.package.finish()
except Exception as e:
log.warning("The package \"%s\" finish function raised an "
"exception: %s", package_name, e)
try:
# Upload files the package created to package_files in the
# results folder.
for path, name in self.package.package_files() or []:
upload_to_host(path, os.path.join("package_files", name))
except Exception as e:
log.warning("The package \"%s\" package_files function raised an "
"exception: %s", package_name, e)
# Terminate the Auxiliary modules.
for aux in aux_enabled:
try:
aux.stop()
except (NotImplementedError, AttributeError):
continue
except Exception as e:
log.warning("Cannot terminate auxiliary module %s: %s",
aux.__class__.__name__, e)
if self.config.terminate_processes:
# Try to terminate remaining active processes.
log.info("Terminating remaining processes before shutdown.")
for pid in self.process_list.pids:
proc = Process(pid=pid)
if proc.is_alive():
try:
proc.terminate()
except:
continue
# Run the finish callback of every available Auxiliary module.
for aux in aux_avail:
try:
aux.finish()
except (NotImplementedError, AttributeError):
continue
except Exception as e:
log.warning("Exception running finish callback of auxiliary "
"module %s: %s", aux.__class__.__name__, e)
# Let's invoke the completion procedure.
self.complete()
return True
if __name__ == "__main__":
success = False
error = ""
try:
# Initialize the main analyzer class.
analyzer = Analyzer()
# Run it and wait for the response.
success = analyzer.run()
data = {
"status": "complete",
"description": success,
}
# This is not likely to happen.
except KeyboardInterrupt:
error = "Keyboard Interrupt"
# If the analysis process encountered a critical error, it will raise a
# CuckooError exception, which will force the termination of the analysis.
# Notify the agent of the failure. Also catch unexpected exceptions.
except Exception as e:
# Store the error.
error_exc = traceback.format_exc()
error = "%s\n%s" % (e, error_exc)
# Just to be paranoid.
if len(log.handlers):
log.exception(error_exc)
else:
sys.stderr.write("{0}\n".format(error_exc))
data = {
"status": "exception",
"description": error_exc,
}
finally:
# Report that we're finished. First try with the XML RPC thing and
# if that fails, attempt the new Agent.
try:
server = xmlrpclib.Server("http://127.0.0.1:8000")
server.complete(success, error, "unused_path")
except xmlrpclib.ProtocolError:
urllib2.urlopen("http://127.0.0.1:8000/status",
urllib.urlencode(data)).read()
| mit | -527,626,930,496,597,250 | 37.030713 | 91 | 0.565656 | false |
lem9/weblate | openshift/wsgi_install.py | 1 | 3249 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright © 2014 Daniel Tschan <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import os
from string import Template
VIRTUALENV = os.path.join(
os.environ['OPENSHIFT_PYTHON_DIR'], 'virtenv', 'bin', 'activate_this.py'
)
with open(VIRTUALENV) as handle:
code = compile(handle.read(), 'activate_this.py', 'exec')
exec(code, dict(__file__=VIRTUALENV)) # noqa
def application(environ, start_response):
ctype = 'text/html'
response_body = Template('''<!doctype html>
<html lang="en">
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta charset="utf-8">
<title>Installing Weblate</title>
<style>
html {
background: #f5f5f5;
height: 100%;
}
body {
color: #404040;
font-family: "Helvetica Neue",Helvetica,"Liberation Sans",Arial,sans-serif;
font-size: 14px;
line-height: 1.4;
}
h1 {
color: #000;
line-height: 1.38em;
margin: .4em 0 .5em;
font-size: 25px;
font-weight: 300;
border-bottom: 1px solid #fff;
}
h1:after {
content: "";
display: block;
height: 1px;
background-color: #ddd;
}
p {
margin: 0 0 2em;
}
pre {
padding: 13.333px 20px;
margin: 0 0 20px;
font-size: 13px;
line-height: 1.4;
background-color: #fff;
border-left: 2px solid rgba(120,120,120,0.35);
font-family: Menlo,Monaco,"Liberation Mono",Consolas,monospace !important;
}
.content {
display: table;
margin-left: -15px;
margin-right: -15px;
position: relative;
min-height: 1px;
padding-left: 30px;
padding-right: 30px;
}
</style>
</head>
<body>
<div class="content">
<h1>$action1 Weblate</h1>
<p>
Weblate is being $action2.
Please wait a few minutes and refresh this page.
</p>
$log
</div>
</body>
</html>''')
context = {}
if os.path.exists(os.environ['OPENSHIFT_DATA_DIR'] + '/.installed'):
context['action1'] = 'Updating'
context['action2'] = 'updated'
context['log'] = ''
else:
context['action1'] = 'Installing'
context['action2'] = 'installed'
log_msg = os.popen(
r"cat ${OPENSHIFT_PYTHON_LOG_DIR}/install.log |"
r" grep '^[^ ]\|setup.py install' |"
r" sed 's,/var/lib/openshift/[a-z0-9]\{24\},~,g'"
).read()
context['log'] = '<pre>' + log_msg + '</pre>'
response_body = response_body.substitute(context)
status = '200 OK'
response_headers = [
('Content-Type', ctype),
('Content-Length', str(len(response_body)))
]
start_response(status, response_headers)
return [response_body]
| gpl-3.0 | -2,372,140,518,733,032,000 | 24.178295 | 77 | 0.641933 | false |
gibsjose/SpectrumSite | Utilities/SteeringGenerator.py | 1 | 3470 | #!/usr/bin/python
import sys
# Input dictionary from KVP file
d = {}
# Defaults dictionary
defaults = {}
# Checks whether the key exists in the input dictionary
def Write(_key, _file):
if _key in d:
if not d[_key].strip():
_file.write("; ")
_file.write(_key + ' = ' + d[_key])
elif _key in defaults:
_file.write(_key + ' = ' + defaults[_key] + '\n')
inputPath = sys.argv[1]
outputPath = sys.argv[2]
# Open the key-value-pair file and create a dictionary out of it
#with open(inputPath, 'r') as f:
#print "inputPath = " inputPath
f = open(inputPath, 'r')
for line in f:
# Split the line based on the '='
(key, val) = line.split(' = ')
    # Note: val keeps its trailing newline; Write() relies on it when echoing
    # values read from the input file (the defaults branch adds its own).
#Store in the dictionary
d[key] = val
#Close the file
f.close()
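# For illustration (hypothetical content): the input KVP file is expected to
# hold one " = "-separated pair per line, using the same keys as the defaults
# below, for example:
#
#   debug = false
#   plot_marker = true
#   x_legend = 0.9
#
# The split is on ' = ' (with surrounding spaces), so a line without that
# exact separator would raise a ValueError.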
# Create default dictionary
# [GEN]
defaults['debug'] = 'false'
# [GRAPH]
defaults['plot_band'] = 'false'
defaults['plot_error_ticks'] = 'false'
defaults['plot_marker'] = 'true'
defaults['plot_staggered'] = 'true'
defaults['match_binning'] = 'true'
defaults['grid_corr'] = 'false'
defaults['label_sqrt_s'] = 'true'
defaults['x_legend'] = '0.9'
defaults['y_legend'] = '0.9'
# defaults['y_overlay_min'] = ''
# defaults['y_overlay_max'] = ''
# defaults['y_ratio_min'] = ''
# defaults['y_ratio_max'] = ''
defaults['band_with_pdf'] = 'true'
defaults['band_with_alphas'] = 'false'
defaults['band_with_scale'] = 'false'
defaults['band_total'] = 'false'
# [PLOT_0]
defaults['plot_type'] = 'data, grid, pdf'
defaults['desc'] = ''
defaults['data_directory'] = '.'
defaults['grid_directory'] = '.'
defaults['pdf_directory'] = '.'
defaults['data_steering_files'] = 'none'
defaults['grid_steering_files'] = 'none'
defaults['pdf_steering_files'] = 'none'
# defaults['data_marker_style'] = '20'
# defaults['data_marker_color'] = '1'
# defaults['pdf_fill_style'] = ''
# defaults['pdf_fill_color'] = ''
# defaults['pdf_marker_style'] = ''
defaults['x_scale'] = '1.0'
defaults['y_scale'] = '1.0'
defaults['x_log'] = 'true'
defaults['y_log'] = 'true'
defaults['display_style'] = 'overlay'
defaults['overlay_style'] = 'data, convolute'
defaults['ratio_title'] = 'Ratio'
# Write the Steering File
#with open(outputPath, 'w') as f:
f = open(outputPath, 'w')
# [GEN]
f.write('[GEN]\n')
Write('debug', f)
# [GRAPH]
f.write('\n[GRAPH]\n')
Write('plot_band', f)
Write('plot_error_ticks', f)
Write('plot_marker', f)
Write('plot_staggered', f)
Write('match_binning', f)
Write('grid_corr', f)
Write('label_sqrt_s', f)
Write('x_legend', f)
Write('y_legend', f)
Write('y_overlay_min', f)
Write('y_overlay_max', f)
Write('y_ratio_min', f)
Write('y_ratio_max', f)
Write('band_with_pdf', f)
Write('band_with_alphas', f)
Write('band_with_scale', f)
Write('band_total', f)
# [PLOT_0]
f.write('\n[PLOT_0]\n')
Write('plot_type', f)
Write('desc', f)
Write('data_directory', f)
Write('grid_directory', f)
Write('pdf_directory', f)
Write('data_steering_files', f)
Write('grid_steering_files', f)
Write('pdf_steering_files', f)
Write('data_marker_style', f)
Write('data_marker_color', f)
Write('pdf_fill_style', f)
Write('pdf_fill_color', f)
Write('pdf_marker_style', f)
Write('x_scale', f)
Write('y_scale', f)
Write('x_log', f)
Write('y_log', f)
Write('display_style', f)
Write('overlay_style', f)
Write('ratio_title', f)
#Look for up to 10 ratios
for i in range(0, 10):
rs = 'ratio_style_' + str(i)
r = 'ratio_' + str(i)
Write(rs, f)
Write(r, f)
f.close()
| mit | -3,632,527,644,091,879,400 | 22.605442 | 64 | 0.627089 | false |
mclaughlin6464/pearce | bin/mcmc/pearce_mcmc_xigg_emu1_shot.py | 1 | 2151 | from pearce.emulator import OriginalRecipe, ExtraCrispy, SpicyBuffalo
from pearce.inference import run_mcmc_iterator
import numpy as np
from os import path
import cPickle as pickle
#training_file = '/u/ki/swmclau2/des/xi_cosmo_trainer/PearceRedMagicXiCosmoFixedNd.hdf5'
training_file = '/scratch/users/swmclau2/xi_zheng07_cosmo_lowmsat/PearceRedMagicXiCosmoFixedNd.hdf5'
em_method = 'gp'
split_method = 'random'
load_fixed_params = {'z':0.0}#, 'HOD': 0}
np.random.seed(0)
emu = SpicyBuffalo(training_file, method = em_method, fixed_params=load_fixed_params, custom_mean_function = 'linear', downsample_factor = 0.1)
fixed_params = {}#'f_c':1.0}#,'logM1': 13.8 }# 'z':0.0}
emulation_point = [('logM0', 14.0), ('sigma_logM', 0.2),
('alpha', 1.083),('logM1', 13.7)]#, ('logMmin', 12.233)]
em_params = dict(emulation_point)
em_params.update(fixed_params)
with open('cosmo_param_dict.pkl', 'r') as f:
cosmo_param_dict = pickle.load(f)
y = np.loadtxt('xi_gg_true_jk.npy')
emu1_cov = emu.ycov
shot_cov = np.loadtxt('xi_gg_shot_cov_true.npy')
#jk_cov = np.loadtxt('xi_gg_cov_true_jk.npy')
#sample_cov = np.loadtxt('xigg_scov_log.npy')
cov = emu1_cov + shot_cov
#em_params.update( cosmo_param_dict)
fixed_params.update(em_params)
#fixed_params.update(cosmo_param_dict)
em_params = cosmo_param_dict
param_names = [k for k in em_params.iterkeys() if k not in fixed_params]
nwalkers = 500
nsteps = 20000
nburn = 0
savedir = '/scratch/users/swmclau2/PearceMCMC/'
#chain_fname = path.join(savedir, '%d_walkers_%d_steps_chain_cosmo_zheng_xi_lowmsat.npy'%(nwalkers, nsteps ))
chain_fname = path.join(savedir, '%d_walkers_%d_steps_xigg_m3_1_lin_emu1_shot.npy'%(nwalkers, nsteps))
with open(chain_fname, 'w') as f:
f.write('#' + '\t'.join(param_names)+'\n')
print 'starting mcmc'
rpoints = emu.scale_bin_centers
np.random.seed(0)
for pos in run_mcmc_iterator([emu], param_names, [y], [cov], rpoints, fixed_params = fixed_params,nwalkers = nwalkers,\
nsteps = nsteps):#, nburn = nburn, ncores = 1):#, resume_from_previous = chain_fname):
with open(chain_fname, 'a') as f:
np.savetxt(f, pos)
| mit | -2,329,669,638,456,525,000 | 32.609375 | 143 | 0.686657 | false |
m4rx9/rna-pdb-tools | rna_tools/tools/PyMOL4RNA/libs/show_contacts.py | 1 | 15707 | #!/usr/bin/python
"""
PyMOL plugin that provides show_contacts command and GUI
for highlighting good and bad polar contacts.
Factored out of clustermols by Matthew Baumgartner.
The advantage of this package is it requires many fewer dependencies.
Modified: Marcin Magnus 2020
Source <https://pymolwiki.org/index.php/Pymol-script-repo>
"""
from __future__ import print_function
import sys
import os
from pymol import cmd
print("""show_contacts
-------------------------------------
_polar: good polar interactions according to PyMOL
_polar_ok: compute possibly suboptimal polar interactions using the user specified distance
_aa: acceptors acceptors
_dd: donors donors
_all is all ;-) above!""")
DEBUG=1
def show_contacts(selection='*', selection2='*',
result="contacts",
cutoff=3.6,
bigcutoff = 4.0,
labels=False,
SC_DEBUG = DEBUG):
"""
USAGE
show_contacts selection, selection2, [result=contacts],[cutoff=3.6],[bigcutoff=4.0]
Show various polar contacts, the good, the bad, and the ugly.
Edit MPB 6-26-14: The distances are heavy atom distances, so I upped the default cutoff to 4.0
Returns:
True/False - if False, something went wrong
"""
if SC_DEBUG > 4:
print('Starting show_contacts')
print('selection = "' + selection + '"')
print('selection2 = "' + selection2 + '"')
result = cmd.get_legal_name(result)
#if the group of contacts already exist, delete them
cmd.delete(result)
# ensure only N and O atoms are in the selection
all_don_acc1 = selection + " and (donor or acceptor)"
all_don_acc2 = selection2 + " and (donor or acceptor)"
if SC_DEBUG > 4:
print('all_don_acc1 = "' + all_don_acc1 + '"')
print('all_don_acc2 = "' + all_don_acc2 + '"')
#if theses selections turn out not to have any atoms in them, pymol throws cryptic errors when calling the dist function like:
#'Selector-Error: Invalid selection name'
#So for each one, manually perform the selection and then pass the reference to the distance command and at the end, clean up the selections
#the return values are the count of the number of atoms
all1_sele_count = cmd.select('all_don_acc1_sele', all_don_acc1)
all2_sele_count = cmd.select('all_don_acc2_sele', all_don_acc2)
#print out some warnings
if DEBUG > 3:
if not all1_sele_count:
print('Warning: all_don_acc1 selection empty!')
if not all2_sele_count:
print('Warning: all_don_acc2 selection empty!')
########################################
allres = result + "_all"
if all1_sele_count and all2_sele_count:
#print(allres)
#print(cmd.get_distance(allres, 'all_don_acc1_sele', 'all_don_acc2_sele', bigcutoff, mode = 0))
any = cmd.distance(allres, 'all_don_acc1_sele', 'all_don_acc2_sele', bigcutoff, mode = 0)
# if any is 0 it seems that there is no distance!
if any:
cmd.set("dash_radius", "0.05", allres)
if not labels:
cmd.hide("labels", allres)
else:
            # just do nothing and clean up
print('no contacts')
cmd.delete('all_don_acc1_sele')
cmd.delete('all_don_acc2_sele')
cmd.delete(result + "_all")
return None
########################################
# compute good polar interactions according to pymol
polres = result + "_polar"
if all1_sele_count and all2_sele_count:
cmd.distance(polres, 'all_don_acc1_sele', 'all_don_acc2_sele', cutoff, mode = 2) #hopefully this checks angles? Yes
#cmd.set("dash_color", "marine", allres)
#cmd.set('dash_gap', '0')
cmd.set("dash_radius","0.2", polres) #"0.126"
#cmd.set("dash_color", "marine", allres)
if not labels:
cmd.hide("labels", polres)
########################################
    # When running distance in mode=2, the cutoff parameter is ignored if set higher than the default of 3.6
# so set it to the passed in cutoff and change it back when you are done.
old_h_bond_cutoff_center = cmd.get('h_bond_cutoff_center') # ideal geometry
old_h_bond_cutoff_edge = cmd.get('h_bond_cutoff_edge') # minimally acceptable geometry
cmd.set('h_bond_cutoff_center', bigcutoff)
cmd.set('h_bond_cutoff_edge', bigcutoff)
# compute possibly suboptimal polar interactions using the user specified distance
pol_ok_res = result + "_polar_ok"
if all1_sele_count and all2_sele_count:
cmd.distance(pol_ok_res, 'all_don_acc1_sele', 'all_don_acc2_sele', bigcutoff, mode = 2)
cmd.set("dash_radius", "0.06", pol_ok_res)
if not labels:
cmd.hide("labels", pol_ok_res)
#now reset the h_bond cutoffs
cmd.set('h_bond_cutoff_center', old_h_bond_cutoff_center)
cmd.set('h_bond_cutoff_edge', old_h_bond_cutoff_edge)
########################################
onlyacceptors1 = selection + " and (acceptor and !donor)"
onlyacceptors2 = selection2 + " and (acceptor and !donor)"
onlydonors1 = selection + " and (!acceptor and donor)"
onlydonors2 = selection2 + " and (!acceptor and donor)"
#perform the selections
onlyacceptors1_sele_count = cmd.select('onlyacceptors1_sele', onlyacceptors1)
onlyacceptors2_sele_count = cmd.select('onlyacceptors2_sele', onlyacceptors2)
onlydonors1_sele_count = cmd.select('onlydonors1_sele', onlydonors1)
onlydonors2_sele_count = cmd.select('onlydonors2_sele', onlydonors2)
#print out some warnings
if SC_DEBUG > 2:
if not onlyacceptors1_sele_count:
print('Warning: onlyacceptors1 selection empty!')
if not onlyacceptors2_sele_count:
print('Warning: onlyacceptors2 selection empty!')
if not onlydonors1_sele_count:
print('Warning: onlydonors1 selection empty!')
if not onlydonors2_sele_count:
print('Warning: onlydonors2 selection empty!')
# acceptors acceptors
accres = result+"_aa"
if onlyacceptors1_sele_count and onlyacceptors2_sele_count:
aa_dist_out = cmd.distance(accres, 'onlyacceptors1_sele', 'onlyacceptors2_sele', cutoff, 0)
if aa_dist_out < 0:
print('\n\nCaught a pymol selection error in acceptor-acceptor selection of show_contacts')
print('accres:', accres)
print('onlyacceptors1', onlyacceptors1)
print('onlyacceptors2', onlyacceptors2)
return False
cmd.set("dash_color","red",accres)
cmd.set("dash_radius","0.125",accres)
if not labels:
cmd.hide("labels", accres)
########################################
# donors donors
donres = result+"_dd"
if onlydonors1_sele_count and onlydonors2_sele_count:
dd_dist_out = cmd.distance(donres, 'onlydonors1_sele', 'onlydonors2_sele', cutoff, 0)
#try to catch the error state
if dd_dist_out < 0:
print('\n\nCaught a pymol selection error in dd selection of show_contacts')
print('donres:', donres)
print('onlydonors1', onlydonors1)
print('onlydonors2', onlydonors2)
print("cmd.distance('" + donres + "', '" + onlydonors1 + "', '" + onlydonors2 + "', " + str(cutoff) + ", 0)")
return False
cmd.set("dash_color","red",donres)
cmd.set("dash_radius","0.125",donres)
if not labels:
cmd.hide("labels", donres)
##########################################################
##### find the buried unpaired atoms of the receptor #####
##########################################################
#initialize the variable for when CALC_SASA is False
unpaired_atoms = ''
## Group
print(allres) # contacts_all
cmd.group(result,"%s %s %s %s %s %s" % (polres, allres, accres, donres, pol_ok_res, unpaired_atoms))
## Clean up the selection objects
#if the show_contacts debug level is high enough, don't delete them.
if SC_DEBUG < 5:
cmd.delete('all_don_acc1_sele')
cmd.delete('all_don_acc2_sele')
cmd.delete('onlyacceptors1_sele')
cmd.delete('onlyacceptors2_sele')
cmd.delete('onlydonors1_sele')
cmd.delete('onlydonors2_sele')
cmd.disable('contacts_all')
cmd.disable('contacts_polar_ok')
cmd.disable('contacts_aa')
cmd.disable('contacts_dd')
return True
cmd.extend('contacts', show_contacts) #contacts to avoid clashing with cluster_mols version
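# Minimal usage sketch from the PyMOL command line (object names are
# hypothetical):
#
#   contacts 1abc and chain A, 1abc and chain B
#
# This creates a "contacts" group holding the _all, _polar, _polar_ok, _aa and
# _dd distance objects described above.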
#################################################################################
########################### Start of pymol plugin code ##########################
#################################################################################
about_text = '''show_contacts was factored out of the much more full-featured cluster_mols
by Dr. Matt Baumgartner (https://pymolwiki.org/index.php/Cluster_mols). It provides
an easy way to highlight polar contacts (and clashes) between two selections without
requiring the installation of additional dependencies.
'''
class Show_Contacts:
''' Tk version of the Plugin GUI '''
def __init__(self, app):
parent = app.root
self.parent = parent
self.app = app
import Pmw
############################################################################################
### Open a window with options to select to loaded objects ###
############################################################################################
self.select_dialog = Pmw.Dialog(parent,
buttons = ('Ok','Cancel'),
title = 'Show Contacts Plugin',
command = self.button_pressed )
self.select_dialog.withdraw()
#allow the user to select from objects already loaded in pymol
self.select_object_combo_box = Pmw.ComboBox(self.select_dialog.interior(),
scrolledlist_items=[],
labelpos='w',
label_text='Select loaded object:',
listbox_height = 2,
dropdown=True)
self.select_object_combo_box2 = Pmw.ComboBox(self.select_dialog.interior(),
scrolledlist_items=[],
labelpos='w',
label_text='Select loaded object:',
listbox_height = 2,
dropdown=True)
self.select_object_combo_box.grid(column=1, row=0)
self.select_object_combo_box2.grid(column=2, row=0)
self.populate_ligand_select_list()
self.select_dialog.show()
def button_pressed(self, result):
if hasattr(result,'keycode'):
if result.keycode == 36:
print('keycode:', result.keycode)
elif result == 'Ok' or result == 'Exit' or result == None:
s1 = self.select_object_combo_box.get()
s2 = self.select_object_combo_box2.get()
show_contacts(s1,s2,'%s_%s'%(s1,s2))
self.select_dialog.withdraw()
elif result == 'Cancel' or result == None:
self.select_dialog.withdraw()
def populate_ligand_select_list(self):
        ''' Go through the loaded objects in PyMOL and add them to the selected list. '''
#get the loaded objects
loaded_objects = _get_select_list()
self.select_object_combo_box.clear()
self.select_object_combo_box2.clear()
for ob in loaded_objects:
self.select_object_combo_box.insert('end', ob)
self.select_object_combo_box2.insert('end', ob)
def _get_select_list():
'''
Get either a list of object names, or a list of chain selections
'''
loaded_objects = [name for name in cmd.get_names('all', 1) if '_cluster_' not in name]
# if single object, try chain selections
if len(loaded_objects) == 1:
chains = cmd.get_chains(loaded_objects[0])
if len(chains) > 1:
loaded_objects = ['{} & chain {}'.format(loaded_objects[0], chain) for chain in chains]
return loaded_objects
class Show_Contacts_Qt_Dialog(object):
''' Qt version of the Plugin GUI '''
def __init__(self):
from pymol.Qt import QtWidgets
dialog = QtWidgets.QDialog()
self.setupUi(dialog)
self.populate_ligand_select_list()
dialog.accepted.connect(self.accept)
dialog.exec_()
def accept(self):
s1 = self.select_object_combo_box.currentText()
s2 = self.select_object_combo_box2.currentText()
show_contacts(s1, s2, '%s_%s' % (s1, s2))
def populate_ligand_select_list(self):
loaded_objects = _get_select_list()
self.select_object_combo_box.clear()
self.select_object_combo_box2.clear()
self.select_object_combo_box.addItems(loaded_objects)
self.select_object_combo_box2.addItems(loaded_objects)
if len(loaded_objects) > 1:
self.select_object_combo_box2.setCurrentIndex(1)
def setupUi(self, Dialog):
# Based on auto-generated code from ui file
from pymol.Qt import QtCore, QtWidgets
Dialog.resize(400, 50)
self.gridLayout = QtWidgets.QGridLayout(Dialog)
label = QtWidgets.QLabel("Select loaded object:", Dialog)
self.gridLayout.addWidget(label, 0, 0, 1, 1)
self.select_object_combo_box = QtWidgets.QComboBox(Dialog)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
self.select_object_combo_box.setSizePolicy(sizePolicy)
self.select_object_combo_box.setEditable(True)
self.gridLayout.addWidget(self.select_object_combo_box, 0, 1, 1, 1)
label = QtWidgets.QLabel("Select loaded object:", Dialog)
self.gridLayout.addWidget(label, 1, 0, 1, 1)
self.select_object_combo_box2 = QtWidgets.QComboBox(Dialog)
self.select_object_combo_box2.setSizePolicy(sizePolicy)
self.select_object_combo_box2.setEditable(True)
self.gridLayout.addWidget(self.select_object_combo_box2, 1, 1, 1, 1)
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.gridLayout.addWidget(self.buttonBox, 2, 0, 1, 2)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
def __init__(self):
try:
from pymol.plugins import addmenuitemqt
addmenuitemqt('Show Contacts', Show_Contacts_Qt_Dialog)
return
except Exception as e:
print(e)
self.menuBar.addmenuitem('Plugin', 'command', 'Show Contacts', label = 'Show Contacts', command = lambda s=self : Show_Contacts(s))
| mit | 992,679,597,646,326,900 | 39.903646 | 144 | 0.569364 | false |
Axelrod-Python/axelrod-evolver | tests/integration/test_cycler_integration.py | 1 | 1543 | import os
import tempfile
import unittest
import axelrod as axl
import axelrod_dojo as axl_dojo
class TestCyclerParams(unittest.TestCase):
def setUp(self):
pass
def test_default_single_opponent_e2e(self):
temp_file = tempfile.NamedTemporaryFile()
        # We will set the objective to be the score over 10 turns with 1 repetition.
cycler_objective = axl_dojo.prepare_objective(name="score", turns=10, repetitions=1)
# Lets use an opponent_list of just one:
opponent_list = [axl.TitForTat(), axl.Calculator()]
cycler = axl.EvolvableCycler
# params to pass through
cycler_kwargs = {
"cycle_length": 10
}
# assert file is empty to start
self.assertEqual(temp_file.readline(), b'') # note that .readline() reads bytes hence b''
population = axl_dojo.Population(player_class=cycler,
params_kwargs=cycler_kwargs,
size=20,
objective=cycler_objective,
output_filename=temp_file.name,
opponents=opponent_list)
generations = 5
population.run(generations, print_output=False)
# assert the output file exists and is not empty
self.assertTrue(os.path.exists(temp_file.name))
self.assertNotEqual(temp_file.readline(), b'') # note that .readline() reads bytes hence b''
# close the temp file
temp_file.close()
| mit | 2,311,307,298,383,570,400 | 33.288889 | 101 | 0.573558 | false |
airelil/pywinauto | pywinauto/findbestmatch.py | 1 | 20748 | # GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to find the closest match of a string in a list"""
from __future__ import unicode_literals
import re
import difflib
import six
#import ctypes
#import ldistance
#levenshtein_distance = ctypes.cdll.levenshtein.levenshtein_distance
#levenshtein_distance = ldistance.distance
find_best_control_match_cutoff = .6
#====================================================================
class MatchError(IndexError):
"""A suitable match could not be found"""
def __init__(self, items = None, tofind = ''):
"""Init the parent with the message"""
self.tofind = tofind
self.items = items
if self.items is None:
self.items = []
IndexError.__init__(self,
"Could not find '{0}' in '{1}'".format(tofind, self.items))
_cache = {}
# given a list of texts return the match score for each
# and the best score and text with best score
#====================================================================
def _get_match_ratios(texts, match_against):
"""Get the match ratio of how each item in texts compared to match_against"""
# now time to figure out the matching
ratio_calc = difflib.SequenceMatcher()
ratio_calc.set_seq1(match_against)
ratios = {}
best_ratio = 0
best_text = ''
for text in texts:
if 0:
pass
if (text, match_against) in _cache:
ratios[text] = _cache[(text, match_against)]
elif(match_against, text) in _cache:
ratios[text] = _cache[(match_against, text)]
else:
# set up the SequenceMatcher with other text
ratio_calc.set_seq2(text)
# try using the levenshtein distance instead
#lev_dist = levenshtein_distance(six.text_type(match_against), six.text_type(text))
#ratio = 1 - lev_dist / 10.0
#ratios[text] = ratio
# calculate ratio and store it
ratios[text] = ratio_calc.ratio()
_cache[(match_against, text)] = ratios[text]
# if this is the best so far then update best stats
if ratios[text] > best_ratio:
best_ratio = ratios[text]
best_text = text
return ratios, best_ratio, best_text
#====================================================================
def find_best_match(search_text, item_texts, items, limit_ratio = .5):
"""Return the item that best matches the search_text
* **search_text** The text to search for
* **item_texts** The list of texts to search through
* **items** The list of items corresponding (1 to 1)
to the list of texts to search through.
* **limit_ratio** How well the text has to match the best match.
      If the best match matches lower than this then it is not
considered a match and a MatchError is raised, (default = .5)
"""
search_text = _cut_at_eol(_cut_at_tab(search_text))
text_item_map = UniqueDict()
# Clean each item, make it unique and map to
    # the item index
for text, item in zip(item_texts, items):
text_item_map[_cut_at_eol(_cut_at_tab(text))] = item
ratios, best_ratio, best_text = \
_get_match_ratios(text_item_map.keys(), search_text)
if best_ratio < limit_ratio:
raise MatchError(items = text_item_map.keys(), tofind = search_text)
return text_item_map[best_text]
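# Small usage sketch with illustrative values only: given parallel lists of
# item texts and items,
#
#   best = find_best_match("Save", ["Save As...", "Open", "Cancel"],
#                          [save_item, open_item, cancel_item])
#
# would return save_item, or raise MatchError if no text reaches limit_ratio.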
#====================================================================
_after_tab = re.compile(r"\t.*", re.UNICODE)
_after_eol = re.compile(r"\n.*", re.UNICODE)
_non_word_chars = re.compile(r"\W", re.UNICODE)
def _cut_at_tab(text):
"""Clean out non characters from the string and return it"""
# remove anything after the first tab
return _after_tab.sub("", text)
def _cut_at_eol(text):
"""Clean out non characters from the string and return it"""
# remove anything after the first EOL
return _after_eol.sub("", text)
def _clean_non_chars(text):
"""Remove non word characters"""
# should this also remove everything after the first tab?
# remove non alphanumeric characters
return _non_word_chars.sub("", text)
def is_above_or_to_left(ref_control, other_ctrl):
"""Return true if the other_ctrl is above or to the left of ref_control"""
text_r = other_ctrl.rectangle()
ctrl_r = ref_control.rectangle()
# skip controls where text win is to the right of ctrl
if text_r.left >= ctrl_r.right:
return False
# skip controls where text win is below ctrl
if text_r.top >= ctrl_r.bottom:
return False
# text control top left corner is below control
# top left corner - so not to the above or left :)
if text_r.top >= ctrl_r.top and text_r.left >= ctrl_r.left:
return False
return True
#====================================================================
distance_cuttoff = 999
def get_non_text_control_name(ctrl, controls, text_ctrls):
"""
return the name for this control by finding the closest
text control above and to its left
"""
names = []
# simply look for an instance of the control in the list,
# we don't use list.index() method as it invokes __eq__
ctrl_index = 0
for i, c in enumerate(controls):
if c is ctrl:
ctrl_index = i
break
ctrl_friendly_class_name = ctrl.friendly_class_name()
if ctrl_index != 0:
prev_ctrl = controls[ctrl_index-1]
prev_ctrl_text = prev_ctrl.window_text()
if prev_ctrl.friendly_class_name() == "Static" and \
prev_ctrl.is_visible() and prev_ctrl_text and \
is_above_or_to_left(ctrl, prev_ctrl):
names.append(
prev_ctrl_text +
ctrl_friendly_class_name)
best_name = ''
closest = distance_cuttoff
# now for each of the visible text controls
for text_ctrl in text_ctrls:
# get aliases to the control rectangles
text_r = text_ctrl.rectangle()
ctrl_r = ctrl.rectangle()
# skip controls where text win is to the right of ctrl
if text_r.left >= ctrl_r.right:
continue
# skip controls where text win is below ctrl
if text_r.top >= ctrl_r.bottom:
continue
# calculate the distance between the controls
# at first I just calculated the distance from the top left
# corner of one control to the top left corner of the other control
# but this was not best, so as a text control should either be above
# or to the left of the control I get the distance between
# the top left of the non text control against the
# Top-Right of the text control (text control to the left)
# Bottom-Left of the text control (text control above)
# then I get the min of these two
# We do not actually need to calculate the difference here as we
# only need a comparative number. As long as we find the closest one
# the actual distance is not all that important to us.
        # this reduced the unit tests run on my machine by about 1 second
# (from 61 ->60 s)
# (x^2 + y^2)^.5
#distance = (
# (text_r.left - ctrl_r.left) ** 2 + # (x^2 + y^2)
# (text_r.bottom - ctrl_r.top) ** 2) \
# ** .5 # ^.5
#distance2 = (
# (text_r.right - ctrl_r.left) ** 2 + # (x^2 + y^2)
# (text_r.top - ctrl_r.top) ** 2) \
# ** .5 # ^.5
distance = abs(text_r.left - ctrl_r.left) + abs(text_r.bottom - ctrl_r.top)
distance2 = abs(text_r.right - ctrl_r.left) + abs(text_r.top - ctrl_r.top)
distance = min(distance, distance2)
# UpDown control should use Static text only because edit box text is often useless
if ctrl_friendly_class_name == "UpDown" and \
text_ctrl.friendly_class_name() == "Static" and distance < closest:
# TODO: use search in all text controls for all non-text ones
# (like Dijkstra algorithm vs Floyd one)
closest = distance
ctrl_text = text_ctrl.window_text()
if ctrl_text is None:
# the control probably doesn't exist so skip it
continue
best_name = ctrl_text + ctrl_friendly_class_name
# if this distance was closer than the last one
elif distance < closest:
closest = distance
#if text_ctrl.window_text() == '':
# best_name = ctrl_friendly_class_name + ' '.join(text_ctrl.texts()[1:2])
#else:
ctrl_text = text_ctrl.window_text()
if ctrl_text is None:
# the control probably doesn't exist so skip it
continue
best_name = ctrl_text + ctrl_friendly_class_name
names.append(best_name)
return names
#====================================================================
def get_control_names(control, allcontrols, textcontrols):
"""Returns a list of names for this control"""
names = []
# if it has a reference control - then use that
#if hasattr(control, 'ref') and control.ref:
# control = control.ref
# Add the control based on it's friendly class name
friendly_class_name = control.friendly_class_name()
names.append(friendly_class_name)
# if it has some character text then add it base on that
# and based on that with friendly class name appended
cleaned = control.window_text()
# Todo - I don't like the hardcoded classnames here!
if cleaned and control.has_title:
names.append(cleaned)
names.append(cleaned + friendly_class_name)
elif control.has_title and friendly_class_name != 'TreeView':
try:
for text in control.texts()[1:]:
names.append(friendly_class_name + text)
except Exception:
#import traceback
#from .actionlogger import ActionLogger
pass #ActionLogger().log('Warning! Cannot get control.texts()') #\nTraceback:\n' + traceback.format_exc())
# so find the text of the nearest text visible control
non_text_names = get_non_text_control_name(control, allcontrols, textcontrols)
# and if one was found - add it
if non_text_names:
names.extend(non_text_names)
# it didn't have visible text
else:
# so find the text of the nearest text visible control
non_text_names = get_non_text_control_name(control, allcontrols, textcontrols)
# and if one was found - add it
if non_text_names:
names.extend(non_text_names)
# return the names - and make sure there are no duplicates or empty values
cleaned_names = set(names) - set([None, ""])
return cleaned_names
#====================================================================
class UniqueDict(dict):
"""A dictionary subclass that handles making its keys unique"""
def __setitem__(self, text, item):
"""Set an item of the dictionary"""
# this text is already in the map
# so we need to make it unique
if text in self:
# find next unique text after text1
unique_text = text
counter = 2
while unique_text in self:
unique_text = text + str(counter)
counter += 1
# now we also need to make sure the original item
# is under text0 and text1 also!
if text + '0' not in self:
dict.__setitem__(self, text+'0', self[text])
dict.__setitem__(self, text+'1', self[text])
# now that we don't need original 'text' anymore
# replace it with the uniq text
text = unique_text
# add our current item
dict.__setitem__(self, text, item)
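    # Sketch of the disambiguation behaviour with hypothetical controls:
    #
    #   d = UniqueDict()
    #   d["Button"] = ok_btn      # stored under "Button"
    #   d["Button"] = cancel_btn  # "Button"/"Button0"/"Button1" -> ok_btn,
    #                             # "Button2" -> cancel_btn
    #
    # so controls sharing a name stay addressable through an index suffix.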
def find_best_matches(
self,
search_text,
clean = False,
ignore_case = False):
"""Return the best matches for search_text in the items
* **search_text** the text to look for
* **clean** whether to clean non text characters out of the strings
* **ignore_case** compare strings case insensitively
"""
# now time to figure out the matching
ratio_calc = difflib.SequenceMatcher()
if ignore_case:
search_text = search_text.lower()
ratio_calc.set_seq1(search_text)
ratios = {}
best_ratio = 0
best_texts = []
ratio_offset = 1
if clean:
ratio_offset *= .9
if ignore_case:
ratio_offset *= .9
for text_ in self:
# make a copy of the text as we need the original later
text = text_
if clean:
text = _clean_non_chars(text)
if ignore_case:
text = text.lower()
# check if this item is in the cache - if yes, then retrieve it
if (text, search_text) in _cache:
ratios[text_] = _cache[(text, search_text)]
elif(search_text, text) in _cache:
ratios[text_] = _cache[(search_text, text)]
# not in the cache - calculate it and add it to the cache
else:
# set up the SequenceMatcher with other text
ratio_calc.set_seq2(text)
# if a very quick check reveals that this is not going
                # to match then skip the more expensive checks
ratio = ratio_calc.real_quick_ratio() * ratio_offset
if ratio >= find_best_control_match_cutoff:
ratio = ratio_calc.quick_ratio() * ratio_offset
if ratio >= find_best_control_match_cutoff:
ratio = ratio_calc.ratio() * ratio_offset
# save the match we got and store it in the cache
ratios[text_] = ratio
_cache[(text, search_text)] = ratio
# try using the levenshtein distance instead
#lev_dist = levenshtein_distance(six.text_type(search_text), six.text_type(text))
#ratio = 1 - lev_dist / 10.0
#ratios[text_] = ratio
#print "%5s" %("%0.2f"% ratio), search_text, `text`
# if this is the best so far then update best stats
if ratios[text_] > best_ratio and \
ratios[text_] >= find_best_control_match_cutoff:
best_ratio = ratios[text_]
best_texts = [text_]
elif ratios[text_] == best_ratio:
best_texts.append(text_)
#best_ratio *= ratio_offset
return best_ratio, best_texts
#====================================================================
def build_unique_dict(controls):
"""Build the disambiguated list of controls
Separated out to a different function so that we can get
the control identifiers for printing.
"""
name_control_map = UniqueDict()
# get the visible text controls so that we can get
# the closest text if the control has no text
text_ctrls = [ctrl_ for ctrl_ in controls
if ctrl_.can_be_label and ctrl_.is_visible() and ctrl_.window_text()]
# collect all the possible names for all controls
# and build a list of them
for ctrl in controls:
ctrl_names = get_control_names(ctrl, controls, text_ctrls)
# for each of the names
for name in ctrl_names:
name_control_map[name] = ctrl
return name_control_map
#====================================================================
def find_best_control_matches(search_text, controls):
"""Returns the control that is the the best match to search_text
    This is slightly different from find_best_match in that it builds
up the list of text items to search through using information
from each control. So for example for there is an OK, Button
then the following are all added to the search list:
"OK", "Button", "OKButton"
But if there is a ListView (which do not have visible 'text')
then it will just add "ListView".
"""
name_control_map = build_unique_dict(controls)
#print ">>>>>>>", repr(name_control_map).decode("ascii", "ignore")
# # collect all the possible names for all controls
# # and build a list of them
# for ctrl in controls:
# ctrl_names = get_control_names(ctrl, controls)
#
# # for each of the names
# for name in ctrl_names:
# name_control_map[name] = ctrl
search_text = six.text_type(search_text)
best_ratio, best_texts = name_control_map.find_best_matches(search_text)
best_ratio_ci, best_texts_ci = \
name_control_map.find_best_matches(search_text, ignore_case = True)
best_ratio_clean, best_texts_clean = \
name_control_map.find_best_matches(search_text, clean = True)
best_ratio_clean_ci, best_texts_clean_ci = \
name_control_map.find_best_matches(
search_text, clean = True, ignore_case = True)
if best_ratio_ci > best_ratio:
best_ratio = best_ratio_ci
best_texts = best_texts_ci
if best_ratio_clean > best_ratio:
best_ratio = best_ratio_clean
best_texts = best_texts_clean
if best_ratio_clean_ci > best_ratio:
best_ratio = best_ratio_clean_ci
best_texts = best_texts_clean_ci
if best_ratio < find_best_control_match_cutoff:
raise MatchError(items = name_control_map.keys(), tofind = search_text)
return [name_control_map[best_text] for best_text in best_texts]
#
#def GetControlMatchRatio(text, ctrl):
# # get the texts for the control
# ctrl_names = get_control_names(ctrl)
#
# #get the best match for these
# matcher = UniqueDict()
# for name in ctrl_names:
# matcher[name] = ctrl
#
# best_ratio, unused = matcher.find_best_matches(text)
#
# return best_ratio
#
#
#
#def get_controls_ratios(search_text, controls):
# name_control_map = UniqueDict()
#
# # collect all the possible names for all controls
# # and build a list of them
# for ctrl in controls:
# ctrl_names = get_control_names(ctrl)
#
# # for each of the names
# for name in ctrl_names:
# name_control_map[name] = ctrl
#
# match_ratios, best_ratio, best_text = \
# _get_match_ratios(name_control_map.keys(), search_text)
#
# return match_ratios, best_ratio, best_text,
| bsd-3-clause | 5,249,343,812,821,854,000 | 34.272727 | 118 | 0.581309 | false |
RIKILT/CITESspeciesDetect | CheckCriteriaBlastSingleSample.py | 1 | 5204 | #!/usr/bin/env python
# Script which tests the different filtering thresholds per barcode
# Returns per barcode the detected species which match the criteria
import sys
import os
### Get the OTU abundance from the file (This is per barcode)
def GetOTUabundance(statFile, pOTU):
# Local variables
f = open(statFile)
abundance={}
#OTUabun=100
for line in f:
# Remove the enter from the end of the line
line = line.rstrip()
### Get the different barcode from the statistics file
if (line.startswith("############ Statistics for barcode: ")):
barcode=line.split("############ Statistics for barcode: ")[1].replace(" ############", "")
if not(barcode in abundance.keys()):
abundance[barcode]=1
#print barcode
else:
if (line.startswith("# combined file: ")):
assignedReads=int(line.split("\t")[1])
OTUabun=assignedReads*(pOTU/100)
#print barcode+"\t"+str(assignedReads)+"\t"+str(OTUabun)
abundance[barcode]=OTUabun
### Close the file and return the dictionary
f.close()
return abundance
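### Illustrative note (not from the original script): the loop above reacts to
### exactly two kinds of lines in the statistics file, e.g.
###
### ############ Statistics for barcode: ITS2 ############
### # combined file: <tab>12345
###
### where the tab-separated read count is used to derive the per-barcode OTU
### abundance threshold (pOTU percent of assigned reads).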
### Function to retrieve the different organisms from the blast summary
def GetHitsPerBarcode(abundance, InFile, pident, OutFile):
# Local variables
f = open(InFile, "r")
output = open(OutFile, "w")
CountSpec={}
OTU=""
qlen=0
for line in f:
# Remove the enter from the end of the line
line = line.rstrip()
### Get barcodes but ignore title lines
if (line.startswith("#####")):
if (line.startswith("##### Results for:")):
output.write("\n"+line+"\n")
barcode=line.split("##### Results for: ")[1].replace(" #####", "")
output.write("OTU abun "+barcode+":\t"+str(abundance[barcode])+"\n")
### Get a different length per barcode
if ( barcode == "ITS2" ):
qlen=100
elif (barcode == "rbcL-mini"):
qlen=140
elif ( barcode == "trnL_P6loop" ):
qlen=10
else:
qlen=200
else:
### Ignore the blast line of the output
if (line.startswith("OTU")):
splitLine = line.split("\t")
### Check if the size of the OTU is above the OTU abundance
if (abundance[barcode] <= int(splitLine[0].split("size=")[1].replace(";",""))):
### Get the top hit (based on bitscore)
if (OTU == splitLine[0]):
if not (splitLine[4] < bitscore):
### Is your line matching the criteria (Query length and percentage of identity)
if ( (int(splitLine[1] ) >= qlen) and (float(splitLine[3]) >= pident) ):
output.write(line+"\n")
else:
### Get the next values
OTU=splitLine[0]
bitscore=splitLine[4]
### Is your line matching the criteria (Query length and percentage of identity)
if ( (int(splitLine[1] ) >= qlen) and (float(splitLine[3]) >= pident) ):
output.write(line+"\n")
else:
### Skip the empty lines
if (line != ""):
### Only get the title lines from the blast output
if (line.startswith("qseqid")):
#print line
output.write(line+"\n")
### Close the files
output.close()
f.close()
### Retrieve the hits per barcode
def GetAllHitsPerBarcode(abundance, InFile, pident, OutFile):
# Local variables
f = open(InFile, "r")
output = open(OutFile, "w")
CountSpec={}
OTU=""
qlen=0
for line in f:
# Remove the enter from the end of the line
line = line.rstrip()
### Get barcodes but ignore title lines
if (line.startswith("#####")):
if (line.startswith("##### Results for:")):
output.write("\n"+line+"\n")
barcode=line.split("##### Results for: ")[1].replace(" #####", "")
output.write("OTU abun "+barcode+":\t"+str(abundance[barcode])+"\n")
### Get a different length per barcode
if ( barcode == "ITS2" ):
qlen=100
elif (barcode == "rbcL-mini"):
qlen=140
elif ( barcode == "trnL_P6loop" ):
qlen=10
else:
qlen=200
else:
### Ignore the blast line of the output
if (line.startswith("OTU")):
splitLine = line.split("\t")
### Check if the size of the OTU is above the OTU abundance
if (abundance[barcode] <= int(splitLine[0].split("size=")[1].replace(";",""))):
if ( (int(splitLine[1] ) >= qlen) and (float(splitLine[3]) >= pident) ):
output.write(line+"\n")
else:
### Skip the empty lines
if (line != ""):
### Only get the title lines from the blast output
if (line.startswith("qseqid")):
output.write(line+"\n")
### Close the files
output.close()
f.close()
### Check all the input and call all the functions
def main(argv):
### Check the input
if (len(argv) == 6 ):
### Catch the variable files
statFile=argv[0]
InFile=argv[1]
FullInFile=argv[2]
OutName=argv[3]
### Variables
pOTU=float(argv[4])
pident=int(argv[5])
### Local variables
OutFile=OutName+"_"+str(pident)+"_"+str(pOTU)+".tsv"
FullOutFile=OutName+"_"+str(pident)+"_"+str(pOTU)+"_Full.tsv"
### Call your functions
abundance=GetOTUabundance(statFile, pOTU)
GetHitsPerBarcode(abundance, InFile, pident, OutFile)
GetAllHitsPerBarcode(abundance, FullInFile, pident, FullOutFile)
else:
print "Wrong type of arguments: python CheckCriteriaBlastSingleFile.py <inFile> <OutFile>"
### Call your main function
if __name__ == "__main__":
main(sys.argv[1:])
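# Illustrative command line (the file names and threshold values below are
# assumptions for demonstration, not part of the original script):
#   python CheckCriteriaBlastSingleSample.py barcode_stats.txt blast_summary.tsv blast_full.tsv detected_species 0.1 97
# With pident=97 and pOTU=0.1 this writes detected_species_97_0.1.tsv and
# detected_species_97_0.1_Full.tsv.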
| bsd-3-clause | -4,412,462,166,043,438,600 | 26.828877 | 94 | 0.62452 | false |
jberci/resolwe | resolwe/flow/views/data.py | 1 | 8515 | """Data viewset."""
from elasticsearch_dsl.query import Q
from django.db import transaction
from django.db.models import Count
from rest_framework import exceptions, mixins, status, viewsets
from rest_framework.decorators import list_route
from rest_framework.response import Response
from resolwe.elastic.composer import composer
from resolwe.elastic.viewsets import ElasticSearchCombinedViewSet
from resolwe.flow.models import Collection, Data, Entity, Process
from resolwe.flow.models.utils import fill_with_defaults
from resolwe.flow.serializers import DataSerializer
from resolwe.flow.utils import get_data_checksum
from resolwe.permissions.loader import get_permissions_class
from resolwe.permissions.mixins import ResolwePermissionsMixin
from resolwe.permissions.shortcuts import get_objects_for_user
from resolwe.permissions.utils import assign_contributor_permissions, copy_permissions
from ..elastic_indexes import DataDocument
from .mixins import ResolweCheckSlugMixin, ResolweCreateModelMixin, ResolweUpdateModelMixin
class DataViewSet(ElasticSearchCombinedViewSet,
ResolweCreateModelMixin,
mixins.RetrieveModelMixin,
ResolweUpdateModelMixin,
mixins.DestroyModelMixin,
ResolwePermissionsMixin,
ResolweCheckSlugMixin,
viewsets.GenericViewSet):
"""API view for :class:`Data` objects."""
queryset = Data.objects.all().prefetch_related('process', 'descriptor_schema', 'contributor')
serializer_class = DataSerializer
permission_classes = (get_permissions_class(),)
document_class = DataDocument
filtering_fields = ('id', 'slug', 'version', 'name', 'created', 'modified', 'contributor', 'owners',
'status', 'process', 'process_type', 'type', 'process_name', 'tags', 'collection',
'parents', 'children', 'entity', 'started', 'finished', 'text')
filtering_map = {
'name': 'name.raw',
'contributor': 'contributor_id',
'owners': 'owner_ids',
'process_name': 'process_name.ngrams',
}
ordering_fields = ('id', 'created', 'modified', 'started', 'finished', 'name', 'contributor',
'process_name', 'process_type', 'type')
ordering_map = {
'name': 'name.raw',
'process_type': 'process_type.raw',
'type': 'type.raw',
'process_name': 'process_name.raw',
'contributor': 'contributor_sort',
}
ordering = '-created'
def get_always_allowed_arguments(self):
"""Return query arguments which are always allowed."""
return super().get_always_allowed_arguments() + [
'hydrate_data',
'hydrate_collections',
'hydrate_entities',
]
def custom_filter_tags(self, value, search):
"""Support tags query."""
if not isinstance(value, list):
value = value.split(',')
filters = [Q('match', **{'tags': item}) for item in value]
search = search.query('bool', must=filters)
return search
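    # Illustrative behaviour (the example request is an assumption, not part of
    # the original code): a query such as ``?tags=community,research`` is split
    # on commas and every tag must match, i.e. the search becomes
    # Q('bool', must=[Q('match', tags='community'), Q('match', tags='research')]).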
def custom_filter_text(self, value, search):
"""Support general query using the 'text' attribute."""
if isinstance(value, list):
value = ' '.join(value)
should = [
Q('match', slug={'query': value, 'operator': 'and', 'boost': 10.0}),
Q('match', **{'slug.ngrams': {'query': value, 'operator': 'and', 'boost': 5.0}}),
Q('match', name={'query': value, 'operator': 'and', 'boost': 10.0}),
Q('match', **{'name.ngrams': {'query': value, 'operator': 'and', 'boost': 5.0}}),
Q('match', contributor_name={'query': value, 'operator': 'and', 'boost': 5.0}),
Q('match', **{'contributor_name.ngrams': {'query': value, 'operator': 'and', 'boost': 2.0}}),
Q('match', owner_names={'query': value, 'operator': 'and', 'boost': 5.0}),
Q('match', **{'owner_names.ngrams': {'query': value, 'operator': 'and', 'boost': 2.0}}),
Q('match', process_name={'query': value, 'operator': 'and', 'boost': 5.0}),
Q('match', **{'process_name.ngrams': {'query': value, 'operator': 'and', 'boost': 2.0}}),
Q('match', status={'query': value, 'operator': 'and', 'boost': 2.0}),
Q('match', type={'query': value, 'operator': 'and', 'boost': 2.0}),
]
# Add registered text extensions.
for extension in composer.get_extensions(self):
if hasattr(extension, 'text_filter'):
should += extension.text_filter(value)
search = search.query('bool', should=should)
return search
def create(self, request, *args, **kwargs):
"""Create a resource."""
collections = request.data.get('collections', [])
# check that user has permissions on all collections that Data
# object will be added to
for collection_id in collections:
try:
collection = Collection.objects.get(pk=collection_id)
except Collection.DoesNotExist:
return Response({'collections': ['Invalid pk "{}" - object does not exist.'.format(collection_id)]},
status=status.HTTP_400_BAD_REQUEST)
if not request.user.has_perm('add_collection', obj=collection):
if request.user.has_perm('view_collection', obj=collection):
raise exceptions.PermissionDenied(
"You don't have `ADD` permission on collection (id: {}).".format(collection_id)
)
else:
raise exceptions.NotFound(
"Collection not found (id: {}).".format(collection_id)
)
self.define_contributor(request)
if kwargs.pop('get_or_create', False):
response = self.perform_get_or_create(request, *args, **kwargs)
if response:
return response
return super().create(request, *args, **kwargs)
@list_route(methods=['post'])
def get_or_create(self, request, *args, **kwargs):
"""Get ``Data`` object if similar already exists, otherwise create it."""
kwargs['get_or_create'] = True
return self.create(request, *args, **kwargs)
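    # Illustrative usage (the URL prefix depends on how the router is mounted and
    # is an assumption here): POST <api-prefix>/data/get_or_create with the same
    # payload as a normal create. If a Data object with the same input checksum
    # and a CACHED/TEMP persistence process is visible to the user, it is
    # returned instead of creating a new one (see perform_get_or_create below).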
def perform_get_or_create(self, request, *args, **kwargs):
"""Perform "get_or_create" - return existing object if found."""
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
process = serializer.validated_data.get('process')
process_input = request.data.get('input', {})
fill_with_defaults(process_input, process.input_schema)
checksum = get_data_checksum(process_input, process.slug, process.version)
data_qs = Data.objects.filter(
checksum=checksum,
process__persistence__in=[Process.PERSISTENCE_CACHED, Process.PERSISTENCE_TEMP],
)
data_qs = get_objects_for_user(request.user, 'view_data', data_qs)
if data_qs.exists():
data = data_qs.order_by('created').last()
serializer = self.get_serializer(data)
return Response(serializer.data)
def perform_create(self, serializer):
"""Create a resource."""
process = serializer.validated_data.get('process')
if not process.is_active:
raise exceptions.ParseError(
'Process retired (id: {}, slug: {}/{}).'.format(process.id, process.slug, process.version)
)
with transaction.atomic():
instance = serializer.save()
assign_contributor_permissions(instance)
# Entity is added to the collection only when it is
# created - when it only contains 1 Data object.
entities = Entity.objects.annotate(num_data=Count('data')).filter(data=instance, num_data=1)
# Assign data object to all specified collections.
collection_pks = self.request.data.get('collections', [])
for collection in Collection.objects.filter(pk__in=collection_pks):
collection.data.add(instance)
copy_permissions(collection, instance)
# Add entities to which data belongs to the collection.
for entity in entities:
entity.collections.add(collection)
copy_permissions(collection, entity)
| apache-2.0 | -4,215,530,313,429,322,000 | 43.348958 | 116 | 0.601996 | false |
astroclark/bhextractor | bin/bhex_scalemassdemo.py | 1 | 4019 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2015 James Clark <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
bhextractor_plotpca.py
Construct waveform catalogues and PCA for plotting and diagnostics
"""
import numpy as np
from matplotlib import pyplot as pl
import bhextractor_pca as bhex
import pycbc.types
import pycbc.filter
from pycbc.psd import aLIGOZeroDetHighPower
# -------------------------------
# USER INPUT
catalogue_name='Q'
theta=90.0
# END USER INPUT
# -------------------------------
# -------------------------------
# ANALYSIS
catlen=4
#
# Setup and then build the catalogue
#
catalogue = bhex.waveform_catalogue(catalogue_name=catalogue_name, fs=2048,
catalogue_len=catlen, mtotal_ref=250, Dist=1., theta=theta)
oriwave250 = np.copy(catalogue.aligned_catalogue[0,:])
#
# Do the PCA
#
pca = bhex.waveform_pca(catalogue)
#
# Build a 350 solar mass waveform from the 250 Msun PCs
# Just use the first waveform
betas = pca.projection_plus[catalogue.waveform_names[0]]
times = np.arange(0,len(catalogue.aligned_catalogue[0,:])/2048.,1./2048)
recwave350 = bhex.reconstruct_waveform(pca.pca_plus, betas, len(catalogue.waveform_names),
mtotal_target=350.0)
#
# Now make a catalogue at 350 solar masses and then compute the overlap
#
catalogue350 = bhex.waveform_catalogue(catalogue_name=catalogue_name, fs=2048,
catalogue_len=catlen, mtotal_ref=350, Dist=1., theta=theta)
oriwave350 = np.copy(catalogue350.aligned_catalogue[0,:])
# Finally, compute the match between the reconstructed 350 Msun system and the
# system we generated at that mass in the first place
recwave350_pycbc = pycbc.types.TimeSeries(np.real(recwave350), delta_t=1./2048)
oriwave250_pycbc = pycbc.types.TimeSeries(np.real(oriwave250), delta_t=1./2048)
oriwave350_pycbc = pycbc.types.TimeSeries(np.real(oriwave350), delta_t=1./2048)
psd = aLIGOZeroDetHighPower(len(recwave350_pycbc.to_frequencyseries()),
recwave350_pycbc.to_frequencyseries().delta_f, low_freq_cutoff=10.0)
match_cat = pycbc.filter.match(oriwave250_pycbc.to_frequencyseries(),
oriwave350_pycbc.to_frequencyseries(), psd=psd,
low_frequency_cutoff=10)[0]
match_rec = pycbc.filter.match(recwave350_pycbc.to_frequencyseries(),
oriwave350_pycbc.to_frequencyseries(), psd=psd,
low_frequency_cutoff=10)[0]
print 'Match between 250 and 350 Msun catalogue waves: ', match_cat
print 'Match between 350 reconstruction and 350 catalogue wave: ', match_rec
#
# Make plots
#
if 1:
print "Plotting reconstructions"
fig, ax = pl.subplots(nrows=2,ncols=1)
ax[0].plot(times,np.real(oriwave250), 'b', label='250 M$_{\odot}$ catalogue')
ax[0].plot(times,np.real(oriwave350), 'g', label='350 M$_{\odot}$ catalogue')
ax[0].set_xlim(0,2.5)
ax[0].set_title('Match = %f'% match_cat)
ax[0].legend(loc='upper left',prop={'size':10})
ax[1].plot(times,np.real(oriwave350), 'g', label='350 M$_{\odot}$ catalogue')
ax[1].plot(times,np.real(recwave350), 'r', label='350 M$_{\odot}$ reconstruction')
ax[1].set_xlim(0,2.5)
ax[1].set_xlabel('Time (s)')
ax[1].set_title('Match = %f'% match_rec)
ax[1].legend(loc='upper left',prop={'size':10})
fig.tight_layout()
fig.savefig('scalemassdemo.png')
| gpl-2.0 | -3,245,627,533,540,735,000 | 30.645669 | 90 | 0.693705 | false |
jeremy24/rnn-classifier | letter_tools.py | 1 | 1759 | import glob
import imghdr
import os
import PIL
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
def _is_ascii(s):
return all(ord(c) < 128 for c in s)
def text2png(text, fullpath, color="#FFF", bgcolor="#000",
fontfullpath="assets/fonts/Oswald-Bold.ttf",
fontsize=35, leftpadding=3, rightpadding=3,
width=20, height=None):
REPLACEMENT_CHARACTER = u'\uFFFD'
NEWLINE_REPLACEMENT_STRING = ' ' + REPLACEMENT_CHARACTER + ' '
font = ImageFont.load_default() if fontfullpath == None else ImageFont.truetype(fontfullpath, fontsize)
text = text.replace('\n', NEWLINE_REPLACEMENT_STRING)
lines = []
line = u""
if len(text) == 0:
print("\tNo valid text, bailing out...")
return
for word in text.split():
if word == REPLACEMENT_CHARACTER: # give a blank line
			lines.append(line[1:]) # slice the white space in the beginning of the line
line = u""
lines.append(u"") # the blank line
elif font.getsize(line + ' ' + word)[0] <= (width - rightpadding - leftpadding):
line += ' ' + word
else: # start a new line
			lines.append(line[1:]) # slice the white space in the beginning of the line
line = u""
# TODO: handle too long words at this point
line += ' ' + word # for now, assume no word alone can exceed the line width
if len(line) != 0:
lines.append(line[1:]) # add the last line
line_height = font.getsize(text)[1]
width = font.getsize(text)[0]
width += int(width * .10)
if height is not None:
line_height = height
img_height = line_height * (len(lines) + 1)
img = Image.new("RGBA", (width, img_height), bgcolor)
draw = ImageDraw.Draw(img)
y = 0
for line in lines:
draw.text((leftpadding, y), line, color, font=font)
y += line_height
img.save(fullpath)
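# Example usage (illustrative values; the output path and text are assumptions,
# not part of the original module):
#
#   text2png(u"hello rnn", "/tmp/hello.png", fontfullpath=None)
#
# Passing fontfullpath=None falls back to PIL's built-in default font, so the
# call does not depend on the bundled Oswald-Bold.ttf being present.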
| mit | -5,027,793,800,516,839,000 | 25.253731 | 104 | 0.667425 | false |
mihaisoloi/conpaas | conpaas-director/setup.py | 1 | 2293 | #!/usr/bin/env python
import os
import sys
import shutil
from pwd import getpwnam
from grp import getgrnam
from setuptools import setup
from pkg_resources import Requirement, resource_filename
CPSVERSION = '1.2.0'
CONFDIR = '/etc/cpsdirector'
if not os.geteuid() == 0:
CONFDIR = 'cpsdirectorconf'
long_description = """
ConPaaS: an integrated runtime environment for elastic Cloud applications
=========================================================================
"""
setup(name='cpsdirector',
version=CPSVERSION,
description='ConPaaS director',
author='Emanuele Rocca',
author_email='[email protected]',
url='http://www.conpaas.eu/',
download_url='http://www.conpaas.eu/download/',
license='BSD',
packages=[ 'cpsdirector', ],
include_package_data=True,
zip_safe=False,
package_data={ 'cpsdirector': [ 'ConPaaS.tar.gz', ] },
data_files=[ ( CONFDIR, [ 'director.cfg.example', 'director.cfg.multicloud-example', 'ConPaaS.tar.gz' ] ), ],
scripts=[ 'cpsadduser.py', 'director.wsgi', 'cpsconf.py', 'cpscheck.py' ],
install_requires=[ 'cpslib', 'flask-sqlalchemy', 'apache-libcloud', 'netaddr' ],
dependency_links=[ 'http://www.linux.it/~ema/conpaas/cpslib-%s.tar.gz' % CPSVERSION, ],)
if __name__ == "__main__" and sys.argv[1] == "install":
# overwrite /etc/cpsdirector/{config,scripts}
for what in 'config', 'scripts':
targetdir = os.path.join(CONFDIR, what)
if os.path.isdir(targetdir):
shutil.rmtree(targetdir)
shutil.copytree(os.path.join('conpaas', what), targetdir)
if not os.path.exists(os.path.join(CONFDIR, "director.cfg")):
# copy director.cfg.example under CONFDIR/director.cfg
conffile = resource_filename(Requirement.parse("cpsdirector"),
"director.cfg.example")
shutil.copyfile(conffile, os.path.join(CONFDIR, "director.cfg"))
# create 'certs' dir
if not os.path.exists(os.path.join(CONFDIR, "certs")):
os.mkdir(os.path.join(CONFDIR, "certs"))
# set www-data as the owner of CONFDIR
try:
os.chown(CONFDIR, getpwnam('www-data').pw_uid,
getgrnam('www-data').gr_gid)
except OSError:
print "W: 'chown www-data:www-data %s' failed" % CONFDIR
| bsd-3-clause | 3,412,883,459,429,648,400 | 34.828125 | 115 | 0.627126 | false |
sssilver/angler | rod/rod/handler/lesson.py | 1 | 3347 | import flask
import decimal
import dateutil.parser
import flask.ext.login
import rod
import rod.model.student
import rod.model.lesson
import rod.model.group
import rod.model.company
import rod.model.transaction
import rod.model.schemas
lesson_handler = flask.Blueprint('lesson', __name__)
@lesson_handler.route('/lesson', methods=['GET'])
def list_lesson():
teacher_id = flask.request.args.get('teacher_id')
query = rod.model.lesson.Lesson.query.filter_by(is_deleted=False)
if teacher_id:
lessons = query.filter_by(teacher_id=teacher_id).all()
else:
lessons = query.all()
return flask.jsonify({
'items': rod.model.schemas.LessonSchema(many=True).dump(lessons).data,
'count': len(lessons)
})
@lesson_handler.route('/group/<int:group_id>/lessons', methods=['POST'])
def file_lesson(group_id):
lesson_data = flask.request.json
# File the lesson
lesson = rod.model.lesson.Lesson()
lesson.time = dateutil.parser.parse(lesson_data['datetime'])
lesson.teacher_id = flask.ext.login.current_user.id
lesson.group_id = group_id
rod.model.db.session.add(lesson)
companies = set() # Companies that had students in this lesson
# Record attendance
for student_id, is_absent in lesson_data['attendance'].iteritems():
student_id = int(student_id) # Cast to int, as JSON keys are always strings
# Get each student
student = rod.model.db.session.query(rod.model.student.Student).get(student_id)
# Get their membership in this group
membership_query = rod.model.db.session.query(rod.model.student.Membership)
membership = membership_query.filter_by(student_id=student_id).filter_by(group_id=group_id).one()
if membership.tariff.type == 'student': # Student tariff?
# For personal tariffs, we wanna update the student's balance
student.balance -= membership.tariff.price
student_transaction = rod.model.transaction.StudentTransaction()
student_transaction.staff_id = lesson.teacher_id
student_transaction.amount = membership.tariff.price
student_transaction.student_id = student_id
student_transaction.type = 'payment'
rod.model.db.session.add(student_transaction)
elif membership.tariff.type == 'company': # Company tariff?
# For corporate tariffs, we just wanna collect the companies that had students
# in this lesson. We'll update their balances separately down the road.
companies.add(membership.company)
# Corporate balances are updated once,
# regardless of how many students were in the group during this lesson
for company in companies:
# Update the corporate balance
company.balance -= membership.tariff.price
company_transaction = rod.model.transaction.CompanyTransaction()
company_transaction.staff_id = lesson.teacher_id
company_transaction.amount = membership.tariff.price
company_transaction.company_id = company.id
company_transaction.type = 'payment'
rod.model.db.session.add(company_transaction)
# Finally, commit the entire big transaction
rod.model.db.session.commit()
return flask.jsonify(rod.model.schemas.LessonSchema().dump(lesson).data)
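# Illustrative request body for file_lesson (the concrete values are assumptions;
# the field names come from the handler above):
#   POST /group/<group_id>/lessons
#   {"datetime": "2017-03-01T18:00:00",
#    "attendance": {"7": false, "12": true}}   # student_id -> is_absent flag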
| bsd-3-clause | -5,314,708,541,440,655,000 | 35.78022 | 105 | 0.689573 | false |
henry-ngo/VIP | vip_hci/preproc/skysubtraction.py | 1 | 4110 | #! /usr/bin/env python
"""
Module with sky subtraction function.
"""
from __future__ import division
__author__ = 'C. Gomez @ ULg'
__all__ = ['cube_subtract_sky_pca']
import numpy as np
def cube_subtract_sky_pca(sci_cube, sky_cube, mask, ref_cube=None, ncomp=2):
""" PCA based sky subtraction.
Parameters
----------
sci_cube : array_like
3d array of science frames.
sky_cube : array_like
3d array of sky frames.
mask : array_like
Mask indicating the region for the analysis. Can be created with the
function vip_hci.var.create_ringed_spider_mask.
ref_cube : array_like or None
Reference cube.
ncomp : int
Sets the number of PCs you want to use in the sky subtraction.
Returns
-------
Sky subtracted cube.
"""
from ..pca import prepare_matrix, svd_wrapper
if sci_cube.shape[1] != sky_cube.shape[1] or sci_cube.shape[2] != \
sky_cube.shape[2]:
raise TypeError('Science and Sky frames sizes do not match')
if ref_cube is not None:
if sci_cube.shape[1] != ref_cube.shape[1] or sci_cube.shape[2] != \
ref_cube.shape[2]:
raise TypeError('Science and Reference frames sizes do not match')
# Getting the EVs from the sky cube
Msky = prepare_matrix(sky_cube, scaling=None, verbose=False)
sky_pcs = svd_wrapper(Msky, 'lapack', sky_cube.shape[0], False,
False)
sky_pcs_cube = sky_pcs.reshape(sky_cube.shape[0], sky_cube.shape[1],
sky_cube.shape[1])
# Masking the science cube
sci_cube_masked = np.zeros_like(sci_cube)
ind_masked = np.where(mask == 0)
for i in range(sci_cube.shape[0]):
masked_image = np.copy(sci_cube[i])
masked_image[ind_masked] = 0
sci_cube_masked[i] = masked_image
Msci_masked = prepare_matrix(sci_cube_masked, scaling=None,
verbose=False)
# Masking the PCs learned from the skies
sky_pcs_cube_masked = np.zeros_like(sky_pcs_cube)
for i in range(sky_pcs_cube.shape[0]):
masked_image = np.copy(sky_pcs_cube[i])
masked_image[ind_masked] = 0
sky_pcs_cube_masked[i] = masked_image
# Project the masked frames onto the sky PCs to get the coefficients
transf_sci = np.zeros((sky_cube.shape[0], Msci_masked.shape[0]))
for i in range(Msci_masked.shape[0]):
transf_sci[:, i] = np.inner(sky_pcs, Msci_masked[i].T)
Msky_pcs_masked = prepare_matrix(sky_pcs_cube_masked, scaling=None,
verbose=False)
mat_inv = np.linalg.inv(np.dot(Msky_pcs_masked, Msky_pcs_masked.T))
transf_sci_scaled = np.dot(mat_inv, transf_sci)
# Obtaining the optimized sky and subtraction
sci_cube_skysub = np.zeros_like(sci_cube)
for i in range(Msci_masked.shape[0]):
sky_opt = np.array([np.sum(
transf_sci_scaled[j, i] * sky_pcs_cube[j] for j in range(ncomp))])
sci_cube_skysub[i] = sci_cube[i] - sky_opt
# Processing the reference cube (if any)
if ref_cube is not None:
ref_cube_masked = np.zeros_like(ref_cube)
for i in range(ref_cube.shape[0]):
masked_image = np.copy(ref_cube[i])
masked_image[ind_masked] = 0
ref_cube_masked[i] = masked_image
Mref_masked = prepare_matrix(ref_cube_masked, scaling=None,
verbose=False)
transf_ref = np.zeros((sky_cube.shape[0], Mref_masked.shape[0]))
for i in range(Mref_masked.shape[0]):
transf_ref[:, i] = np.inner(sky_pcs, Mref_masked[i].T)
transf_ref_scaled = np.dot(mat_inv, transf_ref)
ref_cube_skysub = np.zeros_like(ref_cube)
for i in range(Mref_masked.shape[0]):
sky_opt = np.array([np.sum(
transf_ref_scaled[j, i] * sky_pcs_cube[j] for j in range(ncomp))])
ref_cube_skysub[i] = ref_cube[i] - sky_opt
return sci_cube_skysub, ref_cube_skysub
else:
return sci_cube_skysub
| mit | 7,069,799,869,590,812,000 | 36.027027 | 82 | 0.592457 | false |
tiborsimko/invenio-oauthclient | tests/conftest.py | 1 | 10459 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Pytest configuration."""
from __future__ import absolute_import, print_function
import json
import os
import shutil
import tempfile
import pytest
from flask import Flask
from flask_babelex import Babel
from flask_mail import Mail
from flask_menu import Menu as FlaskMenu
from flask_oauthlib.client import OAuth as FlaskOAuth
from flask_oauthlib.client import OAuthResponse
from invenio_accounts import InvenioAccounts
from invenio_db import InvenioDB, db
from invenio_userprofiles import InvenioUserProfiles, UserProfile
from invenio_userprofiles.views import blueprint_ui_init
from sqlalchemy_utils.functions import create_database, database_exists, \
drop_database
from invenio_oauthclient import InvenioOAuthClient
from invenio_oauthclient.contrib.cern import REMOTE_APP as CERN_REMOTE_APP
from invenio_oauthclient.contrib.github import REMOTE_APP as GITHUB_REMOTE_APP
from invenio_oauthclient.contrib.orcid import REMOTE_APP as ORCID_REMOTE_APP
from invenio_oauthclient.views.client import blueprint as blueprint_client
from invenio_oauthclient.views.settings import blueprint as blueprint_settings
@pytest.fixture
def base_app(request):
"""Flask application fixture without OAuthClient initialized."""
instance_path = tempfile.mkdtemp()
base_app = Flask('testapp')
base_app.config.update(
TESTING=True,
WTF_CSRF_ENABLED=False,
LOGIN_DISABLED=False,
CACHE_TYPE='simple',
OAUTHCLIENT_REMOTE_APPS=dict(
cern=CERN_REMOTE_APP,
orcid=ORCID_REMOTE_APP,
github=GITHUB_REMOTE_APP,
),
GITHUB_APP_CREDENTIALS=dict(
consumer_key='github_key_changeme',
consumer_secret='github_secret_changeme',
),
ORCID_APP_CREDENTIALS=dict(
consumer_key='orcid_key_changeme',
consumer_secret='orcid_secret_changeme',
),
CERN_APP_CREDENTIALS=dict(
consumer_key='cern_key_changeme',
consumer_secret='cern_secret_changeme',
),
# use local memory mailbox
EMAIL_BACKEND='flask_email.backends.locmem.Mail',
SQLALCHEMY_DATABASE_URI=os.getenv('SQLALCHEMY_DATABASE_URI',
'sqlite://'),
SERVER_NAME='localhost',
DEBUG=False,
SECRET_KEY='TEST',
SECURITY_DEPRECATED_PASSWORD_SCHEMES=[],
SECURITY_PASSWORD_HASH='plaintext',
SECURITY_PASSWORD_SCHEMES=['plaintext'],
)
FlaskMenu(base_app)
Babel(base_app)
Mail(base_app)
InvenioDB(base_app)
InvenioAccounts(base_app)
with base_app.app_context():
if str(db.engine.url) != 'sqlite://' and \
not database_exists(str(db.engine.url)):
create_database(str(db.engine.url))
db.create_all()
def teardown():
with base_app.app_context():
db.session.close()
if str(db.engine.url) != 'sqlite://':
drop_database(str(db.engine.url))
shutil.rmtree(instance_path)
request.addfinalizer(teardown)
base_app.test_request_context().push()
return base_app
def _init_app(app_):
"""Init OAuth app."""
FlaskOAuth(app_)
InvenioOAuthClient(app_)
app_.register_blueprint(blueprint_client)
app_.register_blueprint(blueprint_settings)
return app_
@pytest.fixture
def app(base_app):
"""Flask application fixture."""
base_app.config.update(
WTF_CSRF_ENABLED=False,
)
return _init_app(base_app)
@pytest.fixture
def app_with_csrf(base_app):
"""Flask application fixture with CSRF enabled."""
base_app.config.update(
WTF_CSRF_ENABLED=True,
)
return _init_app(base_app)
def _init_userprofiles(app_):
"""Init userprofiles module."""
InvenioUserProfiles(app_)
app_.register_blueprint(blueprint_ui_init)
return app_
@pytest.fixture
def app_with_userprofiles(app):
"""Configure userprofiles module with CSRF disabled."""
app.config.update(
USERPROFILES_EXTEND_SECURITY_FORMS=True,
WTF_CSRF_ENABLED=False,
)
return _init_userprofiles(app)
@pytest.fixture
def app_with_userprofiles_csrf(app):
"""Configure userprofiles module with CSRF enabled."""
app.config.update(
USERPROFILES_EXTEND_SECURITY_FORMS=True,
WTF_CSRF_ENABLED=True,
)
return _init_userprofiles(app)
@pytest.fixture
def models_fixture(app):
"""Flask app with example data used to test models."""
with app.app_context():
datastore = app.extensions['security'].datastore
datastore.create_user(
email='[email protected]',
password='tester',
active=True
)
datastore.create_user(
email='[email protected]',
password='tester',
active=True
)
datastore.create_user(
email='[email protected]',
password='tester',
active=True
)
datastore.commit()
return app
@pytest.fixture
def params():
"""Fixture for remote app params."""
def params(x):
return dict(
request_token_params={'scope': ''},
base_url='https://foo.bar/',
request_token_url=None,
access_token_url='https://foo.bar/oauth/access_token',
authorize_url='https://foo.bar/oauth/authorize',
consumer_key=x,
consumer_secret='testsecret',
)
return params
@pytest.fixture
def remote():
"""Fixture for remote app."""
return type('test_remote', (), dict(
name='example_remote',
request_token_params={'scope': ''},
base_url='https://foo.bar/',
request_token_url=None,
access_token_url='https://foo.bar/oauth/access_token',
authorize_url='https://foo.bar/oauth/authorize',
consumer_key='testkey',
consumer_secret='testsecret',
))()
@pytest.fixture
def views_fixture(base_app, params):
"""Flask application with example data used to test views."""
with base_app.app_context():
datastore = base_app.extensions['security'].datastore
datastore.create_user(
email='[email protected]',
password='tester',
active=True
)
datastore.create_user(
email='[email protected]',
password='tester',
active=True
)
datastore.create_user(
email='[email protected]',
password='tester',
active=True
)
datastore.commit()
base_app.config['OAUTHCLIENT_REMOTE_APPS'].update(
dict(
test=dict(
authorized_handler=lambda *args, **kwargs: 'TEST',
params=params('testid'),
title='MyLinkedTestAccount',
),
test_invalid=dict(
authorized_handler=lambda *args, **kwargs: 'TEST',
params=params('test_invalidid'),
title='Test Invalid',
),
full=dict(
params=params('fullid'),
title='Full',
),
)
)
FlaskOAuth(base_app)
InvenioOAuthClient(base_app)
base_app.register_blueprint(blueprint_client)
base_app.register_blueprint(blueprint_settings)
return base_app
@pytest.fixture
def example_github(request):
"""ORCID example data."""
return {
'name': 'Josiah Carberry',
'expires_in': 3599,
'access_token': 'test_access_token',
'refresh_token': 'test_refresh_token',
'scope': '/authenticate',
'token_type': 'bearer',
}
@pytest.fixture
def example_orcid(request):
"""ORCID example data."""
return {
'name': 'Josiah Carberry',
'expires_in': 3599,
'orcid': '0000-0002-1825-0097',
'access_token': 'test_access_token',
'refresh_token': 'test_refresh_token',
'scope': '/authenticate',
'token_type': 'bearer'
}, dict(external_id='0000-0002-1825-0097',
external_method='orcid',
user=dict(
profile=dict(
full_name='Josiah Carberry'
)
)
)
@pytest.fixture()
def example_cern(request):
"""CERN example data."""
file_path = os.path.join(os.path.dirname(__file__),
'data/oauth_response_content.json')
with open(file_path) as response_file:
json_data = response_file.read()
return OAuthResponse(
resp=None,
content=json_data,
content_type='application/json'
), dict(
access_token='test_access_token',
token_type='bearer',
expires_in=1199,
refresh_token='test_refresh_token'
), dict(
user=dict(
email='[email protected]',
profile=dict(username='taccount', full_name='Test Account'),
),
external_id='123456', external_method='cern',
active=True
)
@pytest.fixture(scope='session')
def orcid_bio():
"""ORCID response fixture."""
file_path = os.path.join(os.path.dirname(__file__), 'data/orcid_bio.json')
with open(file_path) as response_file:
data = json.load(response_file)
return data
@pytest.fixture()
def user(app_with_userprofiles):
"""Create users."""
with db.session.begin_nested():
datastore = app_with_userprofiles.extensions['security'].datastore
user1 = datastore.create_user(email='[email protected]',
password='tester', active=True)
profile = UserProfile(username='mynick', user=user1)
db.session.add(profile)
db.session.commit()
return user1
@pytest.fixture()
def form_test_data():
"""Test data to fill a registration form."""
return dict(
email='[email protected]',
profile=dict(
full_name='Test Tester',
username='test123',
),
)
| mit | 5,920,141,418,307,998,000 | 28.461972 | 78 | 0.599292 | false |
justrypython/EAST | svm_model_v2.py | 1 | 2801 | #encoding:UTF-8
import os
import numpy as np
import sys
import cv2
import matplotlib.pyplot as plt
from sklearn.svm import NuSVC, SVC
import datetime
import pickle
#calculate the area
def area(p):
p = p.reshape((-1, 2))
return 0.5 * abs(sum(x0*y1 - x1*y0
for ((x0, y0), (x1, y1)) in segments(p)))
def segments(p):
return zip(p, np.concatenate((p[1:], [p[0]])))
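# Note: area() is the shoelace formula over consecutive vertex pairs. For an
# illustrative 4x3 rectangle (example values, not taken from the data files):
#   area(np.array([0, 0, 4, 0, 4, 3, 0, 3])) == 12.0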
def calc_xy(p0, p1, p2):
cos = calc_cos(p0, p1, p2)
dis = calc_dis(p0, p2)
return dis * cos, dis * np.sqrt(1 - np.square(cos))
def calc_dis(p0, p1):
return np.sqrt(np.sum(np.square(p0-p1)))
def calc_cos(p0, p1, p2):
A = p1 - p0
B = p2 - p0
num = np.dot(A, B)
demon = np.linalg.norm(A) * np.linalg.norm(B)
return num / demon
def calc_new_xy(boxes):
box0 = boxes[:8]
box1 = boxes[8:]
x, y = calc_xy(box1[4:6], box1[6:], box0[:2])
dis = calc_dis(box1[4:6], box1[6:])
area0 = area(box0)
area1 = area(box1)
return x/dis, y/dis
if __name__ == '__main__':
test = True
path = '/media/zhaoke/b0685ee4-63e3-4691-ae02-feceacff6996/data/'
paths = os.listdir(path)
paths = [i for i in paths if '.txt' in i]
boxes = np.empty((800000, 9))
cnt = 0
for txt in paths:
f = open(path+txt, 'r')
lines = f.readlines()
f.close()
lines = [i.replace('\n', '').split(',') for i in lines]
lines = np.array(lines).astype(np.uint32)
boxes[cnt*10:cnt*10+len(lines)] = lines
cnt += 1
zeros = boxes==[0, 0, 0, 0, 0, 0, 0, 0, 0]
zeros_labels = zeros.all(axis=1)
zeros_labels = np.where(zeros_labels==True)
idboxes = boxes[boxes[:, 8]==7]
idboxes = np.tile(idboxes[:, :8], (1, 10))
idboxes = idboxes.reshape((-1, 8))
boxes = np.delete(boxes, zeros_labels[0], axis=0)
idboxes = np.delete(idboxes, zeros_labels[0], axis=0)
boxes_idboxes = np.concatenate((boxes[:, :8], idboxes), axis=1)
start_time = datetime.datetime.now()
print start_time
new_xy = np.apply_along_axis(calc_new_xy, 1, boxes_idboxes)
end_time = datetime.datetime.now()
print end_time - start_time
if test:
with open('clf_address_v2.pickle', 'rb') as f:
clf = pickle.load(f)
cnt = 0
for i, xy in enumerate(new_xy):
cls = int(clf.predict([xy])[0])
if cls == int(boxes[i, 8]):
cnt += 1
if i % 10000 == 0 and i != 0:
print i, ':', float(cnt) / i
else:
clf = SVC()
start_time = datetime.datetime.now()
print start_time
clf.fit(new_xy[:], boxes[:, 8])
end_time = datetime.datetime.now()
print end_time - start_time
with open('clf.pickle', 'wb') as f:
pickle.dump(clf, f)
print 'end' | gpl-3.0 | 3,479,348,540,319,028,700 | 29.129032 | 69 | 0.551946 | false |
BorgERP/borg-erp-6of3 | l10n_hr/l10n_hr_fiskal/__openerp__.py | 1 | 2505 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Module: l10n_hr_fiskal
# Author: Davor Bojkić
# mail: [email protected]
# Copyright (C) 2012- Daj Mi 5,
# http://www.dajmi5.com
# Contributions: Hrvoje ThePython - Free Code!
# Goran Kliska (AT) Slobodni Programi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Croatian localization - Fiscalization module",
"description" : """
Fiscalization of issued invoices
====================================
Author: Davor Bojkić - Bole @ DAJ MI 5
www.dajmi5.com
Contributions: Hrvoje ThePython - Free Code!
Goran Kliska @ Slobodni Programi
    Prerequisites:
       install on the server:
python-dev, python-ms2crypto, libxmlsec1-dev
build/install pyxmlsec-0.3.2!
""",
"version" : "1.02",
"author" : "DAJ MI 5",
"category" : "Localisation/Croatia",
"website": "http://www.dajmi5.com",
'depends': [
'base_vat',
'account_storno',
'l10n_hr_account',
'openerp_crypto',
],
#'external_dependencies':{'python':['m2crypto','pyxmlsec'],
# 'bin':'libxmlsec-dev'},
'update_xml': [
'certificate_view.xml',
'fiskalizacija_view.xml',
'security/ir.model.access.csv',
'account_view.xml',
'account_invoice_view.xml',
'l10n_hr_fiskal_data.xml',
],
"active": False,
"installable": True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,083,689,587,987,897,000 | 35.26087 | 78 | 0.544764 | false |
dmort27/panphon | panphon/test/test_permissive_methods.py | 1 | 2679 | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals, division, absolute_import
import unittest
from panphon import permissive
class TestFeatureTableAPI(unittest.TestCase):
def setUp(self):
self.ft = permissive.PermissiveFeatureTable()
def test_fts(self):
self.assertEqual(len(self.ft.fts('u')), 22)
# def test_seg_fts(self):
# self.assertEqual(len(self.ft.seg_fts('p')), 21)
def test_match(self):
self.assertTrue(self.ft.match(self.ft.fts('u'), self.ft.fts('u')))
def test_fts_match(self):
self.assertTrue(self.ft.fts_match(self.ft.fts('u'), 'u'))
def test_longest_one_seg_prefix(self):
self.assertEqual(self.ft.longest_one_seg_prefix('pap'), 'p')
def test_validate_word(self):
self.assertTrue(self.ft.validate_word('tik'))
def test_segs(self):
self.assertEqual(self.ft.segs('tik'), ['t', 'i', 'k'])
def test_word_fts(self):
self.assertEqual(len(self.ft.word_fts('tik')), 3)
def test_seg_known(self):
self.assertTrue(self.ft.seg_known('t'))
def test_filter_string(self):
self.assertEqual(len(self.ft.filter_string('pup$')), 3)
def test_segs_safe(self):
self.assertEqual(len(self.ft.segs_safe('pup$')), 4)
def test_filter_segs(self):
self.assertEqual(len(self.ft.filter_segs(['p', 'u', 'p', '$'])), 3)
def test_fts_intersection(self):
self.assertIn(('-', 'voi'), self.ft.fts_intersection(['p', 't', 'k']))
def test_fts_match_any(self):
self.assertTrue(self.ft.fts_match_any([('-', 'voi')], ['p', 'o', '$']))
def test_fts_match_all(self):
self.assertTrue(self.ft.fts_match_all([('-', 'voi')], ['p', 't', 'k']))
def test_fts_contrast2(self):
self.assertTrue(self.ft.fts_contrast2([], 'voi', ['p', 'b', 'r']))
def test_fts_count(self):
self.assertEqual(self.ft.fts_count([('-', 'voi')], ['p', 't', 'k', 'r']), 3)
self.assertEqual(self.ft.fts_count([('-', 'voi')], ['r', '$']), 0)
def test_match_pattern(self):
self.assertEqual(len(self.ft.match_pattern([set([('-', 'voi')])], 'p')), 1)
def test_match_pattern_seq(self):
self.assertTrue(self.ft.match_pattern_seq([set([('-', 'voi')])], 'p'))
# def test_all_segs_matching_fts(self):
# self.assertIn('p', self.ft.all_segs_matching_fts([('-', 'voi')]))
def test_compile_regex_from_str(self):
pass
def test_segment_to_vector(self):
self.assertEqual(len(self.ft.segment_to_vector('p')), 22)
def test_word_to_vector_list(self):
self.assertEqual(len(self.ft.word_to_vector_list('pup')), 3)
| mit | -7,131,835,215,741,403,000 | 32.074074 | 84 | 0.592012 | false |
summychou/CSForOSS | CA/OSSQt_DataMasterRigster.py | 1 | 2535 | # -*- coding: utf-8 -*-
# import sqlite3 as sqlite
import sys
import uuid
from pysqlcipher3 import dbapi2 as sqlite
def main():
print("***************** Welcome to OSS DataMaster-Rigster System *******************")
print("* *")
print("******************************************************************************")
conn = sqlite.connect('DataMasterSystem.db')
c = conn.cursor()
c.execute("PRAGMA key='data_master_system'") # 对加密的sqlite文件进行解密
try:
c.execute('create table data_master_system (data_master_name text, password text, unique_id text)')
except sqlite.OperationalError as e:
pass
unique_id = uuid.uuid1()
data_masters = c.execute("select * from data_master_system").fetchall()
if len(data_masters) != 0:
data_master_name = input("[*] Input your data master name:\n")
for col in data_masters:
if data_master_name.strip() == col[0]:
print("[!] Data Master Name has existed!")
print("******************************************************************************")
print("* *")
print("*********************** Data Master Rigster Is Failed! ***********************")
sys.exit(-1)
else:
data_master_name = input("[*] Input your data master name:\n")
password = input("[*] Input your password:\n")
repeat_password = input("[*] Input your password again:\n")
if password.strip() != repeat_password.strip():
print("[!] Password is not equal to RePassword!")
print("******************************************************************************")
print("* *")
print("*********************** Data Master Rigster Is Failed! ***********************")
sys.exit(-1)
c.execute('insert into data_master_system values ("{}", "{}", "{}")'.format(data_master_name, password, unique_id))
conn.commit()
c.close()
print("******************************************************************************")
print("* *")
print("********************* Data Master Rigster Is Successful! *********************")
if __name__ == '__main__':
main()
| mit | 9,171,648,892,522,189,000 | 45.592593 | 119 | 0.394036 | false |
Microvellum/Fluid-Designer | win64-vc/2.78/scripts/startup/fluid_operators/fd_api_doc.py | 1 | 10496 | '''
Created on Jan 27, 2017
@author: montes
'''
import bpy
from inspect import *
import mv
import os
import math
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import legal,inch,cm
from reportlab.platypus import Image
from reportlab.platypus import Paragraph,Table,TableStyle
from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph, Frame, Spacer, PageTemplate, PageBreak
from reportlab.lib import colors
from reportlab.lib.pagesizes import A3, A4, landscape, portrait
from reportlab.lib.styles import ParagraphStyle, getSampleStyleSheet
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.platypus.flowables import HRFlowable
class OPS_create_api_doc(bpy.types.Operator):
bl_idname = "fd_api_doc.create_api_doc"
bl_label = "Create Fluid API Documentation"
output_path = bpy.props.StringProperty(name="Output Path")
def esc_uscores(self, string):
if string:
return string.replace("_", "\_")
else:
return
def exclude_builtins(self, classes, module):
new_classes = []
for cls in classes:
if module in cls[1].__module__:
new_classes.append(cls)
return new_classes
def write_sidebar(self, modules):
filepath = os.path.join(self.output_path, "FD_Sidebar.md")
file = open(filepath, "w")
fw = file.write
fw("# Fluid Designer\n")
fw("* [Home](Home)\n")
fw("* [Understanding the User Interface](Understanding-the-User-Interface)\n")
fw("* [Navigating the 3D Viewport](Navigating-the-3D-Viewport)\n")
fw("* [Navigating the Library Browser](Navigating-the-Library-Browser)\n")
fw("* [The Room Builder Panel](The-Room-Builder-Panel)\n")
fw("* [Hotkeys](Fluid-Designer-Hot-Keys)\n\n")
fw("# API Documentation\n")
for mod in modules:
fw("\n## mv.{}\n".format(mod[0]))
classes = self.exclude_builtins(getmembers(mod[1], predicate=isclass), mod[0])
if len(classes) > 0:
for cls in classes:
fw("* [{}()]({})\n".format(self.esc_uscores(cls[0]),
self.esc_uscores(cls[0])))
else:
fw("* [mv.{}]({})\n".format(mod[0], mod[0]))
file.close()
def write_class_doc(self, cls):
filepath = os.path.join(self.output_path, cls[0] + ".md")
file = open(filepath, "w")
fw = file.write
fw("# class {}{}{}{}\n\n".format(cls[1].__module__, ".", cls[0], "():"))
if getdoc(cls[1]):
fw(self.esc_uscores(getdoc(cls[1])) + "\n\n")
for func in getmembers(cls[1], predicate=isfunction):
if cls[0] in func[1].__qualname__:
args = getargspec(func[1])[0]
args_str = ', '.join(item for item in args if item != 'self')
fw("## {}{}{}{}\n\n".format(self.esc_uscores(func[0]),
"(",
self.esc_uscores(args_str) if args_str else " ",
")"))
if getdoc(func[1]):
fw(self.esc_uscores(getdoc(func[1])) + "\n")
else:
fw("Undocumented.\n\n")
file.close()
def write_mod_doc(self, mod):
filepath = os.path.join(self.output_path, mod[0] + ".md")
file = open(filepath, "w")
fw = file.write
fw("# module {}{}:\n\n".format("mv.", mod[0]))
if getdoc(mod[1]):
fw(self.esc_uscores(getdoc(mod[1])) + "\n\n")
for func in getmembers(mod[1], predicate=isfunction):
args = getargspec(func[1])[0]
args_str = ', '.join(item for item in args if item != 'self')
fw("## {}{}{}{}\n\n".format(self.esc_uscores(func[0]),
"(",
self.esc_uscores(args_str if args_str else " "),
")"))
if getdoc(func[1]):
fw(self.esc_uscores(getdoc(func[1])) + "\n")
else:
fw("Undocumented.\n\n")
file.close()
def execute(self, context):
modules = getmembers(mv, predicate=ismodule)
self.write_sidebar(modules)
for mod in modules:
classes = self.exclude_builtins(getmembers(mod[1], predicate=isclass), mod[0])
if len(classes) > 0:
for cls in classes:
self.write_class_doc(cls)
else:
self.write_mod_doc(mod)
return {'FINISHED'}
class OPS_create_content_overview_doc(bpy.types.Operator):
bl_idname = "fd_api_doc.create_content_overview"
bl_label = "Create Fluid Content Overview Documentation"
INCLUDE_FILE_NAME = "doc_include.txt"
write_path = bpy.props.StringProperty(name="Write Path", default="")
elements = []
package = None
def write_html(self):
pass
def read_include_file(self, path):
dirs = []
file_path = os.path.join(path, self.INCLUDE_FILE_NAME)
if os.path.exists(file_path):
file = open(os.path.join(path, self.INCLUDE_FILE_NAME), "r")
dirs_raw = list(file)
for dir in dirs_raw:
dirs.append(dir.replace("\n", ""))
return dirs
def create_hdr(self, name, font_size):
hdr_style = TableStyle([('TEXTCOLOR', (0, 0), (-1, -1), colors.black),
('BOTTOMPADDING', (0, 0), (-1, -1), 15),
('TOPPADDING', (0, 0), (-1, -1), 15),
('FONTSIZE', (0, 0), (-1, -1), 8),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('ALIGN', (0, 0), (-1, 0), 'LEFT'),
('LINEBELOW', (0, 0), (-1, -1), 2, colors.black),
('BACKGROUND', (0, 1), (-1, -1), colors.white)])
name_p = Paragraph(name, ParagraphStyle("Category name style", fontSize=font_size))
hdr_tbl = Table([[name_p]], colWidths = 500, rowHeights = None, repeatRows = 1)
hdr_tbl.setStyle(hdr_style)
self.elements.append(hdr_tbl)
def create_img_table(self, dir):
item_tbl_data = []
item_tbl_row = []
for i, file in enumerate(os.listdir(dir)):
last_item = len(os.listdir(dir)) - 1
if ".png" in file:
img = Image(os.path.join(dir, file), inch, inch)
img_name = file.replace(".png", "")
if len(item_tbl_row) == 4:
item_tbl_data.append(item_tbl_row)
item_tbl_row = []
elif i == last_item:
item_tbl_data.append(item_tbl_row)
i_tbl = Table([[img], [Paragraph(img_name, ParagraphStyle("item name style", wordWrap='CJK'))]])
item_tbl_row.append(i_tbl)
if len(item_tbl_data) > 0:
item_tbl = Table(item_tbl_data, colWidths=125)
self.elements.append(item_tbl)
self.elements.append(Spacer(1, inch * 0.5))
def search_dir(self, path):
thumb_dir = False
for file in os.listdir(path):
if ".png" in file:
thumb_dir = True
if thumb_dir:
self.create_img_table(path)
for file in os.listdir(path):
if os.path.isdir(os.path.join(path, file)):
self.create_hdr(file, font_size=14)
self.search_dir(os.path.join(path, file))
def write_pdf(self, mod):
file_path = os.path.join(self.write_path if self.write_path != "" else mod.__path__[0], "doc")
file_name = mod.__package__ + ".pdf"
if not os.path.exists(file_path):
os.mkdir(file_path)
doc = SimpleDocTemplate(os.path.join(file_path, file_name),
pagesize = A4,
leftMargin = 0.25 * inch,
rightMargin = 0.25 * inch,
topMargin = 0.25 * inch,
bottomMargin = 0.25 * inch)
lib_name = mod.__package__.replace("_", " ")
self.create_hdr(lib_name, font_size=24)
print("\n", lib_name, "\n")
dirs = self.read_include_file(os.path.join(mod.__path__[0], "doc"))
if len(dirs) > 0:
for d in dirs:
path = os.path.join(mod.__path__[0], d)
if os.path.exists(path):
self.create_hdr(d.title(), font_size=18)
self.search_dir(path)
else:
products_path = os.path.join(mod.__path__[0], "products")
if os.path.exists(products_path):
self.create_hdr("Products", font_size=18)
self.search_dir(products_path)
inserts_path = os.path.join(mod.__path__[0], "inserts")
if os.path.exists(inserts_path):
self.create_hdr("Inserts", font_size=18)
self.search_dir(inserts_path)
doc.build(self.elements)
def execute(self, context):
packages = mv.utils.get_library_packages(context)
for p in packages:
mod = __import__(p)
self.write_pdf(mod)
return {'FINISHED'}
classes = [
OPS_create_api_doc,
OPS_create_content_overview_doc,
]
def register():
for c in classes:
bpy.utils.register_class(c)
def unregister():
for c in classes:
bpy.utils.unregister_class(c)
if __name__ == "__main__":
register() | gpl-3.0 | -1,779,006,538,429,056,000 | 35.447917 | 118 | 0.477229 | false |
sol/pygments | tests/test_julia.py | 1 | 1791 | # -*- coding: utf-8 -*-
"""
Julia Tests
~~~~~~~~~~~
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import unittest
from pygments.lexers import JuliaLexer
from pygments.token import Token
class JuliaTests(unittest.TestCase):
def setUp(self):
self.lexer = JuliaLexer()
def test_unicode(self):
"""
Test that unicode character, √, in an expression is recognized
"""
fragment = u's = \u221a((1/n) * sum(count .^ 2) - mu .^2)\n'
tokens = [
(Token.Name, u's'),
(Token.Text, u' '),
(Token.Operator, u'='),
(Token.Text, u' '),
(Token.Operator, u'\u221a'),
(Token.Punctuation, u'('),
(Token.Punctuation, u'('),
(Token.Literal.Number.Integer, u'1'),
(Token.Operator, u'/'),
(Token.Name, u'n'),
(Token.Punctuation, u')'),
(Token.Text, u' '),
(Token.Operator, u'*'),
(Token.Text, u' '),
(Token.Name, u'sum'),
(Token.Punctuation, u'('),
(Token.Name, u'count'),
(Token.Text, u' '),
(Token.Operator, u'.^'),
(Token.Text, u' '),
(Token.Literal.Number.Integer, u'2'),
(Token.Punctuation, u')'),
(Token.Text, u' '),
(Token.Operator, u'-'),
(Token.Text, u' '),
(Token.Name, u'mu'),
(Token.Text, u' '),
(Token.Operator, u'.^'),
(Token.Literal.Number.Integer, u'2'),
(Token.Punctuation, u')'),
(Token.Text, u'\n'),
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
| bsd-2-clause | 331,492,644,497,758,000 | 29.844828 | 71 | 0.472331 | false |
Ramyak/CodingPractice | algo_practice/sort/merge_sort.py | 1 | 1251 | #!/usr/bin/env python
a = [5, 3, 6, 3, 1, 2]
def merge_sort(in_array, left, right):
print 'Before: ({} , {}) : {}'.format(str(left), str(right), (in_array[left:right + 1]))
if right - left >= 1:
mid = ((right - left) / 2) + left
if mid > right:
return
merge_sort(in_array, left, mid)
merge_sort(in_array, mid + 1, right)
# Merge
tmp_array = [None] * (right - left + 1)
l_start = left
r_start = mid + 1
i = 0
for i in range(right + 1 - left):
if l_start > mid or r_start > right:
break
if in_array[l_start] < in_array[r_start]:
tmp_array[i] = in_array[l_start]
l_start += 1
else:
tmp_array[i] = in_array[r_start]
r_start += 1
if l_start <= mid:
tmp_array[i:right + 1] = in_array[l_start:mid + 1]
else:
tmp_array[i:right + 1] = in_array[r_start:right + 1]
in_array[left:right + 1] = tmp_array
print 'After: ({} , {}) : {}'.format(str(left), str(right), (in_array[left:right + 1]))
return in_array
if __name__ == '__main__':
print merge_sort(a, 0, len(a) - 1)
| gpl-2.0 | 6,766,558,600,085,191,000 | 31.076923 | 92 | 0.46283 | false |
AMOboxTV/AMOBox.LegoBuild | plugin.video.exodus/resources/lib/sources/ninemovies_mv_tv.py | 1 | 7333 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json,time
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import directstream
class source:
def __init__(self):
self.domains = ['9movies.to']
self.base_link = 'http://9movies.to'
self.search_link = '/sitemap'
def movie(self, imdb, title, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def ninemovies_cache(self):
try:
url = urlparse.urljoin(self.base_link, self.search_link)
result = client.source(url)
result = result.split('>Movies and TV-Shows<')[-1]
result = client.parseDOM(result, 'ul', attrs = {'class': 'sub-menu'})[0]
result = re.compile('href="(.+?)">(.+?)<').findall(result)
result = [(re.sub('http.+?//.+?/','/', i[0]), re.sub('&#\d*;','', i[1])) for i in result]
return result
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
try:
result = ''
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
title = cleantitle.get(title)
try: episode = data['episode']
except: pass
url = cache.get(self.ninemovies_cache, 120)
url = [(i[0], i[1], cleantitle.get(i[1])) for i in url]
url = [(i[0], i[1], i[2], re.sub('\d*$', '', i[2])) for i in url]
url = [i for i in url if title == i[2]] + [i for i in url if title == i[3]]
if 'season' in data and int(data['season']) > 1:
url = [(i[0], re.compile('\s+(\d*)$').findall(i[1])) for i in url]
url = [(i[0], i[1][0]) for i in url if len(i[1]) > 0]
url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[1])]
url = url[0][0]
url = urlparse.urljoin(self.base_link, url)
result = client.source(url)
years = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year']
years = ['%s' % str(years), '%s' % str(int(years)+1), '%s' % str(int(years)-1)]
year = re.compile('<dd>(\d{4})</dd>').findall(result)[0]
if not year in years: return sources
except:
pass
try:
if not result == '': raise Exception()
url = urlparse.urljoin(self.base_link, url)
try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
except: pass
result = client.source(url)
except:
pass
try: quality = client.parseDOM(result, 'dd', attrs = {'class': 'quality'})[0].lower()
except: quality = 'hd'
if quality == 'cam' or quality == 'ts': quality = 'CAM'
elif quality == 'hd' or 'hd ' in quality: quality = 'HD'
else: quality = 'SD'
result = client.parseDOM(result, 'ul', attrs = {'class': 'episodes'})
result = zip(client.parseDOM(result, 'a', ret='data-id'), client.parseDOM(result, 'a'))
result = [(i[0], re.findall('(\d+)', i[1])) for i in result]
result = [(i[0], ''.join(i[1][:1])) for i in result]
try: result = [i for i in result if '%01d' % int(i[1]) == '%01d' % int(episode)]
except: pass
links = [urllib.urlencode({'hash_id': i[0], 'referer': url}) for i in result]
for i in links: sources.append({'source': 'gvideo', 'quality': quality, 'provider': 'Ninemovies', 'url': i, 'direct': True, 'debridonly': False})
try:
if not quality == 'HD': raise Exception()
quality = directstream.googletag(self.resolve(links[0]))[0]['quality']
if not quality == 'SD': raise Exception()
for i in sources: i['quality'] = 'SD'
except:
pass
return sources
except:
return sources
def resolve(self, url):
try:
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
headers = {'X-Requested-With': 'XMLHttpRequest'}
now = time.localtime()
url = '/ajax/film/episode?hash_id=%s&f=&p=%s' % (data['hash_id'], now.tm_hour + now.tm_min)
url = urlparse.urljoin(self.base_link, url)
result = client.source(url, headers=headers, referer=data['referer'])
result = json.loads(result)
grabber = {'flash': 1, 'json': 1, 's': now.tm_min, 'link': result['videoUrlHash'], '_': int(time.time())}
grabber = result['grabber'] + '?' + urllib.urlencode(grabber)
result = client.source(grabber, headers=headers, referer=url)
result = json.loads(result)
url = [(re.findall('(\d+)', i['label']), i['file']) for i in result if 'label' in i and 'file' in i]
url = [(int(i[0][0]), i[1]) for i in url if len(i[0]) > 0]
url = sorted(url, key=lambda k: k[0])
url = url[-1][1]
url = client.request(url, output='geturl')
if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
else: url = url.replace('https://', 'http://')
return url
except:
return
| gpl-2.0 | -7,445,599,848,500,218,000 | 35.849246 | 157 | 0.517114 | false |
LibraryOfCongress/gazetteer | etl/parser/hmdb.py | 1 | 1998 | import sys, json, os, datetime
from shapely.geometry import asShape, mapping
from fiona import collection
from core import Dump
import core
import codecs
#name, cmt, desc, link1_href
def extract_shapefile(shapefile, uri_name, simplify_tolerance=None):
for feature in collection(shapefile, "r"):
geometry = feature["geometry"]
properties = feature["properties"]
#calculate centroid
geom_obj = asShape(geometry)
centroid = feature["geometry"]["coordinates"]
name = properties["name"]
address = {
"street" : feature.get("cmt")
}
#alternate names
alternates = []
feature_code = "HSTS"
source = properties #keep all fields anyhow
# unique URI which internally gets converted to the place id.
uri = properties.get("link1_href") + "#"+feature["id"]
timeframe = {}
updated = datetime.datetime.utcnow().replace(second=0, microsecond=0).isoformat()
place = {
"name":name,
"centroid":centroid,
"feature_code": feature_code,
"geometry":geometry,
"is_primary": True,
"source": source,
"alternate": alternates,
"updated": updated,
"uris":[uri],
"relationships": [],
"timeframe":timeframe,
"admin":[]
}
dump.write(uri, place)
if __name__ == "__main__":
shapefile, dump_path = sys.argv[1:3]
uri_name = "http://www.hmdb.org/"
#simplify_tolerance = .01 # ~ 11km (.001 = 111m)
simplify_tolerance = None
dump_basename = os.path.basename(shapefile)
dump = Dump(dump_path + "/shapefile/"+ dump_basename + ".%04d.json.gz")
dump.max_rows = "1000"
extract_shapefile(shapefile, uri_name, simplify_tolerance)
dump.close()
#python hmdb.py ../../../hmdb.shp hmdbdump
| mit | -4,251,280,113,043,998,000 | 24.615385 | 89 | 0.553554 | false |
lucasmello/Driloader | driloader/browser/internet_explorer.py | 1 | 4135 | # pylint: disable=anomalous-backslash-in-string, too-many-locals,
# pylint: disable=multiple-statements
"""
Module that abstract operations to handle Internet Explorer versions.
"""
import os
import platform
import re
import xml.etree.ElementTree as ET
import requests
from driloader.browser.exceptions import BrowserDetectionError
from driloader.http.proxy import Proxy
from driloader.utils.commands import Commands
from .basebrowser import BaseBrowser
from .drivers import Driver
from ..http.operations import HttpOperations
from ..utils.file import FileHandler
class IE(BaseBrowser):
"""
Implements all BaseBrowser methods to find the proper
Internet Explorer version.
"""
_find_version_32_regex = r'IEDriverServer_Win32_([\d]+\.[\d]+\.[\d])'
_find_version_64_regex = r'IEDriverServer_x64_([\d]+\.[\d]+\.[\d])'
def __init__(self, driver: Driver):
super().__init__('IE')
self.x64 = IE._is_windows_x64()
self._driver = driver
def _latest_driver(self):
"""
Gets the latest ie driver version.
:return: the latest ie driver version.
"""
resp = requests.get(self._config.latest_release_url(),
proxies=Proxy().urls)
xml_dl = ET.fromstring(resp.text)
root = ET.ElementTree(xml_dl)
tag = root.getroot().tag
tag = tag.rpartition('}')[0] + tag.rpartition('}')[1]
contents = root.findall(tag + 'Contents')
last_version = 0
version_str = '0.0.0'
last_version_str = '0.0.0'
if self.x64:
pattern = IE._find_version_64_regex
else:
pattern = IE._find_version_32_regex
os_type = 'x64' if self.x64 else 'Win32'
for content in contents:
key = content.find(tag + 'Key').text
driver_section = 'IEDriverServer_{}_'.format(os_type) in key
if driver_section:
version_nbr = re.search(pattern, key)
if version_nbr is not None:
version_str = version_nbr.group(1)
try:
if version_str is not None:
version = float(version_str.rpartition('.')[0])
else:
version = 0
except ValueError:
version = 0
if version >= last_version:
last_version = version
last_version_str = version_str
return last_version_str
def _driver_matching_installed_version(self):
# TODO: Version matcher for IE.
return self._latest_driver()
def installed_browser_version(self):
""" Returns Internet Explorer version.
Args:
Returns:
Returns an int with the browser version.
Raises:
BrowserDetectionError: Case something goes wrong when
getting browser version.
"""
if os.name != "nt":
raise BrowserDetectionError('Unable to retrieve IE version.',
'System is not Windows.')
cmd = ['reg', 'query',
'HKEY_LOCAL_MACHINE\Software\Microsoft\Internet Explorer',
'/v', 'svcVersion']
try:
output = Commands.run(cmd)
reg = re.search(self._config.search_regex_pattern(), str(output))
str_version = reg.group(0)
int_version = int(str_version.partition(".")[0])
except Exception as error:
raise BrowserDetectionError('Unable to retrieve IE version '
'from system.', error) from error
return int_version
@staticmethod
def _is_windows_x64():
return platform.machine().endswith('64')
def get_driver(self):
"""
API to expose to client to download the driver and unzip it.
"""
self._driver.version = self._driver_matching_installed_version()
return self._download_and_unzip(HttpOperations(),
self._driver, FileHandler())
| mit | -4,724,605,588,114,314,000 | 32.893443 | 77 | 0.564692 | false |
PyBossa/pybossa | pybossa/auth/webhook.py | 1 | 1833 | # -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 SF Isle of Man Limited
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
class WebhookAuth(object):
_specific_actions = []
def __init__(self, project_repo):
self.project_repo = project_repo
@property
def specific_actions(self):
return self._specific_actions
def can(self, user, action, webhook=None, project_id=None):
action = ''.join(['_', action])
return getattr(self, action)(user, webhook, project_id)
def _create(self, user, webhook, project_id=None):
return False
def _read(self, user, webhook=None, project_id=None):
if user.is_anonymous() or (webhook is None and project_id is None):
return False
project = self._get_project(webhook, project_id)
return user.admin or user.id in project.owners_ids
def _update(self, user, webhook, project_id=None):
return False
def _delete(self, user, webhook, project_id=None):
return False
def _get_project(self, webhook, project_id):
if webhook is not None:
return self.project_repo.get(webhook.project_id)
return self.project_repo.get(project_id)
| agpl-3.0 | -6,669,273,108,467,857,000 | 33.584906 | 77 | 0.67976 | false |
spring01/libPSI | lib/python/grendel/chemistry/molecule_stub.py | 1 | 11089 | # FAILED ATTEMPT AT REFORMING MOLECULESTUB
# Perhaps I'll come back to this later...
#from grendel import type_checking_enabled, sanity_checking_enabled
#from grendel.chemistry.atom import Atom
#from grendel.gmath import magnitude, angle_between_vectors
#from grendel.gmath.matrix import Matrix
#from grendel.util.decorators import with_flexible_arguments, typechecked, IterableOf
#from grendel.util.exceptions import ChemistryError
#from grendel.util.overloading import overloaded, OverloadedFunctionCallError
#from grendel.util.strings import indented
#from grendel.util.units import strip_units, DistanceUnit, AngularUnit, Radians, Degrees, Angstroms, isunit
## Immutable "friend class" of Molecule
#class MoleculeStub(object):
# """
# Immutable "friend" class of `Molecule`, used for hashing.
# """
#
# ####################
# # Class Attributes #
# ####################
#
# eq_precision = 8
# same_internal_tol = {AngularUnit: 0.0001*Degrees, DistanceUnit: 1e-7*Angstroms}
#
# ##############
# # Attributes #
# ##############
#
# multiplicity = None
# """ The multiplicity of the electronic state of the molecule.
# (i.e. 2S+1 where S is the total spin). Defaults to singlet. """
#
# charge = None
# """ The charge on the molecule. Defaults to neutral. """
#
# reoriented_matrix = None
#
# ######################
# # Private Attributes #
# ######################
#
# _hash = None
# _cartesian_representation = None
# _cartesian_units = None
# _internal_representation = None
# _from_molecule = None
# _xyz = None
# _element_list = None
#
# ##################
# # Initialization #
# ##################
#
# @overloaded
# def __init__(self, *args, **kwargs):
# raise OverloadedFunctionCallError
#
# @__init__.overload_with(
# atoms=IterableOf('Atom'),
# )
# def __init__(self,
# atoms,
# **kwargs):
# self.__init__(
# [(atom.element, atom.isotope) for atom in atoms],
# Matrix([atom.position for atom in atoms]),
# **kwargs)
#
# @__init__.overload_with(
# cartesian_units=isunit,
# charge=(int, None),
# multiplicity=(int, None)
# )
# def __init__(self,
# elements_and_isotopes,
# xyz,
# cartesian_units=DistanceUnit.default,
# charge=None,
# multiplicity=None):
# self._cartesian_units = cartesian_units
# self.charge = charge if charge is not None else Molecule.default_charge
# self.multiplicity = multiplicity if multiplicity is not None else Molecule.default_multiplicity
# self._xyz = xyz
# self._element_list = elements_and_isotopes
# # TODO strip units
# tmpmol = Molecule(
# [Atom(el, iso, pos) for (el, iso), pos in zip(self._element_list, self._xyz.iter_rows)],
# charge=self.charge,
# multiplicity=self.multiplicity
# )
# self.reoriented_matrix = tmpmol.reoriented().xyz
#
# ###################
# # Special Methods #
# ###################
#
# def __hash__(self):
# if self._hash is not None:
# return self._hash
# self._hash = MoleculeDict.hash_for(self)
# return self._hash
#
# def __eq__(self, other):
# if isinstance(other, MoleculeStub):
# if [a.isotope for a in self] != [a.isotope for a in other]:
# return False
# elif (self.multiplicity, self.charge) != (other.multiplicity, other.charge):
# return False
# else:
# reoriented = self.reoriented_matrix * self._cartesian_units.to(DistanceUnit.default)
# rounded = [round(v, MoleculeStub.eq_precision) for v in reoriented.ravel()]
# other_oriented = other.reoriented_matrix * other._cartesian_units.to(DistanceUnit.default)
# other_rounded = [round(v, MoleculeStub.eq_precision) for v in other_oriented.ravel()]
# return rounded == other_rounded
# else:
# return NotImplemented
#
#
# ###########
# # Methods #
# ###########
#
# # TODO document this!
# # TODO class variables for default tolerances
# def is_valid_stub_for(self, other, cart_tol=None, internal_tol=None, ang_tol=None):
# """
# """
# # cart_tol is used for comparison between cartesian positions
# cart_tol = cart_tol or 1e-8*Angstroms
# # internal_tol should be unitless, since the difference between internal coordinates could have multiple
# # units, and we're taking the magnitude across these units
# internal_tol = internal_tol or MoleculeStub.same_internal_tol
# # ang_tol is used when comparing cartesian geometries. If the angle between two corresponding atoms
# # in self and other differs from the angle between the first two corresponding atoms in self and other
# # by more than ang_tol, we assume they do not have the same geometry and thus return False
# ang_tol = ang_tol or 1e-5*Degrees
# #--------------------------------------------------------------------------------#
# if type_checking_enabled:
# if not isinstance(other, Molecule):
# raise TypeError
# if isinstance(other, MoleculeStub):
# raise TypeError
# #--------------------------------------------------------------------------------#
# if self.multiplicity != other.multiplicity:
# return False
# elif self.charge != other.charge:
# return False
# else:
# if len(self._element_list) != other.natoms:
# for num, (element, isotope) in enumerate(self._element_list):
# if (other[num].element, other[num].isotope) != (element, isotope):
# return False
# if other.natoms <= 1:
# # if we have 1 or 0 atoms and we've gotten this far, we have a match
# return True
# #--------------------------------------------------------------------------------#
# # no failures yet, so we have to compare geometries
# # if self has an internal_representation, use it
# if self._internal_representation is not None:
# diff = self._internal_representation.values - self._internal_representation.values_for_molecule(other)
# if any(abs(d) > internal_tol[c.units.genre].in_units(c.units) for d, c in zip(diff, self._internal_representation)):
# return False
# return True
# # if mol has an internal representation, use it:
# elif other.internal_representation is not None:
# diff = other.internal_representation.values - other.internal_representation.values_for_molecule(self)
# if any(abs(d) > internal_tol[c.units.genre].in_units(c.units) for d, c in zip(diff, other.internal_representation)):
# return False
# return True
# else:
# # They're both fully cartesian. This could take a while...
# # We should first try to short-circuit as many ways as possible
# #----------------------------------------#
# # first strip units and store stripped versions to speed up the rest of the work
# # strip the units off of ang_tol
# ang_tol = strip_units(ang_tol, Radians)
# # strip the units off of cart_tol
# cart_tol = strip_units(cart_tol, self._cartesian_units)
# # make a list of positions with stripped units, since we'll use it up to three times
# stripped = [strip_units(atom, self._cartesian_units) for atom in (self if self.is_centered() else self.recentered()) ]
# other_stripped = [strip_units(atom, self._cartesian_units) for atom in (other if other.is_centered() else other.recentered())]
# #----------------------------------------#
# # Try to short-circuit negatively by looking for an inconsistancy in the angles
# # between pairs of corresponding atoms
# # If the first atom is at the origin, use the second one
# offset = 1
# if stripped[0].is_zero():
# if magnitude(stripped[0] - other_stripped[0]) > cart_tol:
# return False
# else:
# if sanity_checking_enabled and stripped[1].is_zero():
# raise ChemistryError, "FrozenMolecule:\n{}\nhas two atoms on top of each other.".format(indented(str(self)))
# if other_stripped[1].is_zero():
# return False
# else:
# offset = 2
# first_ang = angle_between_vectors(self.atoms[1].pos, other.atoms[1].pos)
# else:
# if other_stripped[0].is_zero():
# return False
# else:
# first_ang = angle_between_vectors(self.atoms[0].pos, other.atoms[0].pos)
# for apos, opos in zip(stripped[offset:], other_stripped[offset:]):
# if apos.is_zero():
# if magnitude(apos - opos) > cart_tol:
# return False
# elif opos.is_zero():
# # Try again, since Tensor.zero_cutoff could smaller than cart_tol, causing a false zero
# if magnitude(apos - opos) > cart_tol:
# return False
# else:
# ang = angle_between_vectors(apos, opos)
# if abs(ang - first_ang) > ang_tol:
# return False
# # Also, the magnitude of the distance from the center of mass should be the same:
# if abs(apos.magnitude() - opos.magnitude()) > cart_tol:
# return False
# #----------------------------------------#
# # Try to short-circuit positively:
# exact_match = True
# for apos, opos in zip(stripped, other_stripped):
# if magnitude(apos - opos) > cart_tol:
# exact_match = False
# break
# if exact_match:
# return True
# exact_match = True
# # Check negative version
# for apos, opos in zip(stripped, other_stripped):
# if magnitude(apos + opos) > cart_tol:
# exact_match = False
# break
# if exact_match:
# return True
# #----------------------------------------#
# # We can't short-circuit, so this is the only means we have left
# # It's far more expensive than the rest, but it always works.
# return self.has_same_geometry(other, cart_tol)
#
#
######################
## Dependent Imports #
######################
#
#from grendel.chemistry.molecule_dict import MoleculeDict
#from grendel.chemistry.molecule import Molecule
| gpl-2.0 | 2,493,225,813,903,470,000 | 43.534137 | 139 | 0.541257 | false |
flying-sheep/omnitool | version.py | 1 | 1888 | from functools import total_ordering
@total_ordering
class Version():
"""Organization Class for comparable Version System
Version integer uses decimal shift:
2 digits major version, 2 digits minor version, 2 digits micro version
170100 -> 17.1.0
"""
def __init__(self, integer):
if type(integer) == str:
self.int = int(integer)
elif type(integer) == int:
self.int = integer
else:
raise TypeError("Version accepts int or str, not "+str(type(integer)))
def get_version_tuple(self):
major, minor = divmod(self.int,10000)
minor, micro = divmod(minor, 100)
return major, minor, micro
def get_name(self):
major, minor, micro = tup = self.get_version_tuple()
return ".".join((str(i) for i in tup))
def __repr__(self):
return self.name
def __str__(self):
return str(self.int)
def __eq__(self, other):
if isinstance(other, Version):
return self.int == other.int
return self.int == other
def __lt__(self, other):
if isinstance(other, Version):
return self.int < other.int
return self.int < other
def __int__(self):return self.int
name = property(get_name)
as_tuple = property(get_version_tuple)
current = Version(100)
if __name__ == "__main__":
print (current)
print (current > 200)
print (current < 100)
print (current > Version(50))
assert(Version(100) > 99)
assert(99 < Version(100))
assert(100 == Version(100))
assert(100 != Version(99))
assert(Version(100) == Version(100))
assert(Version(str(Version(100))) == Version(100))
| mit | -4,095,968,977,931,674,000 | 29.95082 | 86 | 0.533898 | false |
domthu/gasistafelice | gasistafelice/auth/__init__.py | 1 | 2945 | from django.utils.translation import ugettext as _, ugettext_lazy
from django.db.models.signals import post_syncdb
import permissions
from permissions.utils import register_role, register_permission
## role-related constants
NOBODY = 'NOBODY'
GAS_MEMBER = 'GAS_MEMBER'
GAS_REFERRER_SUPPLIER = 'GAS_REFERRER_SUPPLIER'
GAS_REFERRER_ORDER = 'GAS_REFERRER_ORDER'
GAS_REFERRER_WITHDRAWAL = 'GAS_REFERRER_WITHDRAWAL'
GAS_REFERRER_DELIVERY = 'GAS_REFERRER_DELIVERY'
GAS_REFERRER_CASH = 'GAS_REFERRER_CASH'
GAS_REFERRER_TECH = 'GAS_REFERRER_TECH'
SUPPLIER_REFERRER = 'SUPPLIER_REFERRER'
ROLES_LIST = [
(NOBODY, _('Nobody')),
(SUPPLIER_REFERRER, _('Supplier referrer')),
(GAS_MEMBER, _('GAS member')),
(GAS_REFERRER_SUPPLIER, _('GAS supplier referrer')),
(GAS_REFERRER_ORDER, _('GAS order referrer')),
(GAS_REFERRER_WITHDRAWAL, _('GAS withdrawal referrer')),
(GAS_REFERRER_DELIVERY, _('GAS delivery referrer')),
(GAS_REFERRER_CASH, _('GAS cash referrer')),
(GAS_REFERRER_TECH, _('GAS technical referrer')),
]
valid_params_for_roles = (
## format
# (Role' codename, allowed model for 1st param, allowed model for 2nd param)
(SUPPLIER_REFERRER, 'supplier.Supplier', ''),
(GAS_MEMBER, 'gas.GAS', ''),
(GAS_REFERRER_CASH, 'gas.GAS', '' ),
(GAS_REFERRER_TECH, 'gas.GAS', ''),
(GAS_REFERRER_SUPPLIER, 'gas.GAS', 'supplier.Supplier'),
(GAS_REFERRER_ORDER, 'gas.GASSupplierOrder', ''),
(GAS_REFERRER_WITHDRAWAL, 'gas.Withdrawal', ''),
(GAS_REFERRER_DELIVERY, 'gas.Delivery', ''),
)
## permission-related constants
VIEW = 'view'
LIST = 'list'
CREATE = 'create'
EDIT = 'edit'
DELETE = 'delete'
ALL = 'all' # catchall
PERMISSIONS_LIST = [
(VIEW, _('View')),
(LIST, _('List')),
(CREATE, _('Create')),
(EDIT, _('Edit')),
(DELETE, _('Delete')),
(ALL, _('All')), # catchall
]
class PermissionsRegister(object):
"""Support global register to hold Role and Permissions dicts"""
# a dictionary holding Roles model instances, keyed by name
roles_dict = {}
# a dictionary holding Permission model instances, keyed by Permission's codename
perms_dict = {}
@property
def roles(cls):
return cls.roles_dict.values()
@property
def perms(cls):
return cls.perms_dict.values()
@property
def role_names(cls):
return cls.roles_dict.keys()
@property
def perm_names(cls):
return cls.perms_dict.keys()
def get_role(cls, code):
return cls.roles_dict[code]
def get_perm(cls, code):
return cls.perms_dict[code]
def init_permissions(sender, **kwargs):
## register project-level Roles
for (name, description) in ROLES_LIST:
PermissionsRegister.roles_dict[name] = register_role(name)
## register project-level Permissions
for (codename, name) in PERMISSIONS_LIST:
PermissionsRegister.perms_dict[codename] = register_permission(name, codename)
return
post_syncdb.connect(init_permissions, sender=permissions.models)
| agpl-3.0 | 7,373,385,643,594,439,000 | 26.268519 | 86 | 0.683531 | false |
cheral/orange3 | Orange/canvas/application/outputview.py | 6 | 6738 | """
"""
import traceback
from AnyQt.QtWidgets import QWidget, QPlainTextEdit, QVBoxLayout, QSizePolicy
from AnyQt.QtGui import QTextCursor, QTextCharFormat, QFont
from AnyQt.QtCore import Qt, QObject, QCoreApplication, QThread, QSize
from AnyQt.QtCore import pyqtSignal as Signal
class TerminalView(QPlainTextEdit):
def __init__(self, *args, **kwargs):
QPlainTextEdit.__init__(self, *args, **kwargs)
self.setFrameStyle(QPlainTextEdit.NoFrame)
self.setTextInteractionFlags(Qt.TextBrowserInteraction)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
font = self.font()
font.setStyleHint(QFont.Monospace)
font.setFamily("Monospace")
self.setFont(font)
def sizeHint(self):
metrics = self.fontMetrics()
width = metrics.boundingRect("_" * 81).width()
height = metrics.lineSpacing()
scroll_width = self.verticalScrollBar().width()
size = QSize(width + scroll_width, height * 25)
return size
class OutputView(QWidget):
def __init__(self, parent=None, **kwargs):
QWidget.__init__(self, parent, **kwargs)
self.__lines = 5000
self.setLayout(QVBoxLayout())
self.layout().setContentsMargins(0, 0, 0, 0)
self.__text = TerminalView()
self.__currentCharFormat = self.__text.currentCharFormat()
self.layout().addWidget(self.__text)
def setMaximumLines(self, lines):
"""
Set the maximum number of lines to keep displayed.
"""
if self.__lines != lines:
self.__lines = lines
self.__text.setMaximumBlockCount(lines)
def maximumLines(self):
"""
Return the maximum number of lines in the display.
"""
return self.__lines
def clear(self):
"""
Clear the displayed text.
"""
self.__text.clear()
def setCurrentCharFormat(self, charformat):
"""Set the QTextCharFormat to be used when writing.
"""
if self.__currentCharFormat != charformat:
self.__currentCharFormat = charformat
def currentCharFormat(self):
return self.__currentCharFormat
def toPlainText(self):
"""
Return the full contents of the output view.
"""
return self.__text.toPlainText()
# A file like interface.
def write(self, string):
self.__text.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor)
self.__text.setCurrentCharFormat(self.__currentCharFormat)
self.__text.insertPlainText(string)
def writelines(self, lines):
self.write("".join(lines))
def flush(self):
pass
def writeWithFormat(self, string, charformat):
self.__text.moveCursor(QTextCursor.End, QTextCursor.MoveAnchor)
self.__text.setCurrentCharFormat(charformat)
self.__text.insertPlainText(string)
def writelinesWithFormat(self, lines, charformat):
self.writeWithFormat("".join(lines), charformat)
def formated(self, color=None, background=None, weight=None,
italic=None, underline=None, font=None):
"""
Return a formated file like object proxy.
"""
charformat = update_char_format(
self.currentCharFormat(), color, background, weight,
italic, underline, font
)
return formater(self, charformat)
def update_char_format(baseformat, color=None, background=None, weight=None,
italic=None, underline=None, font=None):
"""
Return a copy of `baseformat` :class:`QTextCharFormat` with
updated color, weight, background and font properties.
"""
charformat = QTextCharFormat(baseformat)
if color is not None:
charformat.setForeground(color)
if background is not None:
charformat.setBackground(background)
if font is not None:
charformat.setFont(font)
else:
font = update_font(baseformat.font(), weight, italic, underline)
charformat.setFont(font)
return charformat
def update_font(basefont, weight=None, italic=None, underline=None,
pixelSize=None, pointSize=None):
"""
Return a copy of `basefont` :class:`QFont` with updated properties.
"""
font = QFont(basefont)
if weight is not None:
font.setWeight(weight)
if italic is not None:
font.setItalic(italic)
if underline is not None:
font.setUnderline(underline)
if pixelSize is not None:
font.setPixelSize(pixelSize)
if pointSize is not None:
font.setPointSize(pointSize)
return font
class formater(object):
def __init__(self, outputview, charformat):
self.outputview = outputview
self.charformat = charformat
def write(self, string):
self.outputview.writeWithFormat(string, self.charformat)
def writelines(self, lines):
self.outputview.writelines(lines, self.charformat)
def flush(self):
self.outputview.flush()
def formated(self, color=None, background=None, weight=None,
italic=None, underline=None, font=None):
charformat = update_char_format(self.charformat, color, background,
weight, italic, underline, font)
return formater(self.outputview, charformat)
def __enter__(self):
return self
def __exit__(self, *args):
self.outputview = None
self.charformat = None
class TextStream(QObject):
stream = Signal(str)
flushed = Signal()
def __init__(self, parent=None):
QObject.__init__(self, parent)
def write(self, string):
self.stream.emit(string)
def writelines(self, lines):
self.stream.emit("".join(lines))
def flush(self):
self.flushed.emit()
class ExceptHook(QObject):
handledException = Signal(object)
def __init__(self, parent=None, stream=None, canvas=None, **kwargs):
QObject.__init__(self, parent, **kwargs)
self._stream = stream
self._canvas = canvas
def __call__(self, exc_type, exc_value, tb):
if self._stream:
header = exc_type.__name__ + ' Exception'
if QThread.currentThread() != QCoreApplication.instance().thread():
header += " (in non-GUI thread)"
text = traceback.format_exception(exc_type, exc_value, tb)
text.insert(0, '{:-^79}\n'.format(' ' + header + ' '))
text.append('-' * 79 + '\n')
self._stream.writelines(text)
self.handledException.emit(((exc_type, exc_value, tb), self._canvas))
| bsd-2-clause | 1,927,550,279,089,523,200 | 28.552632 | 79 | 0.624221 | false |
sumpfgottheit/arps | arps/views.py | 1 | 3504 | from flask import render_template, request
import numbers
from pprint import pprint
from arps.restserver import app, db, ApiException, apiview
from arps.globals import *
from arps.validation import get_schemas_for_endpoint
from arps.models import *
METHODS = ['GET', 'POST', 'PUT', 'DELETE']
@app.route('/')
def main():
l = []
rules = [rule for rule in sorted(list(app.url_map.iter_rules()), key=lambda rule: rule.rule) if rule.rule.startswith('/api/')]
for rule in rules:
schema_request, schema_response = get_schemas_for_endpoint(rule.endpoint)
l.append({
'path': rule.rule,
'methods': sorted([method for method in rule.methods if method in METHODS]),
'endpoint': rule.endpoint,
'schema_request': schema_request,
'schema_response': schema_response,
'doc': str(app.view_functions[rule.endpoint].__doc__).strip()
})
return render_template('apidoc.html', rules=l)
def populate_object(o :object, d: dict):
changed = set()
unchanged = set()
unkown = set()
for key, value in d.items():
if not isinstance(value, (str, numbers.Number, bool)):
unkown.add(key)
continue
if hasattr(o, key):
if getattr(o, key) == value:
unchanged.add(key)
else:
setattr(o, key, value)
changed.add(key)
else:
unkown.add(key)
return changed, unchanged, unkown
def get_object_or_404(model, *criterion, message=""):
r = db.session.query(model).get(criterion)
if r is None:
raise ApiException(message, code=404)
else:
return r
@app.route('/api/v1.0/users/', methods=['GET'], endpoint=endpoint_user_list)
@apiview()
def user_list():
"""
Return a list of all users
"""
users = db.session.query(User).all()
message = [{**user.as_dict, **{'roles': [role.name for role in user.roles]}} for user in users]
return message
@app.route('/api/v1.0/users/<int:user_id>', methods=['GET'], endpoint=endpoint_user_get)
@apiview()
def user_get(user_id):
"""
Return the user with an specific id.
"""
user = get_object_or_404(User, user_id, message='No User with id %s found' % user_id)
return {**user.as_dict, **{'roles': [role.name for role in user.roles]}}
@app.route('/api/v1.0/users/<int:user_id>', methods=['PUT', 'OPTIONS'], endpoint=endpoint_user_update)
@apiview(needs_json_in_request=True)
def user_update(user_id):
"""
Update the user with the given id with the dictionary provided. All fields are optional.
If the id field is given, it must be the same value as the url leaf.
When updating the user, no fields are required.
"""
data = request.json['content']
if data.get('id', user_id) != user_id:
raise ApiException("User ID in json body and in url must be the same.")
user = get_object_or_404(User, user_id, message='No User with id %s found' % user_id)
populate_object(user, data)
if 'roles' in data:
user.set_roles(data['roles'])
db.session.commit()
return {**user.as_dict, **{'roles': [role.name for role in user.roles]}}
@app.route('/api/v1.0/roles/', methods=['GET'], endpoint=endpoint_role_list)
@apiview()
def role_list():
"""
Return a list of all roles
"""
roles = Role.query.all()
message = [{**user.as_dict, **{'roles': [role.name for role in user.roles]}} for user in users]
return message
| mit | 2,719,949,691,329,565,000 | 33.019417 | 130 | 0.619863 | false |
repotvsupertuga/tvsupertuga.repository | script.module.streamtvsupertuga/lib/resources/lib/sources/pl/ekinomaniak.py | 1 | 4540 | # -*- coding: UTF-8 -*-
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
import urllib
class source:
def __init__(self):
self.priority = 1
self.language = ['pl']
self.domains = ['ekinomaniak.tv']
self.base_link = 'http://ekinomaniak.tv'
self.search_link = '/search_movies'
def search(self, localtitle, year, search_type):
try:
url = urlparse.urljoin(self.base_link, self.search_link)
r = client.request(url, redirect=False, post={'q': cleantitle.query(localtitle), 'sb': ''})
r = client.parseDOM(r, 'div', attrs={'class': 'small-item'})
local_simple = cleantitle.get(localtitle)
for row in r:
name_found = client.parseDOM(row, 'a')[1]
year_found = name_found[name_found.find("(") + 1:name_found.find(")")]
url = client.parseDOM(row, 'a', ret='href')[1]
if not search_type in url:
continue
if cleantitle.get(name_found) == local_simple and year_found == year:
return url
except:
return
def movie(self, imdb, title, localtitle, aliases, year):
return self.search(localtitle, year, 'watch-movies')
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
return self.search(localtvshowtitle, year, 'watch-tv-shows')
def demix(self, e):
result = {"d": "A", "D": "a", "a": "D", "a": "d", "c": "B", "C": "b", "b": "C", "b": "c", "h": "E", "H": "e", "e": "H", "E": "h", "g": "F", "G": "f", "f": "G", "F": "g", "l": "I", "L": "i", "i": "L", "I": "l", "k": "J", "K": "j", "j": "K", "J": "k", "p": "M", "P": "m", "m": "P", "M": "p", "o": "N", "O": "n", "n": "O",
"N": "o", "u": "R", "U": "r", "r": "U", "R": "u", "t": "S", "T": "s", "s": "T", "S": "t", "z": "W", "Z": "w", "w": "Z", "W": "z", "y": "X", "Y": "x", "x": "Y", "X": "y", "3": "1", "1": "3", "4": "2", "2": "4", "8": "5", "5": "8", "7": "6", "6": "7", "0": "9", "9": "0"
}.get(e)
if result == None:
result = '%'
return result
def decodwrd(self, e):
r = ""
for i in range(len(e)):
r += self.demix(e[i])
return r
def decodeURIComponent(self, r):
return urllib.unquote(r.encode("utf-8"))
def shwp(self, e):
r = self.decodwrd(e)
return self.decodeURIComponent(r)
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
r = client.parseDOM(r, 'li', attrs={'class': 'active'})
for row in r:
span_season = client.parseDOM(row, 'span')[0]
span_season = span_season.split(' ')[1]
if span_season == season:
eps = client.parseDOM(row, 'li')
for ep in eps:
ep_no = client.parseDOM(ep, 'a')[0].split(' ')[1]
if ep_no == episode:
return client.parseDOM(ep, 'a', ret='href')[0]
return None
def get_lang_by_type(self, lang_type):
if 'Lektor' in lang_type:
return 'Lektor'
if 'Dubbing' in lang_type:
return 'Dubbing'
if 'Napisy' in lang_type:
return 'Napisy'
return None
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if url == None: return sources
r = client.request(urlparse.urljoin(self.base_link, url), redirect=False)
info = self.get_lang_by_type(client.parseDOM(r, 'title')[0])
r = client.parseDOM(r, 'div', attrs={'class': 'tab-pane active'})[0]
r = client.parseDOM(r, 'script')[0]
script = r.split('"')[1]
decoded = self.shwp(script)
link = client.parseDOM(decoded, 'iframe', ret='src')[0]
valid, host = source_utils.is_host_valid(link, hostDict)
if not valid: return sources
q = source_utils.check_sd_url(link)
sources.append({'source': host, 'quality': q, 'language': 'pl', 'url': link, 'info': info, 'direct': False, 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
return url
| gpl-2.0 | -6,697,926,578,966,888,000 | 38.137931 | 327 | 0.490969 | false |
liresearchgroup/submtr | submtr/update.py | 1 | 7634 | """
submtr.update
~~~~~~~~~~~~~
Automatic program updates for submtr.
The ``AutoUpdtate()`` class contains the ``update()`` method to determine
if a new version of submtr is available for the update channel specied in
the ``SUBMTR_CONFIG`` file.
Automatic update checks are run in the background every time submtr is
executed. Only if an update is available is the user prompted.
Three update channels are available: alpha, beta, and release. The default
channel is release, with the option to change the update channel available
in the ``SUBMTR_CONFIG`` file.
All beta and release tarballs are stored on GitHub at
https://github.com/liresearchgroup/submtr/releases
All alpha release tarballs are stored locally at
/home2/jwmay/src/submtr/v5/dist
:copyright: (c) 2014 by Li Research Group.
:license: MIT, see LICENSE for more details.
"""
import os
import os.path
import requests
import shutil
import tarfile
import tempfile
from distutils.version import StrictVersion as Version
from github3 import GitHub
from github3.models import GitHubError
from submtr import __version__ as current_version
from submtr.config import settings
from submtr.ui import debug, error, message, prompt
from submtr.util import get_dir_list
# GitHub repository information for beta and release versions
OWNER = 'liresearchgroup'
REPO_NAME = 'submtr'
# Dev releases (alpha) are not stored on GitHub
DEV_RELEASE_PATH = '/home2/jwmay/src/submtr/v5/dist'
class AutoUpdate():
"""Automatic updater class for submtr.
"""
def update(self):
"""Run the auto-update."""
release = self._get_latest_release()
if release:
message('Updating submtr...')
message('You are currently on the %s channel'
% settings.update_channel)
do_update = prompt('Version %s is available. Update now?'
% release.version, options=['y', 'n'],
return_boolean=True)
if do_update:
message('Installing update...')
tmp_install_dir = tempfile.mkdtemp()
if settings.debug:
debug(tmp_install_dir, 'Temporary Install Directory: ')
release.install(tmp_install_dir)
# tempfile does not automatically remove temp dirs
shutil.rmtree(tmp_install_dir)
# Print the release notes
separator = '*'*30
release_notes = ['\n', separator]
release_notes.extend(release.notes.split('\n'))
release_notes.extend([separator, '\n'])
for line in release_notes:
message(line)
message('Installation complete')
else:
message('Update cancelled')
elif settings.update:
# This message is only printed if the user requested the update,
# otherwise, no message is printed when the auto-update is
# performed and no new release is available.
message('submtr is up to date')
def _get_latest_release(self):
# Return Release object if new release available, else, return None
if 'alpha' in settings.update_channel:
dev_release = self._get_dev_release()
if dev_release:
return dev_release
else:
dev_release = None
# Check for new beta or release versions
if not dev_release:
owner = OWNER
repo_name = REPO_NAME
# If there is an issue accessing the GitHub API, return None
try:
gh = GitHub()
repo = gh.repository(owner, repo_name)
except GitHubError:
return None
for release in repo.iter_releases():
latest_version = release.name
if Version(current_version) < Version(latest_version):
if (((is_beta(latest_version) or
is_release(latest_version)) and is_beta_channel()) or
(is_release(latest_version) and is_release_channel())):
release_url = (
'https://github.com/%s/%s/archive/%s.tar.gz'
% (owner, repo_name, release.tag_name))
release_notes = release.body
return Release(latest_version, release_url,
release_notes)
else:
return None
def _get_dev_release(self):
# Return Release object if new dev release available, else, return None
path = DEV_RELEASE_PATH
dev_releases = get_dir_list(path)
release_name = dev_releases.pop()
release_path = os.path.join(path, release_name)
release_version = release_name.strip('submtr-targz.')
release_notes = 'This is an *alpha* development release'
if settings.debug:
debug(release_name, 'Dev Release Name: ')
debug(release_path, 'Dev Release Path: ')
debug(release_version, 'Dev Release Version: ')
if Version(current_version) < Version(release_version):
return Release(release_version, release_path, release_notes)
else:
return None
class Release():
def __init__(self, version, location, notes):
self.version = version
self.location = location
self.notes = notes
if settings.debug:
debug(self.version, 'Release Version: ')
debug(self.location, 'Release Location: ')
def install(self, path='.'):
# Get release tarball based on if the location is a url or local path
if self.location.startswith('https://'):
tarball = self._download(path)
else:
# We have a dev release
filename = self.location.split('/')[-1]
tarball = os.path.join(path, filename)
shutil.copy(self.location, tarball)
package = self._extract(tarball, path)
self._run_installer(package)
if settings.debug:
debug(tarball, 'Release Tarball: ')
debug(package, 'Release Package: ')
def _download(self, path='.'):
# Download release to path, return the full file path
filename = str(self.location.split('/')[-1])
download_path = os.path.join(path, filename)
r = requests.get(self.location, stream=True)
with open(download_path, 'wb') as f:
for chunk in r.iter_content():
f.write(chunk)
return download_path
def _extract(self, tarball, path='.'):
# Extract tarball contents, return path to extracted folder
tar = tarfile.open(tarball)
extract_to = os.path.join(path, tar.getnames()[0])
tar.extractall(path)
tar.close()
return extract_to
def _run_installer(self, package):
# Run the setup.py install command
os.chdir(package)
os.system('python setup.py --auto-update install')
def is_beta_channel():
return 'beta' in settings.update_channel
def is_release_channel():
return ('beta' not in settings.update_channel and
'alpha' not in settings.update_channel)
def is_beta(release):
return 'b' in release
def is_release(release):
return 'b' not in release and 'a' not in release
| mit | -3,210,622,861,708,365,300 | 34.840376 | 79 | 0.581085 | false |
MatthewWilkes/reportlab | demos/colors/colortest.py | 1 | 2725 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
__version__='''$Id$'''
import reportlab.pdfgen.canvas
from reportlab.lib import colors
from reportlab.lib.units import inch
def run():
c = reportlab.pdfgen.canvas.Canvas('colortest.pdf')
#do a test of CMYK interspersed with RGB
#first do RGB values
framePage(c, 'Color Demo - RGB Space and CMYK spaces interspersed' )
y = 700
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'cyan')
c.setFillColorCMYK(1,0,0,0)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'red')
c.setFillColorRGB(1,0,0)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'magenta')
c.setFillColorCMYK(0,1,0,0)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'green')
c.setFillColorRGB(0,1,0)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'yellow')
c.setFillColorCMYK(0,0,1,0)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'blue')
c.setFillColorRGB(0,0,1)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'black')
c.setFillColorCMYK(0,0,0,1)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.showPage()
#do all named colors
framePage(c, 'Color Demo - RGB Space - page %d' % c.getPageNumber())
all_colors = reportlab.lib.colors.getAllNamedColors().items()
all_colors.sort() # alpha order by name
c.setFont('Times-Roman', 12)
c.drawString(72,730, 'This shows all the named colors in the HTML standard.')
y = 700
for (name, color) in all_colors:
c.setFillColor(colors.black)
c.drawString(100, y, name)
c.setFillColor(color)
c.rect(200, y-10, 300, 30, fill=1)
y = y - 40
if y < 100:
c.showPage()
framePage(c, 'Color Demo - RGB Space - page %d' % c.getPageNumber())
y = 700
c.save()
def framePage(canvas, title):
canvas.setFont('Times-BoldItalic',20)
canvas.drawString(inch, 10.5 * inch, title)
canvas.setFont('Times-Roman',10)
canvas.drawCentredString(4.135 * inch, 0.75 * inch,
'Page %d' % canvas.getPageNumber())
#draw a border
canvas.setStrokeColorRGB(1,0,0)
canvas.setLineWidth(5)
canvas.line(0.8 * inch, inch, 0.8 * inch, 10.75 * inch)
#reset carefully afterwards
canvas.setLineWidth(1)
canvas.setStrokeColorRGB(0,0,0)
if __name__ == '__main__':
run()
| bsd-3-clause | 7,394,168,289,970,068,000 | 24.952381 | 81 | 0.601468 | false |
Brazelton-Lab/bio_utils | bio_utils/__init__.py | 1 | 1086 | #! /usr/bin/env python3
"""Software library containing common bioinformatic functions
Copyright:
__init__.py software library containing common bioinformatic functions
Copyright (C) 2015 William Brazelton, Alex Hyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = 'Alex Hyer, William Brazelton, Christopher Thornton'
__email__ = '[email protected]'
__license__ = 'GPLv3'
__maintainer__ = 'Alex Hyer'
__status__ = 'Production'
__version__ = '1.1.1'
| gpl-3.0 | -5,207,140,348,901,563,000 | 36.448276 | 74 | 0.726519 | false |
iamgp/pyCa | pyCa/Graph.py | 1 | 2559 | from . import *
# Graphics Stuff
import matplotlib.pyplot as plt
class Graph(object):
"""docstring for Graph"""
def __init__(self, Experiment):
self.Experiment = Experiment
self.numberOfStimulantsAdded = 0
self.nameToUse = 0
def plot(self):
print ''
log(self.Experiment.name, colour="yellow")
log('==================', colour="yellow")
for i, col in self.Experiment.data.iteritems():
if i == 0:
col.name = "time"
if col.name == "time":
continue
fig, ax = plt.subplots(1)
plt.plot(self.Experiment.data.time, col, '-')
plt.title(col.name)
ax.set_ylim(
col.min() - (0.1 * col.min()), col.max() + (0.1 * col.max()))
self.nameToUse = 0
print ''
log(col.name, colour="red")
log('--------------------------------------', colour="red")
def onclick(event):
if self.numberOfStimulantsAdded == 0:
x1 = event.xdata
y1 = event.ydata
log(' > 1st point, adding x1:{} y1:{} to {}'.format(
x1, y1, self.Experiment.names[self.nameToUse]),
colour="black")
self.Experiment.currentCell.addFirstPoint(x1, y1)
self.numberOfStimulantsAdded = 1
elif self.numberOfStimulantsAdded == 1:
x2 = event.xdata
y2 = event.ydata
log(' > 2nd point, adding x2:{} y2:{} to {}'.format(
x2, y2, self.Experiment.names[self.nameToUse]),
colour="black")
self.Experiment.currentCell.addSecondPointWithName(
x2, y2, self.Experiment.names[self.nameToUse])
self.numberOfStimulantsAdded = 0
self.nameToUse = self.nameToUse + 1
fig.canvas.mpl_connect('button_press_event', onclick)
for t in self.Experiment.times:
plt.axvspan(t, t + 5, color='red', alpha=0.1)
plt.show()
self.Experiment.currentCell.cellname = col.name
self.Experiment.cells.append(self.Experiment.currentCell)
if self.Experiment.currentCell.describe() is not None:
log(self.Experiment.currentCell.describe(),
colour="black")
self.Experiment.currentCell = Cell()
| gpl-3.0 | 8,551,664,604,400,707,000 | 31.392405 | 77 | 0.490035 | false |
neuropoly/spinalcordtoolbox | spinalcordtoolbox/scripts/sct_maths.py | 1 | 20433 | #!/usr/bin/env python
#########################################################################################
#
# Perform mathematical operations on images
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Julien Cohen-Adad, Sara Dupont
#
# About the license: see the file LICENSE.TXT
#########################################################################################
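# Typical command-line usage (illustrative; assumes the script is installed as the `sct_maths` command,
# and the file names below are placeholders):
#   sct_maths -i data.nii.gz -o data_smooth.nii.gz -smooth 2.0
#   sct_maths -i data.nii.gz -o data_bin.nii.gz -bin 0.5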
import os
import sys
import pickle
import gzip
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import spinalcordtoolbox.math as sct_math
from spinalcordtoolbox.image import Image
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar, list_type, display_viewer_syntax
from spinalcordtoolbox.utils.sys import init_sct, printv, set_global_loglevel
from spinalcordtoolbox.utils.fs import extract_fname
def get_parser():
parser = SCTArgumentParser(
description='Perform mathematical operations on images. Some inputs can be either a number or a 4d image or '
                    'several 3d images separated with spaces.'
)
mandatory = parser.add_argument_group("MANDATORY ARGUMENTS")
mandatory.add_argument(
"-i",
metavar=Metavar.file,
help="Input file. Example: data.nii.gz",
required=True)
mandatory.add_argument(
"-o",
metavar=Metavar.file,
help='Output file. Example: data_mean.nii.gz',
required=True)
optional = parser.add_argument_group("OPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit")
basic = parser.add_argument_group('BASIC OPERATIONS')
basic.add_argument(
"-add",
metavar='',
nargs="+",
help='Add following input. Can be a number or multiple images (separated with space).',
required=False)
basic.add_argument(
"-sub",
metavar='',
nargs="+",
help='Subtract following input. Can be a number or an image.',
required=False)
basic.add_argument(
"-mul",
metavar='',
nargs="+",
help='Multiply by following input. Can be a number or multiple images (separated with space).',
required=False)
basic.add_argument(
"-div",
metavar='',
nargs="+",
help='Divide by following input. Can be a number or an image.',
required=False)
basic.add_argument(
'-mean',
help='Average data across dimension.',
required=False,
choices=('x', 'y', 'z', 't'))
basic.add_argument(
'-rms',
help='Compute root-mean-squared across dimension.',
required=False,
choices=('x', 'y', 'z', 't'))
basic.add_argument(
'-std',
help='Compute STD across dimension.',
required=False,
choices=('x', 'y', 'z', 't'))
basic.add_argument(
"-bin",
type=float,
metavar=Metavar.float,
help='Binarize image using specified threshold. Example: 0.5',
required=False)
thresholding = parser.add_argument_group("THRESHOLDING METHODS")
thresholding.add_argument(
'-otsu',
type=int,
metavar=Metavar.int,
help='Threshold image using Otsu algorithm (from skimage). Specify the number of bins (e.g. 16, 64, 128)',
required=False)
thresholding.add_argument(
"-adap",
metavar=Metavar.list,
type=list_type(',', int),
help="R|Threshold image using Adaptive algorithm (from skimage). Provide 2 values separated by ',' that "
"correspond to the parameters below. For example, '-adap 7,0' corresponds to a block size of 7 and an "
"offset of 0.\n"
" - Block size: Odd size of pixel neighborhood which is used to calculate the threshold value. \n"
" - Offset: Constant subtracted from weighted mean of neighborhood to calculate the local threshold "
"value. Suggested offset is 0.",
required=False)
thresholding.add_argument(
"-otsu-median",
metavar=Metavar.list,
type=list_type(',', int),
help="R|Threshold image using Median Otsu algorithm (from dipy). Provide 2 values separated by ',' that "
"correspond to the parameters below. For example, '-otsu-median 3,5' corresponds to a filter size of 3 "
"repeated over 5 iterations.\n"
" - Size: Radius (in voxels) of the applied median filter.\n"
" - Iterations: Number of passes of the median filter.",
required=False)
thresholding.add_argument(
'-percent',
type=int,
help="Threshold image using percentile of its histogram.",
metavar=Metavar.int,
required=False)
thresholding.add_argument(
"-thr",
type=float,
        help='Threshold image: set to zero all voxels below the specified value.',
metavar=Metavar.float,
required=False)
mathematical = parser.add_argument_group("MATHEMATICAL MORPHOLOGY")
mathematical.add_argument(
'-dilate',
type=int,
metavar=Metavar.int,
help="Dilate binary or greyscale image with specified size. If shape={'square', 'cube'}: size corresponds to the length of "
"an edge (size=1 has no effect). If shape={'disk', 'ball'}: size corresponds to the radius, not including "
"the center element (size=0 has no effect).",
required=False)
mathematical.add_argument(
'-erode',
type=int,
metavar=Metavar.int,
help="Erode binary or greyscale image with specified size. If shape={'square', 'cube'}: size corresponds to the length of "
"an edge (size=1 has no effect). If shape={'disk', 'ball'}: size corresponds to the radius, not including "
"the center element (size=0 has no effect).",
required=False)
mathematical.add_argument(
'-shape',
help="R|Shape of the structuring element for the mathematical morphology operation. Default: ball.\n"
"If a 2D shape {'disk', 'square'} is selected, -dim must be specified.",
required=False,
choices=('square', 'cube', 'disk', 'ball'),
default='ball')
mathematical.add_argument(
'-dim',
type=int,
help="Dimension of the array which 2D structural element will be orthogonal to. For example, if you wish to "
"apply a 2D disk kernel in the X-Y plane, leaving Z unaffected, parameters will be: shape=disk, dim=2.",
required=False,
choices=(0, 1, 2))
filtering = parser.add_argument_group("FILTERING METHODS")
filtering.add_argument(
"-smooth",
metavar=Metavar.list,
type=list_type(',', float),
help='Gaussian smoothing filtering. Supply values for standard deviations in mm. If a single value is provided, '
'it will be applied to each axis of the image. If multiple values are provided, there must be one value '
'per image axis. (Examples: "-smooth 2.0,3.0,2.0" (3D image), "-smooth 2.0" (any-D image)).',
required=False)
filtering.add_argument(
'-laplacian',
metavar=Metavar.list,
type=list_type(',', float),
help='Laplacian filtering. Supply values for standard deviations in mm. If a single value is provided, it will '
'be applied to each axis of the image. If multiple values are provided, there must be one value per '
'image axis. (Examples: "-laplacian 2.0,3.0,2.0" (3D image), "-laplacian 2.0" (any-D image)).',
required=False)
filtering.add_argument(
'-denoise',
        help='R|Non-local means adaptive denoising from P. Coupe et al. as implemented in dipy. Separate parameters with ",". Example: p=1,b=3\n'
             '  p: (patch radius) similar patches in the non-local means are searched for locally, inside a cube of side 2*p+1 centered at each voxel of interest. Default: p=1\n'
             '  b: (block radius) the size of the block to be used (2*b+1) in the blockwise non-local means implementation. Default: b=5\n'
             '     Note: the block radius must be smaller than the smallest image dimension (the default value is lowered for small images).\n'
             'To use default parameters, write -denoise 1',
required=False)
similarity = parser.add_argument_group("SIMILARITY METRIC")
similarity.add_argument(
'-mi',
metavar=Metavar.file,
help='Compute the mutual information (MI) between both input files (-i and -mi) as in: '
'http://scikit-learn.org/stable/modules/generated/sklearn.metrics.mutual_info_score.html',
required=False)
similarity.add_argument(
'-minorm',
metavar=Metavar.file,
help='Compute the normalized mutual information (MI) between both input files (-i and -mi) as in: '
'http://scikit-learn.org/stable/modules/generated/sklearn.metrics.normalized_mutual_info_score.html',
required=False)
similarity.add_argument(
'-corr',
metavar=Metavar.file,
help='Compute the cross correlation (CC) between both input files (-i and -cc).',
required=False)
misc = parser.add_argument_group("MISC")
misc.add_argument(
'-symmetrize',
type=int,
help='Symmetrize data along the specified dimension.',
required=False,
choices=(0, 1, 2))
misc.add_argument(
'-type',
required=False,
help='Output type.',
choices=('uint8', 'int16', 'int32', 'float32', 'complex64', 'float64', 'int8', 'uint16', 'uint32', 'int64',
'uint64'))
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode")
return parser
# MAIN
# ==========================================================================================
def main(argv=None):
"""
Main function
:param argv:
:return:
"""
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_global_loglevel(verbose=verbose)
dim_list = ['x', 'y', 'z', 't']
fname_in = arguments.i
fname_out = arguments.o
output_type = arguments.type
# Open file(s)
im = Image(fname_in)
data = im.data # 3d or 4d numpy array
dim = im.dim
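    # im.dim is (nx, ny, nz, nt, px, py, pz, pt): array sizes followed by voxel sizes in mm;
    # dim[4:7] is used below to convert sigmas given in mm into voxel units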
# run command
if arguments.otsu is not None:
param = arguments.otsu
data_out = sct_math.otsu(data, param)
elif arguments.adap is not None:
param = arguments.adap
data_out = sct_math.adap(data, param[0], param[1])
elif arguments.otsu_median is not None:
param = arguments.otsu_median
data_out = sct_math.otsu_median(data, param[0], param[1])
elif arguments.thr is not None:
param = arguments.thr
data_out = sct_math.threshold(data, param)
elif arguments.percent is not None:
param = arguments.percent
data_out = sct_math.perc(data, param)
elif arguments.bin is not None:
bin_thr = arguments.bin
data_out = sct_math.binarize(data, bin_thr=bin_thr)
elif arguments.add is not None:
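        # stack the input image and the operand(s) along a 4th dimension, then sum across it (element-wise addition)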
data2 = get_data_or_scalar(arguments.add, data)
data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
data_out = np.sum(data_concat, axis=3)
elif arguments.sub is not None:
data2 = get_data_or_scalar(arguments.sub, data)
data_out = data - data2
elif arguments.laplacian is not None:
sigmas = arguments.laplacian
if len(sigmas) == 1:
            sigmas = [sigmas[0] for i in range(len(data.shape))]
elif len(sigmas) != len(data.shape):
            printv(parser.error('ERROR: -laplacian needs the same number of inputs as the number of image dimensions OR only one input'))
# adjust sigma based on voxel size
sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
# smooth data
data_out = sct_math.laplacian(data, sigmas)
elif arguments.mul is not None:
data2 = get_data_or_scalar(arguments.mul, data)
data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
data_out = np.prod(data_concat, axis=3)
elif arguments.div is not None:
data2 = get_data_or_scalar(arguments.div, data)
data_out = np.divide(data, data2)
elif arguments.mean is not None:
dim = dim_list.index(arguments.mean)
if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t
data = data[..., np.newaxis]
data_out = np.mean(data, dim)
elif arguments.rms is not None:
dim = dim_list.index(arguments.rms)
if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t
data = data[..., np.newaxis]
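        # cast to float before squaring to avoid integer overflow, then take the square root of the mean of squares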
data_out = np.sqrt(np.mean(np.square(data.astype(float)), dim))
elif arguments.std is not None:
dim = dim_list.index(arguments.std)
if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t
data = data[..., np.newaxis]
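        # ddof=1: unbiased (sample) standard deviation, i.e. normalization by N-1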
data_out = np.std(data, dim, ddof=1)
elif arguments.smooth is not None:
sigmas = arguments.smooth
if len(sigmas) == 1:
sigmas = [sigmas[0] for i in range(len(data.shape))]
elif len(sigmas) != len(data.shape):
            printv(parser.error('ERROR: -smooth needs the same number of inputs as the number of image dimensions OR only one input'))
# adjust sigma based on voxel size
sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
# smooth data
data_out = sct_math.smooth(data, sigmas)
elif arguments.dilate is not None:
if arguments.shape in ['disk', 'square'] and arguments.dim is None:
printv(parser.error('ERROR: -dim is required for -dilate with 2D morphological kernel'))
data_out = sct_math.dilate(data, size=arguments.dilate, shape=arguments.shape, dim=arguments.dim)
elif arguments.erode is not None:
if arguments.shape in ['disk', 'square'] and arguments.dim is None:
printv(parser.error('ERROR: -dim is required for -erode with 2D morphological kernel'))
data_out = sct_math.erode(data, size=arguments.erode, shape=arguments.shape, dim=arguments.dim)
elif arguments.denoise is not None:
# parse denoising arguments
p, b = 1, 5 # default arguments
list_denoise = (arguments.denoise).split(",")
for i in list_denoise:
if 'p' in i:
p = int(i.split('=')[1])
if 'b' in i:
b = int(i.split('=')[1])
data_out = sct_math.denoise_nlmeans(data, patch_radius=p, block_radius=b)
elif arguments.symmetrize is not None:
data_out = (data + data[list(range(data.shape[0] - 1, -1, -1)), :, :]) / float(2)
elif arguments.mi is not None:
# input 1 = from flag -i --> im
# input 2 = from flag -mi
im_2 = Image(arguments.mi)
compute_similarity(im, im_2, fname_out, metric='mi', metric_full='Mutual information', verbose=verbose)
data_out = None
elif arguments.minorm is not None:
im_2 = Image(arguments.minorm)
compute_similarity(im, im_2, fname_out, metric='minorm', metric_full='Normalized Mutual information', verbose=verbose)
data_out = None
elif arguments.corr is not None:
# input 1 = from flag -i --> im
# input 2 = from flag -mi
im_2 = Image(arguments.corr)
compute_similarity(im, im_2, fname_out, metric='corr', metric_full='Pearson correlation coefficient', verbose=verbose)
data_out = None
# if no flag is set
else:
data_out = None
printv(parser.error('ERROR: you need to specify an operation to do on the input image'))
if data_out is not None:
# Write output
nii_out = Image(fname_in) # use header of input file
nii_out.data = data_out
nii_out.save(fname_out, dtype=output_type)
# TODO: case of multiple outputs
# assert len(data_out) == n_out
# if n_in == n_out:
# for im_in, d_out, fn_out in zip(nii, data_out, fname_out):
# im_in.data = d_out
# im_in.absolutepath = fn_out
# if arguments.w is not None:
# im_in.hdr.set_intent('vector', (), '')
# im_in.save()
# elif n_out == 1:
# nii[0].data = data_out[0]
# nii[0].absolutepath = fname_out[0]
# if arguments.w is not None:
# nii[0].hdr.set_intent('vector', (), '')
# nii[0].save()
# elif n_out > n_in:
# for dat_out, name_out in zip(data_out, fname_out):
# im_out = nii[0].copy()
# im_out.data = dat_out
# im_out.absolutepath = name_out
# if arguments.w is not None:
# im_out.hdr.set_intent('vector', (), '')
# im_out.save()
# else:
# printv(parser.usage.generate(error='ERROR: not the correct numbers of inputs and outputs'))
# display message
if data_out is not None:
display_viewer_syntax([fname_out], verbose=verbose)
else:
printv('\nDone! File created: ' + fname_out, verbose, 'info')
def get_data(list_fname):
"""
Get data from list of file names
:param list_fname:
:return: 3D or 4D numpy array.
"""
try:
nii = [Image(f_in) for f_in in list_fname]
except Exception as e:
printv(str(e), 1, 'error') # file does not exist, exit program
data0 = nii[0].data
data = nii[0].data
# check that every images have same shape
for i in range(1, len(nii)):
if not np.shape(nii[i].data) == np.shape(data0):
printv('\nWARNING: shape(' + list_fname[i] + ')=' + str(np.shape(nii[i].data)) + ' incompatible with shape(' + list_fname[0] + ')=' + str(np.shape(data0)), 1, 'warning')
printv('\nERROR: All input images must have same dimensions.', 1, 'error')
else:
data = sct_math.concatenate_along_4th_dimension(data, nii[i].data)
return data
def get_data_or_scalar(argument, data_in):
"""
Get data from list of file names (scenario 1) or scalar (scenario 2)
:param argument: list of file names of scalar
:param data_in: if argument is scalar, use data to get np.shape
:return: 3d or 4d numpy array
"""
# try to convert argument in float
try:
# build data2 with same shape as data
data_out = data_in[:, :, :] * 0 + float(argument[0])
# if conversion fails, it should be a string (i.e. file name)
except ValueError:
data_out = get_data(argument)
return data_out
def compute_similarity(img1: Image, img2: Image, fname_out: str, metric: str, metric_full: str, verbose):
"""
Sanitize input and compute similarity metric between two images data.
"""
if img1.data.size != img2.data.size:
raise ValueError(f"Input images don't have the same size! \nPlease use \"sct_register_multimodal -i im1.nii.gz -d im2.nii.gz -identity 1\" to put the input images in the same space")
res, data1_1d, data2_1d = sct_math.compute_similarity(img1.data, img2.data, metric=metric)
if verbose > 1:
matplotlib.use('Agg')
plt.plot(data1_1d, 'b')
plt.plot(data2_1d, 'r')
plt.title('Similarity: ' + metric_full + ' = ' + str(res))
plt.savefig('fig_similarity.png')
path_out, filename_out, ext_out = extract_fname(fname_out)
if ext_out not in ['.txt', '.pkl', '.pklz', '.pickle']:
raise ValueError(f"The output file should a text file or a pickle file. Received extension: {ext_out}")
if ext_out == '.txt':
with open(fname_out, 'w') as f:
f.write(metric_full + ': \n' + str(res))
elif ext_out == '.pklz':
pickle.dump(res, gzip.open(fname_out, 'wb'), protocol=2)
else:
        pickle.dump(res, open(fname_out, 'wb'), protocol=2)
if __name__ == "__main__":
init_sct()
main(sys.argv[1:])
| mit | 2,048,659,252,759,735,300 | 39.222441 | 192 | 0.597856 | false |
Ultimaker/Cura | plugins/DigitalLibrary/src/DFFileExportAndUploadManager.py | 1 | 19837 | # Copyright (c) 2021 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import json
import threading
from json import JSONDecodeError
from typing import List, Dict, Any, Callable, Union, Optional
from PyQt5.QtCore import QUrl
from PyQt5.QtGui import QDesktopServices
from PyQt5.QtNetwork import QNetworkReply
from UM.FileHandler.FileHandler import FileHandler
from UM.Logger import Logger
from UM.Message import Message
from UM.Scene.SceneNode import SceneNode
from cura.CuraApplication import CuraApplication
from .DFLibraryFileUploadRequest import DFLibraryFileUploadRequest
from .DFLibraryFileUploadResponse import DFLibraryFileUploadResponse
from .DFPrintJobUploadRequest import DFPrintJobUploadRequest
from .DFPrintJobUploadResponse import DFPrintJobUploadResponse
from .DigitalFactoryApiClient import DigitalFactoryApiClient
from .ExportFileJob import ExportFileJob
class DFFileExportAndUploadManager:
"""
Class responsible for exporting the scene and uploading the exported data to the Digital Factory Library. Since 3mf
and UFP files may need to be uploaded at the same time, this class keeps a single progress and success message for
both files and updates those messages according to the progress of both the file job uploads.
"""
def __init__(self, file_handlers: Dict[str, FileHandler],
nodes: List[SceneNode],
library_project_id: str,
library_project_name: str,
file_name: str,
formats: List[str],
on_upload_error: Callable[[], Any],
on_upload_success: Callable[[], Any],
                 on_upload_finished: Callable[[], Any],
on_upload_progress: Callable[[int], Any]) -> None:
self._file_handlers = file_handlers # type: Dict[str, FileHandler]
self._nodes = nodes # type: List[SceneNode]
self._library_project_id = library_project_id # type: str
self._library_project_name = library_project_name # type: str
self._file_name = file_name # type: str
self._upload_jobs = [] # type: List[ExportFileJob]
self._formats = formats # type: List[str]
self._api = DigitalFactoryApiClient(application = CuraApplication.getInstance(), on_error = lambda error: Logger.log("e", str(error)))
# Functions of the parent class that should be called based on the upload process output
self._on_upload_error = on_upload_error
self._on_upload_success = on_upload_success
self._on_upload_finished = on_upload_finished
self._on_upload_progress = on_upload_progress
# Lock used for updating the progress message (since the progress is changed by two parallel upload jobs) or
# show the success message (once both upload jobs are done)
self._message_lock = threading.Lock()
self._file_upload_job_metadata = self.initializeFileUploadJobMetadata() # type: Dict[str, Dict[str, Any]]
self.progress_message = Message(
title = "Uploading...",
text = "Uploading files to '{}'".format(self._library_project_name),
progress = -1,
lifetime = 0,
dismissable = False,
use_inactivity_timer = False
)
self._generic_success_message = Message(
text = "Your {} uploaded to '{}'.".format("file was" if len(self._file_upload_job_metadata) <= 1 else "files were", self._library_project_name),
title = "Upload successful",
lifetime = 0,
)
self._generic_success_message.addAction(
"open_df_project",
"Open project",
"open-folder", "Open the project containing the file in Digital Library"
)
self._generic_success_message.actionTriggered.connect(self._onMessageActionTriggered)
def _onCuraProjectFileExported(self, job: ExportFileJob) -> None:
"""Handler for when the DF Library workspace file (3MF) has been created locally.
It can now be sent over the Digital Factory API.
"""
if not job.getOutput():
self._onJobExportError(job.getFileName())
return
self._file_upload_job_metadata[job.getFileName()]["export_job_output"] = job.getOutput()
request = DFLibraryFileUploadRequest(
content_type = job.getMimeType(),
file_name = job.getFileName(),
file_size = len(job.getOutput()),
library_project_id = self._library_project_id
)
self._api.requestUpload3MF(request, on_finished = self._uploadFileData, on_error = self._onRequestUploadCuraProjectFileFailed)
def _onPrintFileExported(self, job: ExportFileJob) -> None:
"""Handler for when the DF Library print job file (UFP) has been created locally.
It can now be sent over the Digital Factory API.
"""
if not job.getOutput():
self._onJobExportError(job.getFileName())
return
self._file_upload_job_metadata[job.getFileName()]["export_job_output"] = job.getOutput()
request = DFPrintJobUploadRequest(
content_type = job.getMimeType(),
job_name = job.getFileName(),
file_size = len(job.getOutput()),
library_project_id = self._library_project_id
)
self._api.requestUploadUFP(request, on_finished = self._uploadFileData, on_error = self._onRequestUploadPrintFileFailed)
def _uploadFileData(self, file_upload_response: Union[DFLibraryFileUploadResponse, DFPrintJobUploadResponse]) -> None:
"""Uploads the exported file data after the file or print job upload has been registered at the Digital Factory
Library API.
:param file_upload_response: The response received from the Digital Factory Library API.
"""
if isinstance(file_upload_response, DFLibraryFileUploadResponse):
file_name = file_upload_response.file_name
elif isinstance(file_upload_response, DFPrintJobUploadResponse):
file_name = file_upload_response.job_name if file_upload_response.job_name is not None else ""
else:
Logger.log("e", "Wrong response type received. Aborting uploading file to the Digital Library")
return
with self._message_lock:
self.progress_message.show()
self._file_upload_job_metadata[file_name]["file_upload_response"] = file_upload_response
job_output = self._file_upload_job_metadata[file_name]["export_job_output"]
with self._message_lock:
self._file_upload_job_metadata[file_name]["upload_status"] = "uploading"
self._api.uploadExportedFileData(file_upload_response,
job_output,
on_finished = self._onFileUploadFinished,
on_success = self._onUploadSuccess,
on_progress = self._onUploadProgress,
on_error = self._onUploadError)
def _onUploadProgress(self, filename: str, progress: int) -> None:
"""
Updates the progress message according to the total progress of the two files and displays it to the user. It is
made thread-safe with a lock, since the progress can be updated by two separate upload jobs
:param filename: The name of the file for which we have progress (including the extension).
:param progress: The progress percentage
"""
with self._message_lock:
self._file_upload_job_metadata[filename]["upload_progress"] = progress
self._file_upload_job_metadata[filename]["upload_status"] = "uploading"
total_progress = self.getTotalProgress()
self.progress_message.setProgress(total_progress)
self.progress_message.show()
self._on_upload_progress(progress)
def _onUploadSuccess(self, filename: str) -> None:
"""
        Sets the upload status to success and the progress of the file with the given filename to 100%. This function
should be called only if the file has uploaded all of its data successfully (i.e. no error occurred during the
upload process).
:param filename: The name of the file that was uploaded successfully (including the extension).
"""
with self._message_lock:
self._file_upload_job_metadata[filename]["upload_status"] = "success"
self._file_upload_job_metadata[filename]["upload_progress"] = 100
self._on_upload_success()
def _onFileUploadFinished(self, filename: str) -> None:
"""
Callback that makes sure the correct messages are displayed according to the statuses of the individual jobs.
This function is called whenever an upload job has finished, regardless if it had errors or was successful.
Both jobs have to have finished for the messages to show.
:param filename: The name of the file that has finished uploading (including the extension).
"""
with self._message_lock:
# All files have finished their uploading process
if all([(file_upload_job["upload_progress"] == 100 and file_upload_job["upload_status"] != "uploading") for file_upload_job in self._file_upload_job_metadata.values()]):
# Reset and hide the progress message
self.progress_message.setProgress(-1)
self.progress_message.hide()
# All files were successfully uploaded.
if all([(file_upload_job["upload_status"] == "success") for file_upload_job in self._file_upload_job_metadata.values()]):
# Show a single generic success message for all files
self._generic_success_message.show()
else: # One or more files failed to upload.
# Show individual messages for each file, according to their statuses
for filename, upload_job_metadata in self._file_upload_job_metadata.items():
if upload_job_metadata["upload_status"] == "success":
upload_job_metadata["file_upload_success_message"].show()
else:
upload_job_metadata["file_upload_failed_message"].show()
# Call the parent's finished function
self._on_upload_finished()
def _onJobExportError(self, filename: str) -> None:
"""
Displays an appropriate message when the process to export a file fails.
:param filename: The name of the file that failed to be exported (including the extension).
"""
Logger.log("d", "Error while exporting file '{}'".format(filename))
with self._message_lock:
# Set the progress to 100% when the upload job fails, to avoid having the progress message stuck
self._file_upload_job_metadata[filename]["upload_status"] = "failed"
self._file_upload_job_metadata[filename]["upload_progress"] = 100
self._file_upload_job_metadata[filename]["file_upload_failed_message"] = Message(
text = "Failed to export the file '{}'. The upload process is aborted.".format(filename),
title = "Export error",
lifetime = 0
)
self._on_upload_error()
self._onFileUploadFinished(filename)
def _onRequestUploadCuraProjectFileFailed(self, reply: "QNetworkReply", network_error: "QNetworkReply.NetworkError") -> None:
"""
Displays an appropriate message when the request to upload the Cura project file (.3mf) to the Digital Library fails.
This means that something went wrong with the initial request to create a "file" entry in the digital library.
"""
reply_string = bytes(reply.readAll()).decode()
filename_3mf = self._file_name + ".3mf"
Logger.log("d", "An error occurred while uploading the Cura project file '{}' to the Digital Library project '{}': {}".format(filename_3mf, self._library_project_id, reply_string))
with self._message_lock:
# Set the progress to 100% when the upload job fails, to avoid having the progress message stuck
self._file_upload_job_metadata[filename_3mf]["upload_status"] = "failed"
self._file_upload_job_metadata[filename_3mf]["upload_progress"] = 100
human_readable_error = self.extractErrorTitle(reply_string)
self._file_upload_job_metadata[filename_3mf]["file_upload_failed_message"] = Message(
text = "Failed to upload the file '{}' to '{}'. {}".format(filename_3mf, self._library_project_name, human_readable_error),
title = "File upload error",
lifetime = 0
)
self._on_upload_error()
self._onFileUploadFinished(filename_3mf)
def _onRequestUploadPrintFileFailed(self, reply: "QNetworkReply", network_error: "QNetworkReply.NetworkError") -> None:
"""
Displays an appropriate message when the request to upload the print file (.ufp) to the Digital Library fails.
This means that something went wrong with the initial request to create a "file" entry in the digital library.
"""
reply_string = bytes(reply.readAll()).decode()
filename_ufp = self._file_name + ".ufp"
Logger.log("d", "An error occurred while uploading the print job file '{}' to the Digital Library project '{}': {}".format(filename_ufp, self._library_project_id, reply_string))
with self._message_lock:
# Set the progress to 100% when the upload job fails, to avoid having the progress message stuck
self._file_upload_job_metadata[filename_ufp]["upload_status"] = "failed"
self._file_upload_job_metadata[filename_ufp]["upload_progress"] = 100
human_readable_error = self.extractErrorTitle(reply_string)
self._file_upload_job_metadata[filename_ufp]["file_upload_failed_message"] = Message(
title = "File upload error",
text = "Failed to upload the file '{}' to '{}'. {}".format(filename_ufp, self._library_project_name, human_readable_error),
lifetime = 0
)
self._on_upload_error()
self._onFileUploadFinished(filename_ufp)
@staticmethod
def extractErrorTitle(reply_body: Optional[str]) -> str:
error_title = ""
if reply_body:
try:
reply_dict = json.loads(reply_body)
except JSONDecodeError:
Logger.logException("w", "Unable to extract title from reply body")
return error_title
if "errors" in reply_dict and len(reply_dict["errors"]) >= 1 and "title" in reply_dict["errors"][0]:
error_title = reply_dict["errors"][0]["title"]
return error_title
def _onUploadError(self, filename: str, reply: "QNetworkReply", error: "QNetworkReply.NetworkError") -> None:
"""
Displays the given message if uploading the mesh has failed due to a generic error (i.e. lost connection).
If one of the two files fail, this error function will set its progress as finished, to make sure that the
progress message doesn't get stuck.
:param filename: The name of the file that failed to upload (including the extension).
"""
reply_string = bytes(reply.readAll()).decode()
Logger.log("d", "Error while uploading '{}' to the Digital Library project '{}'. Reply: {}".format(filename, self._library_project_id, reply_string))
with self._message_lock:
# Set the progress to 100% when the upload job fails, to avoid having the progress message stuck
self._file_upload_job_metadata[filename]["upload_status"] = "failed"
self._file_upload_job_metadata[filename]["upload_progress"] = 100
human_readable_error = self.extractErrorTitle(reply_string)
self._file_upload_job_metadata[filename]["file_upload_failed_message"] = Message(
title = "File upload error",
text = "Failed to upload the file '{}' to '{}'. {}".format(self._file_name, self._library_project_name, human_readable_error),
lifetime = 0
)
self._on_upload_error()
def getTotalProgress(self) -> int:
"""
Returns the total upload progress of all the upload jobs
:return: The average progress percentage
"""
return int(sum([file_upload_job["upload_progress"] for file_upload_job in self._file_upload_job_metadata.values()]) / len(self._file_upload_job_metadata.values()))
def _onMessageActionTriggered(self, message, action):
if action == "open_df_project":
project_url = "{}/app/library/project/{}?wait_for_new_files=true".format(CuraApplication.getInstance().ultimakerDigitalFactoryUrl, self._library_project_id)
QDesktopServices.openUrl(QUrl(project_url))
message.hide()
def start(self) -> None:
for job in self._upload_jobs:
job.start()
def initializeFileUploadJobMetadata(self) -> Dict[str, Any]:
metadata = {}
self._upload_jobs = []
if "3mf" in self._formats and "3mf" in self._file_handlers and self._file_handlers["3mf"]:
filename_3mf = self._file_name + ".3mf"
metadata[filename_3mf] = {
"export_job_output" : None,
"upload_progress" : -1,
"upload_status" : "",
"file_upload_response": None,
"file_upload_success_message": Message(
text = "'{}' was uploaded to '{}'.".format(filename_3mf, self._library_project_name),
title = "Upload successful",
lifetime = 0,
),
"file_upload_failed_message": Message(
text = "Failed to upload the file '{}' to '{}'.".format(filename_3mf, self._library_project_name),
title = "File upload error",
lifetime = 0
)
}
job_3mf = ExportFileJob(self._file_handlers["3mf"], self._nodes, self._file_name, "3mf")
job_3mf.finished.connect(self._onCuraProjectFileExported)
self._upload_jobs.append(job_3mf)
if "ufp" in self._formats and "ufp" in self._file_handlers and self._file_handlers["ufp"]:
filename_ufp = self._file_name + ".ufp"
metadata[filename_ufp] = {
"export_job_output" : None,
"upload_progress" : -1,
"upload_status" : "",
"file_upload_response": None,
"file_upload_success_message": Message(
text = "'{}' was uploaded to '{}'.".format(filename_ufp, self._library_project_name),
title = "Upload successful",
lifetime = 0,
),
"file_upload_failed_message": Message(
text = "Failed to upload the file '{}' to '{}'.".format(filename_ufp, self._library_project_name),
title = "File upload error",
lifetime = 0
)
}
job_ufp = ExportFileJob(self._file_handlers["ufp"], self._nodes, self._file_name, "ufp")
job_ufp.finished.connect(self._onPrintFileExported)
self._upload_jobs.append(job_ufp)
return metadata
| lgpl-3.0 | -487,210,490,165,346,050 | 52.182306 | 188 | 0.616978 | false |
scotthuang1989/Python-3-Module-of-the-Week | concurrency/asyncio/asyncio_echo_server_protocol.py | 1 | 1651 | import asyncio
import logging
import sys
SERVER_ADDRESS = ('localhost', 10000)
logging.basicConfig(
level=logging.DEBUG,
format='%(name)s: %(message)s',
stream=sys.stderr,
)
log = logging.getLogger('main')
event_loop = asyncio.get_event_loop()
class EchoServer(asyncio.Protocol):
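    """Protocol that logs each connection and echoes received data back to the client."""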
def connection_made(self, transport):
self.transport = transport
self.address = transport.get_extra_info('peername')
self.log = logging.getLogger(
'EchoServer_{}_{}'.format(*self.address)
)
self.log.debug('connection accepted')
def data_received(self, data):
self.log.debug('received {!r}'.format(data))
self.transport.write(data)
self.log.debug('sent {!r}'.format(data))
def eof_received(self):
self.log.debug('received EOF')
if self.transport.can_write_eof():
self.transport.write_eof()
def connection_lost(self, error):
if error:
self.log.error('ERROR: {}'.format(error))
else:
self.log.debug('closing')
super().connection_lost(error)
# Create the server and let the loop finish the coroutine before
# starting the real event loop.
factory = event_loop.create_server(EchoServer, *SERVER_ADDRESS)
server = event_loop.run_until_complete(factory)
log.debug('starting up on {} port {}'.format(*SERVER_ADDRESS))
# Enter the event loop permanently to handle all connections.
try:
event_loop.run_forever()
finally:
log.debug('closing server')
server.close()
event_loop.run_until_complete(server.wait_closed())
log.debug('closing event loop')
event_loop.close()
| apache-2.0 | -3,189,786,440,921,151,000 | 27.964912 | 64 | 0.654755 | false |
pymanopt/pymanopt | pymanopt/manifolds/__init__.py | 1 | 1111 | __all__ = [
"ComplexCircle",
"ComplexGrassmann",
"Elliptope",
"Euclidean",
"FixedRankEmbedded",
"Grassmann",
"Oblique",
"PSDFixedRank",
"PSDFixedRankComplex",
"Product",
"SkewSymmetric",
"SpecialOrthogonalGroup",
"Sphere",
"SphereSubspaceComplementIntersection",
"SphereSubspaceIntersection",
"Stiefel",
"StrictlyPositiveVectors",
"Symmetric",
"SymmetricPositiveDefinite"
]
from .complex_circle import ComplexCircle
from .complex_grassmann import ComplexGrassmann
from .euclidean import Euclidean, SkewSymmetric, Symmetric
from .fixed_rank import FixedRankEmbedded
from .grassmann import Grassmann
from .oblique import Oblique
from .product import Product
from .psd import (Elliptope, PSDFixedRank, PSDFixedRankComplex,
SymmetricPositiveDefinite)
from .special_orthogonal_group import SpecialOrthogonalGroup
from .sphere import (Sphere, SphereSubspaceComplementIntersection,
SphereSubspaceIntersection)
from .stiefel import Stiefel
from .strictly_positive_vectors import StrictlyPositiveVectors
| bsd-3-clause | 2,313,783,084,934,162,400 | 29.861111 | 66 | 0.741674 | false |
Tenrec-Builders/pi-scan | src/stick.py | 1 | 3424 | import dbus
def search():
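  """Return a Stick for each USB-connected UDisks2 block device that exposes a filesystem."""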
bus = dbus.SystemBus()
udisks = dbus.Interface(
bus.get_object('org.freedesktop.UDisks2',
'/org/freedesktop/UDisks2'),
'org.freedesktop.DBus.ObjectManager')
listDevices = udisks.get_dbus_method('GetManagedObjects')
result = []
for key, value in listDevices().items():
try:
if ('org.freedesktop.UDisks2.Block' in value and
'org.freedesktop.UDisks2.Filesystem' in value):
block = value['org.freedesktop.UDisks2.Block']
drive = dbus.Interface(
bus.get_object('org.freedesktop.UDisks2',
block['Drive']),
'org.freedesktop.UDisks2.Drive')
driveprop = dbus.Interface(
drive,
'org.freedesktop.DBus.Properties')
busType = driveprop.Get('org.freedesktop.UDisks2.Drive',
'ConnectionBus')
if busType == 'usb':
result.append(Stick(key))
except Exception as e:
pass
return result
def searchAndUnmount(shouldForce):
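  """Unmount every mounted USB stick found and return how many were mounted."""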
result = 0
sticks = search()
for stick in sticks:
if stick.get_mount_point() is not None:
result += 1
stick.unmount(shouldForce)
return result
class Stick:
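  """Wraps the UDisks2 object path of a USB stick's filesystem."""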
def __init__(self, path):
self.path = path
def mount(self):
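    """Mount the filesystem via UDisks2 if it is not already mounted; return the mount point or None."""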
mount_point = self.get_mount_point()
try:
if mount_point is None:
bus = dbus.SystemBus()
fs = dbus.Interface(
bus.get_object('org.freedesktop.UDisks2',
self.path),
'org.freedesktop.UDisks2.Filesystem')
mount = fs.get_dbus_method(
"Mount",
dbus_interface="org.freedesktop.UDisks2.Filesystem")
mount_point = mount([])
except Exception, e:
print 'Failed to mount: ', e
return mount_point
def get_mount_point(self):
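    """Return the stick's first current mount point, or None if it is not mounted."""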
mount_point = None
try:
bus = dbus.SystemBus()
fs = dbus.Interface(
bus.get_object('org.freedesktop.UDisks2',
self.path),
'org.freedesktop.UDisks2.Filesystem')
fsprop = dbus.Interface(fs, 'org.freedesktop.DBus.Properties')
old_mounts = fsprop.Get('org.freedesktop.UDisks2.Filesystem',
'MountPoints')
if len(old_mounts) > 0:
mount_point = bytearray(old_mounts[0]).decode('utf-8')
except Exception, e:
print 'Failed to get/parse mount point', e
return mount_point
def unmount(self, should_force):
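    """Unmount the stick's filesystem via UDisks2, optionally forcing the unmount."""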
mount_point = self.get_mount_point()
try:
if mount_point is not None:
bus = dbus.SystemBus()
fs = dbus.Interface(
bus.get_object('org.freedesktop.UDisks2',
self.path),
'org.freedesktop.UDisks2.Filesystem')
unmount = fs.get_dbus_method(
"Unmount",
dbus_interface="org.freedesktop.UDisks2.Filesystem")
unmount({'force': should_force})
except Exception, e:
print 'Failed to unmount: ', e
def main():
mount_point = None
sticks = search()
if len(sticks) == 0:
print 'No Stick Found'
elif len(sticks) > 1:
print len(sticks), ' sticks found. Try unplugging one.'
else:
mount_point = sticks[0].get_mount_point()
if mount_point is None:
mount_point = sticks[0].mount()
print 'Mounted at: ' + mount_point
else:
print 'Unmounting. Was mounted at: ' + mount_point
sticks[0].unmount(True)
#main()
| bsd-2-clause | 8,485,670,437,875,832,000 | 29.846847 | 68 | 0.592874 | false |
dhalleine/tensorflow | tensorflow/contrib/layers/python/layers/optimizers.py | 1 | 9885 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer ops for use in layers and tf.learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as vars_
from tensorflow.python.training import optimizer as optimizer_
from tensorflow.python.training import training as train
OPTIMIZER_CLS_NAMES = {
"Adagrad": train.AdagradOptimizer,
"Adam": train.AdamOptimizer,
"Ftrl": train.FtrlOptimizer,
"Momentum": train.MomentumOptimizer,
"RMSProp": train.RMSPropOptimizer,
"SGD": train.GradientDescentOptimizer,
}
def optimize_loss(loss,
global_step,
learning_rate,
optimizer,
gradient_noise_scale=None,
gradient_multipliers=None,
clip_gradients=None,
moving_average_decay=0.9,
learning_rate_decay_fn=None,
update_ops=None,
variables=None,
name=None):
"""Given loss and parameters for optimizer, returns a training op.
Args:
loss: Tensor, 0 dimensional.
global_step: Tensor, step counter for each update.
learning_rate: float or Tensor, magnitude of update per each training step.
optimizer: string, class or optimizer instance, used as trainer.
string should be name of optimizer, like 'SGD',
'Adam', 'Adagrad'. Full list in OPTIMIZER_CLS_NAMES constant.
class should be sub-class of tf.Optimizer that implements
`compute_gradients` and `apply_gradients` functions.
               optimizer instance should be an instantiation of a tf.Optimizer sub-class
and have `compute_gradients` and `apply_gradients` functions.
gradient_noise_scale: float or None, adds 0-mean normal noise scaled by this
value.
gradient_multipliers: dict of variables or variable names to floats.
If present, gradients for specified
variables will be multiplied by given constant.
clip_gradients: float or `None`, clips gradients by this value.
moving_average_decay: float or None, takes into account previous loss
to make learning smoother due to outliers.
learning_rate_decay_fn: function, takes `learning_rate` and `global_step`
`Tensor`s, returns `Tensor`.
Can be used to implement any learning rate decay
functions.
For example: tf.train.exponential_decay.
update_ops: list of update `Operation`s to execute at each step. If `None`,
uses elements of UPDATE_OPS collection.
variables: list of variables to optimize or
`None` to use all trainable variables.
name: The name for this operation is used to scope operations and summaries.
Returns:
Training op.
Raises:
ValueError: if optimizer is wrong type.
"""
with vs.variable_op_scope([loss, global_step], name, "OptimizeLoss"):
# Update ops take UPDATE_OPS collection if not provided.
update_ops = (set(update_ops or []) or
set(ops.get_collection(ops.GraphKeys.UPDATE_OPS)))
# Make sure update ops are ran before computing loss.
if update_ops:
with ops.control_dependencies(update_ops):
barrier = control_flow_ops.no_op(name="update_barrier")
loss = control_flow_ops.with_dependencies([barrier], loss)
# Moving average of the loss with decay.
if moving_average_decay is not None:
# Generate moving averages of the loss.
loss_averages = train.ExponentialMovingAverage(moving_average_decay,
name="avg")
loss_averages_op = loss_averages.apply([loss])
logging_ops.scalar_summary("loss/mean", loss_averages.average(loss))
loss = control_flow_ops.with_dependencies([loss_averages_op], loss)
# Learning rate variable, with possible decay.
if (isinstance(learning_rate, ops.Tensor)
and learning_rate.get_shape().ndims == 0):
lr = learning_rate
elif isinstance(learning_rate, float):
lr = vs.get_variable(
"learning_rate", [], trainable=False,
initializer=init_ops.constant_initializer(learning_rate))
else:
raise ValueError("Learning rate should be 0d Tensor or float. "
"Got %s of type %s" % (
str(learning_rate), str(type(learning_rate))))
if learning_rate_decay_fn is not None:
lr = learning_rate_decay_fn(lr, global_step)
# Create optimizer, given specified parameters.
if isinstance(optimizer, six.string_types):
if optimizer not in OPTIMIZER_CLS_NAMES:
raise ValueError(
"Optimizer name should be one of [%s], you provided %s."
% (", ".join(OPTIMIZER_CLS_NAMES), optimizer))
opt = OPTIMIZER_CLS_NAMES[optimizer](learning_rate=lr)
elif isinstance(optimizer, type) and issubclass(optimizer,
optimizer_.Optimizer):
opt = optimizer(learning_rate=lr)
elif isinstance(optimizer, optimizer_.Optimizer):
opt = optimizer
else:
raise ValueError("Unrecognized optimizer: should be string, "
"subclass of Optimizer or instance of "
"subclass of Optimizer. Got %s." % str(optimizer))
# All trainable variables, if specific variables are not specified.
if variables is None:
variables = vars_.trainable_variables()
# Compute gradients.
gradients = opt.compute_gradients(loss, variables)
# Optionally add gradient noise.
if gradient_noise_scale is not None:
gradients = _add_scaled_noise_to_gradients(
gradients, gradient_noise_scale)
# Multiply some gradients.
if gradient_multipliers is not None:
gradients = _multiply_gradients(gradients, gradient_multipliers)
# Optionally clip gradients by global norm.
if clip_gradients is not None:
gradients = _clip_gradients_by_norm(gradients, clip_gradients)
# Add scalar summary for loss.
logging_ops.scalar_summary("loss", loss)
# Add histograms for variables, gradients and gradient norms.
for gradient, variable in gradients:
if isinstance(gradient, ops.IndexedSlices):
grad_values = gradient.values
else:
grad_values = gradient
if grad_values is not None:
logging_ops.histogram_summary(variable.name, variable)
logging_ops.histogram_summary(variable.name + "/gradients", grad_values)
logging_ops.histogram_summary(variable.name + "/gradient_norm",
clip_ops.global_norm([grad_values]))
# Create gradient updates.
grad_updates = opt.apply_gradients(gradients,
global_step=global_step,
name="train")
# Make sure total_loss is valid.
final_loss = array_ops.check_numerics(loss, "Loss is inf or nan")
# Ensure the train_tensor computes grad_updates.
train_tensor = control_flow_ops.with_dependencies(
[grad_updates], final_loss)
return train_tensor
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
"""Clips gradients by global norm."""
gradients, variables = zip(*grads_and_vars)
clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients,
clip_gradients)
return list(zip(clipped_gradients, variables))
def _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale):
"""Adds scaled noise from a 0-mean normal distribution to gradients."""
gradients, variables = zip(*grads_and_vars)
noisy_gradients = []
for gradient in gradients:
if isinstance(gradient, ops.IndexedSlices):
gradient_shape = gradient.dense_shape
else:
gradient_shape = gradient.get_shape()
noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale
noisy_gradients.append(gradient + noise)
return list(zip(noisy_gradients, variables))
def _multiply_gradients(grads_and_vars, gradient_multipliers):
"""Multiply specified gradients."""
multiplied_grads_and_vars = []
for grad, var in grads_and_vars:
if var in gradient_multipliers or var.name in gradient_multipliers:
key = var if var in gradient_multipliers else var.name
grad *= constant_op.constant(
gradient_multipliers[key], dtype=dtypes.float32)
multiplied_grads_and_vars.append((grad, var))
return multiplied_grads_and_vars
| apache-2.0 | 2,485,348,243,574,969,000 | 42.165939 | 80 | 0.653111 | false |
lab-robotics-unipv/pyFUZZYgenerator | core/ModelType.py | 1 | 2708 | class ModelType:
def __init__(self):
self.type = None
self.properties = {}
self.models = []
self.logic_function_name = None
self.init_function_name = None
self.output_function_name = None
def __eq__(self, other):
if other.type == self.type:
return True
return False
def update(self, model):
self.models.append(model)
class TooBigDimensionsException(Exception):
pass
class ModelNotFoundException(Exception):
pass
class FINDModelType(ModelType):
def __init__(self):
super().__init__()
self.type = "F-IND"
self.properties["max_input_n"] = 0
self.logic_function_name = "findLogic"
self.init_function_name = "initFindLogic"
self.output_function_name = "calculateFindIndex"
def update(self, model):
super().update(model)
self.properties["max_input_n"] = max(
len(model.input_var), self.properties["max_input_n"])
def get_squaredint_t(self):
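        """Return the smallest unsigned C integer type sized for the model's maximum number of inputs."""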
ninput = self.properties["max_input_n"]
if ninput < 8:
return "uint8_t"
elif ninput < 16:
return "uint16_t"
elif ninput < 32:
return "uint32_t"
else:
raise TooBigDimensionsException
class FISModelType(ModelType):
def __init__(self):
super().__init__()
self.type = "FIS"
self.logic_function_name = "fisLogic"
self.init_function_name = "initFisLogic"
self.output_function_name = "calculateFisOutput"
def update(self, model):
super().update(model)
class FEQModelType(ModelType):
def __init__(self):
super().__init__()
self.type = "FIS"
self.logic_function_name = "feqLogic"
self.init_function_name = "initFeqLogic"
self.output_function_name = "calculateFeqOutput"
def update(self, model):
super().update(model)
class ModelTypeSet:
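    """Keeps one ModelType instance per distinct model type and adds each new model to the matching one."""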
def __init__(self):
self.model_type_list = []
def update(self, model):
model_type = None
if model.type.upper() == 'F-IND':
model_type = FINDModelType()
elif model.type.upper() == 'FEQ':
model_type = FEQModelType()
        elif model.type.upper() == 'FIS':
model_type = FISModelType()
else:
raise ModelNotFoundException
if model_type not in self.model_type_list:
self.model_type_list.append(model_type)
actual_model_type = self.model_type_list[self.model_type_list.index(
model_type)]
actual_model_type.update(model)
def __iter__(self):
return self.model_type_list.__iter__()
| lgpl-3.0 | 2,298,482,648,726,240,300 | 26.08 | 76 | 0.57644 | false |
sippeproject/vagoth | vagoth/virt/utils/mc_json_rpc.py | 1 | 1790 | #!/usr/bin/python
#
# Vagoth Cluster Management Framework
# Copyright (C) 2013 Robert Thomson
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
from subprocess import Popen, PIPE
from os.path import abspath, dirname, join
import json
class MCollectiveException(Exception): pass
def mcollective_call(agent, action, identity=None, timeout=None, **kwargs):
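    """Call an MCollective agent action by piping a JSON request to the mc_json_rpc.rb
    helper and returning its parsed JSON response."""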
mcdict = {
"agent": agent,
"action": action,
"arguments": kwargs,
}
if identity is not None:
mcdict["identity"] = identity
if timeout is not None:
mcdict["timeout"] = timeout
mcjson = json.dumps(mcdict)
ruby_script=join(abspath(dirname(__file__)), "mc_json_rpc.rb")
process = Popen([ruby_script, "-"], stdin=PIPE, stdout=PIPE)
process.stdin.write(mcjson)
process.stdin.close()
result = process.stdout.read()
process.stdout.close()
process.wait()
if process.returncode == 0:
return json.loads(result)
else:
raise MCollectiveException(
"mc-json-rpc.rb exited with {0}: {1}".format(
process.returncode, result))
| lgpl-2.1 | 1,314,612,439,898,897,400 | 34.098039 | 80 | 0.694413 | false |
slackeater/anal-beh | firefoxSelenium.py | 1 | 1686 | #! /usr/bin/env python
import sys
from classes.sitemanager import SiteManager
from classes.gui.main import MainWindow
from classes.printer import Printer
from PyQt4 import QtGui
import argparse
__author__ = 'snake'
def main():
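    """Launch the GUI when called without arguments; otherwise parse CLI options and browse the requested sites."""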
if len(sys.argv) == 1:
app = QtGui.QApplication(sys.argv)
main = MainWindow()
main.show()
sys.exit(app.exec_())
else:
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--site", help="specify the sites you want to use."
" You can use country name to run only websites of that country.")
parser.add_argument("-c", "--cred", help="specify a file with the credentials for the log in of the sites.")
parser.add_argument("-u", "--urls", help="specify a file with a list of url to browse")
args = parser.parse_args()
p = Printer('console')
if args.urls:
sm = SiteManager("", p)
sm.urlsfromfile(args.urls)
else:
# Get the command line parameters
sitename = args.site
fileName = args.cred
sitecount = 0
sitearray = []
countries = ['usa', 'ch', 'it', 'uk', 'fr']
for site in sitename.split(','):
if len(site) != 0:
sitearray.append(site)
sitecount += 1
#Browse the site in order to collect some data
sm = SiteManager(fileName, p)
if sitename in countries:
sm.countrysession(sitename)
elif sitecount >= 1:
sm.surf(sitearray)
if __name__ == "__main__":
main()
| gpl-2.0 | 5,399,769,859,326,694,000 | 30.222222 | 116 | 0.544484 | false |
globz/kitchen-cli | setup.py | 1 | 1945 | """Packaging settings."""
from codecs import open
from os.path import abspath, dirname, join
from subprocess import call
from setuptools import Command, find_packages, setup
from kitchen import __version__
this_dir = abspath(dirname(__file__))
with open(join(this_dir, 'README.md'), encoding='utf-8') as file:
long_description = file.read()
class RunTests(Command):
"""Run all tests."""
description = 'run tests'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
"""Run all tests!"""
errno = call(['py.test', '--cov=kitchen', '--cov-report=term-missing'])
raise SystemExit(errno)
setup(
name = 'kitchen',
version = __version__,
description = 'A basic version control software baked into a "cooking simulator"',
long_description = long_description,
url = 'https://github.com/globz/kitchen-cli',
author = 'globz',
author_email = '[email protected]',
license = 'GPL-2.0',
classifiers = [
'Intended Audience :: Developers',
'Topic :: Utilities',
'License :: Public Domain',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords = 'cli',
packages = find_packages(exclude=['docs', 'tests*']),
install_requires = ['docopt'],
extras_require = {
'test': ['coverage', 'pytest', 'pytest-cov'],
},
entry_points = {
'console_scripts': [
'kitchen=kitchen.cli:main',
],
},
cmdclass = {'test': RunTests},
)
| gpl-2.0 | 7,183,078,768,081,821,000 | 26.785714 | 86 | 0.590231 | false |
levilucio/SyVOLT | ECore_Copier_MM/transformation/HEPackage.py | 1 | 2231 | from core.himesis import Himesis
import uuid
class HEPackage(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule EPackage.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HEPackage, self).__init__(name='HEPackage', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """EPackage"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'EPackage')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
# match class EPackage() node
self.add_node()
self.vs[3]["mm__"] = """EPackage"""
self.vs[3]["attr1"] = """+"""
# match_contains node for class EPackage()
self.add_node()
self.vs[4]["mm__"] = """match_contains"""
# apply class EPackage() node
self.add_node()
self.vs[5]["mm__"] = """EPackage"""
self.vs[5]["attr1"] = """1"""
# apply_contains node for class EPackage()
self.add_node()
self.vs[6]["mm__"] = """apply_contains"""
# Add the edges
self.add_edges([
(0,4), # matchmodel -> match_contains
(4,3), # match_contains -> match_class EPackage()
(1,6), # applymodel -> apply_contains
(6,5), # apply_contains -> apply_class EPackage()
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = [((5,'name'),(3,'name')), ((5,'nsURI'),(3,'nsURI')), ((5,'nsPrefix'),(3,'nsPrefix')), ((5,'ApplyAttribute'),('constant','solveRef')), ]
| mit | 5,270,245,473,863,462,000 | 27.602564 | 163 | 0.469745 | false |
noelevans/sandpit | wordsnake/wordsnaketest.py | 1 | 2233 | from wordsnake import flattern, getLeaves, listOfMapsToMap
from wordsnake import possibleSingleShift, snake, targetFound, validWord
import unittest
class WordSnakeTest( unittest.TestCase ):
  def setUp( self ):
pass
def test_validWord( self ):
self.assertTrue( validWord( 'hello' ) )
self.assertFalse( validWord( 'iufhwfb' ) )
def test_listOfMapsToMap( self ):
self.assertEquals(
{ 'giraffe' : {}, 'hippo' : {} },
listOfMapsToMap(
[
{ 'giraffe': {} },
{ 'hippo' : {} }, {} ] ) )
def test_getLeaves( self ):
self.assertEquals(
['giraffe'],
getLeaves( {
'hi': { 'bye': {} },
'hello': { 'ciao': { 'giraffe': {} } },
'salut': {} } ) )
self.assertEquals(
[ 'hippo', 'giraffe' ] ,
getLeaves( {
'hi': { 'ciao': { 'giraffe': {} } },
'hello': { 'bye': {} },
'salut': { 'aufwidersen': { 'hippo': {} } } } ) )
self.assertEquals(
[ 'uno', 'due' ],
getLeaves( {
'alpha': [ 'uno', 'due' ],
'beta': { 'dos' : {} } } ) )
self.assertEquals(
[ 'uno', 'due' ],
getLeaves( {
'beta': { 'dos' : {} },
'alpha': [ 'uno', 'due' ] } ) )
def test_flattern( self ):
self.maxDiff = None
self.assertEquals( sorted( [ 'hi', 'ciao', 'giraffe', 'hello', 'bye', 'salut', 'aufwidersen', 'hippo' ] ),
sorted( flattern( {
'hi': { 'ciao': { 'giraffe': {} } },
'hello': { 'bye': {} },
'salut': { 'aufwidersen': { 'hippo': {} } }
} ) ) )
def test_possibleSingleShift( self ):
self.assertEquals(
[ 'bat', 'eat', 'fat', 'hat', 'lat', 'mat', 'oat', 'pat',
'rat', 'sat', 'tat', 'vat', 'cit', 'cot', 'cut', 'cwt',
'cab', 'cad', 'cal', 'cam', 'can', 'cap', 'car', 'caw', 'cay'],
possibleSingleShift( 'cat' ) )
def test_targetFound( self ):
self.assertTrue( targetFound( { 'hi': { 'bye': {} },
'hello': { 'ciao': { 'giraffe': {} } },
'salut': {}
}, 'giraffe' ) )
def test_snake( self ):
self.assertEquals(
'dog',
snake( 'cat', 'dog' ) )
if __name__ == '__main__':
unittest.main()
| mit | 2,287,705,511,824,483,600 | 28.773333 | 109 | 0.464845 | false |