repo_name (string, lengths 5-92) | path (string, lengths 4-232) | copies (string, 19 classes) | size (string, lengths 4-7) | content (string, lengths 721-1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
notapresent/rbm2m | rbm2m/action/scan_manager.py | 1 | 3426 |
# -*- coding: utf-8 -*-
import datetime
import logging
from sqlalchemy import and_, func
from base_manager import BaseManager
from ..models import Scan, scan_records, Genre
# All scans with no activity for this long are considered stalled
INACTIVITY_PERIOD = datetime.timedelta(seconds=600)
# Update interval
UPDATE_INTERVAL = datetime.timedelta(days=1)
logger = logging.getLogger(__name__)
class ScanManager(BaseManager):
"""
Handles all DB interactions regarding scans
"""
__model__ = Scan
def get_current_scans(self, genre_id=None):
"""
Returns currently running scans for genre (or all genres if genre_id is None)
:return list of Scans
"""
query = (
self.session.query(Scan)
.filter(Scan.status.in_(['queued', 'running']))
)
if genre_id:
query = query.filter(Scan.genre_id == genre_id)
return query.all()
def last_scans(self):
return (
self.session.query(Scan)
.order_by(Scan.started_at.desc())
.limit(50)
.all()
)
def records_not_in_scan(self, scan_id, rec_ids):
result = (
self.session.query(scan_records.c.record_id)
.filter(scan_records.c.scan_id == scan_id)
.filter(scan_records.c.record_id.in_(rec_ids))
.all()
)
in_scan = [rec_id for rec_id, in result]
return list(set(rec_ids) - set(in_scan))
def get_stalled_scans(self):
"""
        Return scans with no activity during the last INACTIVITY_PERIOD
:return: List of stalled scans
"""
threshold = datetime.datetime.utcnow() - INACTIVITY_PERIOD
active_scans = (
self.session.query(Scan)
.filter(Scan.status.in_(['queued', 'running']))
.all()
)
rv = [s for s in active_scans if s.last_action < threshold]
return rv
def get_genre_with_no_scans_in_24h(self):
"""
        Find one imported genre for which there were no successful scans in the last day
:return: Genre
"""
threshold = datetime.datetime.utcnow() - UPDATE_INTERVAL
q = (
self.session.query(Genre)
.select_from(Scan)
.join(Genre)
.filter(Scan.status == 'success')
.filter(Genre.import_enabled.is_(True))
.group_by(Scan.genre_id)
.having(func.max(Scan.started_at) < threshold)
)
return q.first()
def get_genre_with_no_scans(self):
"""
Find one imported genre for which there were no successful scans at all
:return: Genre
"""
q = (
self.session.query(Genre)
.outerjoin(Scan,
and_(
Scan.genre_id == Genre.id,
Scan.status == 'success')
)
.filter(Genre.import_enabled.is_(True))
.filter(Scan.id.is_(None))
)
return q.first()
def clean_up_old_scans(self):
"""
Delete all scans older than 7 days from now
"""
threshold = datetime.datetime.utcnow() - datetime.timedelta(days=7)
self.session.query(Scan).filter(Scan.started_at < threshold).delete()
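# Illustrative usage sketch (editor's addition, not from the original module; it
# assumes BaseManager wires a SQLAlchemy session into `self.session`, as the
# methods above rely on):
#   manager = ScanManager(session)
#   for scan in manager.get_stalled_scans():
#       logger.warning("Scan %s inactive since %s", scan.id, scan.last_action)
#   manager.clean_up_old_scans()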
| apache-2.0 | 1,392,480,987,541,680,600 | 28.534483 | 90 | 0.542323 | false |
pdarragh/EMC-Mars-Challenge | Hackpack/db.py | 1 | 2571 |
import json
import pymongo
from pymongo import MongoClient
client = MongoClient('mongodb://107.170.244.164/', 27017)
db = client.mars_db
game_coll = db.game_data
sensor_coll = db.sensor_data
log_coll = db.log_data
db2 = client.mars_db2
game_coll2 = db2.game_data
sensor_coll2 = db2.sensor_data
log_coll2 = db2.log_data
db3 = client.mars_db3
game_coll3 = db3.game_data
sensor_coll3 = db3.sensor_data
log_coll3 = db3.log_data
# Inserts the json data into the game_data collection
# Returns the inserted_id
def game_insert(json_data):
result = game_coll.insert_one(json_data)
game_coll2.insert_one(json_data)
game_coll3.insert_one(json_data)
return result.inserted_id
# Gets the data based on the json query
def game_get(json_query):
return game_coll.find(json_query)
# Returns an array of all of the data in the game_data
# collection
def game_get_all():
return game_coll.find()
# Gets the records where the radiation and temperature readings are
# greater than the specified thresholds and the solar flare flag matches
#
# Give the radiation value first, then the temperature value,
# then the flare value
def game_get_threshold(rad, temp, flare):
    # Use a real MongoDB comparison operator; concatenating "$gt: " into a string
    # would compare the field against a literal string instead of applying $gt.
    return game_coll.find({"readings.radiation": {"$gt": rad},
                           "readings.temperature": {"$gt": temp},
                           "readings.solarFlare": flare})
def game_reset():
game_coll.drop()
# Inserts the json data into the sensor_data collection
# Returns the inserted_id
def sensor_insert(json_data):
result = sensor_coll.insert_one(json_data)
sensor_coll2.insert_one(json_data)
sensor_coll3.insert_one(json_data)
return result.inserted_id
# Gets the data based on the json query
def sensor_get(json_query):
return sensor_coll.find(json_query)
# Returns an array of all of the data in the sensor_data
# collection
def sensor_get_all():
return sensor_coll.find()
# Gets the records where the radiation and temperature readings are
# greater than the specified thresholds and the solar flare flag matches
#
# Give the radiation value first, then the temperature value,
# then the flare value
def sensor_get_threshold(rad, temp, flare):
    # Same fix as game_get_threshold: build a proper $gt operator document.
    return sensor_coll.find({"readings.radiation": {"$gt": rad},
                             "readings.temperature": {"$gt": temp},
                             "readings.solarFlare": flare})
def sensor_reset():
sensor_coll.drop()
def log_insert(json_data):
log_coll.insert_one(json_data)
log_coll2.insert_one(json_data)
log_coll3.insert_one(json_data)
def log_get(json_query):
    return log_coll.find(json_query)
def log_get_all():
    return log_coll.find()
| mit | -148,093,916,572,310,200 | 22.381818 | 61 | 0.702839 | false |
rs-dev/Test-Secretary | test_secretary/settings.py | 1 | 3278 |
"""
Django settings for test_secretary project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
import platform
devs = ['thinkstation_tester']
dev_mode = platform.node() in devs
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wm#%lip)^y=$5r850+g5mbv!nd#5eh8x9+icqe=2vjvn@#1kbn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = dev_mode
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'test_secretary',
'unittester',
'django_extensions',
'south',
'kombu.transport.django',
'djcelery',
)
if dev_mode:
INSTALLED_APPS += ('debug_toolbar',)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.request"
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader'
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
os.path.join(BASE_DIR, 'test_secretary', 'templates'),
)
ROOT_URLCONF = 'test_secretary.urls'
WSGI_APPLICATION = 'test_secretary.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'test_secretary',
'USER': 'testsecretary',
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Vienna'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")
BROKER_URL = 'django://'
CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend'
CELERY_IGNORE_RESULT = False
CELERY_ACCEPT_CONTENT = ['json', 'pickle']
| isc | -6,406,374,605,176,211,000 | 25.015873 | 71 | 0.711104 | false |
DigitalCampus/django-oppia | tests/profile/views/test_feedback.py | 1 | 3143 |
from django.urls import reverse
from oppia.test import OppiaTestCase
class FeedbackViewsTest(OppiaTestCase):
fixtures = ['tests/test_user.json',
'tests/test_oppia.json',
'tests/test_quiz.json',
'tests/test_permissions.json',
'tests/test_cohort.json',
'default_gamification_events.json',
'tests/test_tracker.json',
'tests/test_course_permissions.json',
'tests/test_feedback.json']
user_all_feedback_user = reverse('profile:user_all_feedback_responses',
args=[2])
user_all_feedback_admin = reverse('profile:user_all_feedback_responses',
args=[1])
feedback_template = 'profile/feedback/global_responses.html'
def test_feedback_list_admin_get_user(self):
self.client.force_login(user=self.admin_user)
response = self.client.get(self.user_all_feedback_user)
self.assertEqual(200, response.status_code)
self.assertTemplateUsed(response, self.feedback_template)
def test_feedback_list_staff_get_user(self):
self.client.force_login(user=self.staff_user)
response = self.client.get(self.user_all_feedback_user)
self.assertEqual(200, response.status_code)
self.assertTemplateUsed(response, self.feedback_template)
def test_feedback_list_teacher_get_user(self):
self.client.force_login(user=self.teacher_user)
response = self.client.get(self.user_all_feedback_user)
self.assertEqual(200, response.status_code)
self.assertTemplateUsed(response, self.feedback_template)
def test_feedback_list_user_get_user(self):
self.client.force_login(user=self.normal_user)
response = self.client.get(self.user_all_feedback_user)
self.assertEqual(200, response.status_code)
self.assertTemplateUsed(response, self.feedback_template)
def test_feedback_list_admin_get_admin(self):
self.client.force_login(user=self.admin_user)
response = self.client.get(self.user_all_feedback_admin)
self.assertEqual(200, response.status_code)
self.assertTemplateUsed(response, self.feedback_template)
def test_feedback_list_staff_get_admin(self):
self.client.force_login(user=self.staff_user)
response = self.client.get(self.user_all_feedback_admin)
self.assertEqual(200, response.status_code)
self.assertTemplateUsed(response, self.feedback_template)
def test_feedback_list_teacher_get_admin(self):
self.client.force_login(user=self.teacher_user)
response = self.client.get(self.user_all_feedback_admin)
self.assertEqual(403, response.status_code)
self.assertTemplateUsed(response, self.unauthorized_template)
def test_feedback_list_user_get_admin(self):
self.client.force_login(user=self.normal_user)
response = self.client.get(self.user_all_feedback_admin)
self.assertEqual(403, response.status_code)
self.assertTemplateUsed(response, self.unauthorized_template)
| gpl-3.0 | 7,041,121,322,585,345,000 | 43.267606 | 76 | 0.671969 | false |
kustomzone/Fuzium | core/src/Site/SiteStorage.py | 1 | 18598 |
import os
import re
import shutil
import json
import time
import sys
import sqlite3
import gevent.event
from Db import Db
from Debug import Debug
from Config import config
from util import helper
from Plugin import PluginManager
@PluginManager.acceptPlugins
class SiteStorage(object):
def __init__(self, site, allow_create=True):
self.site = site
self.directory = "%s/%s" % (config.data_dir, self.site.address) # Site data diretory
self.allowed_dir = os.path.abspath(self.directory.decode(sys.getfilesystemencoding())) # Only serve file within this dir
self.log = site.log
self.db = None # Db class
self.db_checked = False # Checked db tables since startup
self.event_db_busy = None # Gevent AsyncResult if db is working on rebuild
self.has_db = self.isFile("dbschema.json") # The site has schema
if not os.path.isdir(self.directory):
if allow_create:
os.mkdir(self.directory) # Create directory if not found
else:
raise Exception("Directory not exists: %s" % self.directory)
# Load db from dbschema.json
def openDb(self, check=True):
try:
schema = self.loadJson("dbschema.json")
db_path = self.getPath(schema["db_file"])
except Exception, err:
raise Exception("dbschema.json is not a valid JSON: %s" % err)
if check:
if not os.path.isfile(db_path) or os.path.getsize(db_path) == 0: # Not exist or null
self.rebuildDb()
if not self.db:
self.db = Db(schema, db_path)
if check and not self.db_checked:
changed_tables = self.db.checkTables()
if changed_tables:
                self.rebuildDb(delete_db=False)  # TODO: only update the changed table data
def closeDb(self):
if self.db:
self.db.close()
self.event_db_busy = None
self.db = None
# Return db class
def getDb(self):
if not self.db:
self.log.debug("No database, waiting for dbschema.json...")
self.site.needFile("dbschema.json", priority=3)
self.has_db = self.isFile("dbschema.json") # Recheck if dbschema exist
if self.has_db:
self.openDb()
return self.db
# Return possible db files for the site
def getDbFiles(self):
for content_inner_path, content in self.site.content_manager.contents.iteritems():
# content.json file itself
if self.isFile(content_inner_path): # Missing content.json file
yield self.getPath(content_inner_path), self.open(content_inner_path)
else:
self.log.error("[MISSING] %s" % content_inner_path)
# Data files in content.json
content_inner_path_dir = helper.getDirname(content_inner_path) # Content.json dir relative to site
for file_relative_path in content["files"].keys():
if not file_relative_path.endswith(".json"):
                    continue  # We are only interested in json files
file_inner_path = content_inner_path_dir + file_relative_path # File Relative to site dir
file_inner_path = file_inner_path.strip("/") # Strip leading /
if self.isFile(file_inner_path):
yield self.getPath(file_inner_path), self.open(file_inner_path)
else:
self.log.error("[MISSING] %s" % file_inner_path)
# Rebuild sql cache
def rebuildDb(self, delete_db=True):
self.has_db = self.isFile("dbschema.json")
if not self.has_db:
return False
self.event_db_busy = gevent.event.AsyncResult()
schema = self.loadJson("dbschema.json")
db_path = self.getPath(schema["db_file"])
if os.path.isfile(db_path) and delete_db:
if self.db:
self.db.close() # Close db if open
time.sleep(0.5)
self.log.info("Deleting %s" % db_path)
try:
os.unlink(db_path)
except Exception, err:
self.log.error("Delete error: %s" % err)
self.db = None
self.openDb(check=False)
self.log.info("Creating tables...")
self.db.checkTables()
self.log.info("Importing data...")
cur = self.db.getCursor()
cur.execute("BEGIN")
cur.logging = False
found = 0
s = time.time()
try:
for file_inner_path, file in self.getDbFiles():
try:
if self.db.loadJson(file_inner_path, file=file, cur=cur):
found += 1
except Exception, err:
self.log.error("Error importing %s: %s" % (file_inner_path, Debug.formatException(err)))
finally:
cur.execute("END")
self.log.info("Imported %s data file in %ss" % (found, time.time() - s))
self.event_db_busy.set(True) # Event done, notify waiters
self.event_db_busy = None # Clear event
# Execute sql query or rebuild on dberror
def query(self, query, params=None):
if self.event_db_busy: # Db not ready for queries
self.log.debug("Wating for db...")
self.event_db_busy.get() # Wait for event
try:
res = self.getDb().execute(query, params)
except sqlite3.DatabaseError, err:
if err.__class__.__name__ == "DatabaseError":
self.log.error("Database error: %s, query: %s, try to rebuilding it..." % (err, query))
self.rebuildDb()
res = self.db.cur.execute(query, params)
else:
raise err
return res
# Open file object
def open(self, inner_path, mode="rb"):
return open(self.getPath(inner_path), mode)
# Open file object
def read(self, inner_path, mode="r"):
return open(self.getPath(inner_path), mode).read()
# Write content to file
def write(self, inner_path, content):
file_path = self.getPath(inner_path)
# Create dir if not exist
file_dir = os.path.dirname(file_path)
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
# Write file
if hasattr(content, 'read'): # File-like object
with open(file_path, "wb") as file:
shutil.copyfileobj(content, file) # Write buff to disk
else: # Simple string
if inner_path == "content.json" and os.path.isfile(file_path):
helper.atomicWrite(file_path, content)
else:
with open(file_path, "wb") as file:
file.write(content)
del content
self.onUpdated(inner_path)
# Remove file from filesystem
def delete(self, inner_path):
file_path = self.getPath(inner_path)
os.unlink(file_path)
self.onUpdated(inner_path, file=False)
def deleteDir(self, inner_path):
dir_path = self.getPath(inner_path)
os.rmdir(dir_path)
def rename(self, inner_path_before, inner_path_after):
for retry in range(3):
            # Workaround for "The process cannot access the file because it is being used by another process." error
try:
os.rename(self.getPath(inner_path_before), self.getPath(inner_path_after))
err = None
break
except Exception, err:
self.log.error("%s rename error: %s (retry #%s)" % (inner_path_before, err, retry))
time.sleep(0.1 + retry)
if err:
raise err
# List files from a directory
def list(self, dir_inner_path):
directory = self.getPath(dir_inner_path)
for root, dirs, files in os.walk(directory):
root = root.replace("\\", "/")
root_relative_path = re.sub("^%s" % re.escape(directory), "", root).lstrip("/")
for file_name in files:
if root_relative_path: # Not root dir
yield root_relative_path + "/" + file_name
else:
yield file_name
# Site content updated
def onUpdated(self, inner_path, file=None):
file_path = self.getPath(inner_path)
# Update Sql cache
if inner_path == "dbschema.json":
self.has_db = self.isFile("dbschema.json")
# Reopen DB to check changes
if self.has_db:
self.closeDb()
self.openDb()
elif not config.disable_db and inner_path.endswith(".json") and self.has_db: # Load json file to db
if config.verbose:
self.log.debug("Loading json file to db: %s" % inner_path)
try:
self.getDb().loadJson(file_path, file)
except Exception, err:
self.log.error("Json %s load error: %s" % (inner_path, Debug.formatException(err)))
self.closeDb()
# Load and parse json file
def loadJson(self, inner_path):
with self.open(inner_path) as file:
return json.load(file)
# Write formatted json file
def writeJson(self, inner_path, data):
content = json.dumps(data, indent=1, sort_keys=True)
# Make it a little more compact by removing unnecessary white space
def compact_dict(match):
if "\n" in match.group(0):
return match.group(0).replace(match.group(1), match.group(1).strip())
else:
return match.group(0)
content = re.sub("\{(\n[^,\[\{]{10,100}?)\}[, ]{0,2}\n", compact_dict, content, flags=re.DOTALL)
# Remove end of line whitespace
content = re.sub("(?m)[ ]+$", "", content)
# Write to disk
self.write(inner_path, content)
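    # Illustrative note (editor's reading of the regex above, not from the original
    # source): the compacting pass collapses small objects with a single key and no
    # nested structures onto one line, e.g.
    #     {
    #      "size": 1234
    #     },
    # becomes {"size": 1234}, while larger or nested objects keep their layout.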
# Get file size
def getSize(self, inner_path):
path = self.getPath(inner_path)
try:
return os.path.getsize(path)
except:
return 0
# File exist
def isFile(self, inner_path):
return os.path.isfile(self.getPath(inner_path))
# File or directory exist
def isExists(self, inner_path):
return os.path.exists(self.getPath(inner_path))
# Dir exist
def isDir(self, inner_path):
return os.path.isdir(self.getPath(inner_path))
# Security check and return path of site's file
def getPath(self, inner_path):
inner_path = inner_path.replace("\\", "/") # Windows separator fix
if not inner_path:
return self.directory
if ".." in inner_path:
raise Exception(u"File not allowed: %s" % inner_path)
return u"%s/%s" % (self.directory, inner_path)
# Get site dir relative path
def getInnerPath(self, path):
if path == self.directory:
inner_path = ""
else:
inner_path = re.sub("^%s/" % re.escape(self.directory), "", path)
return inner_path
# Verify all files sha512sum using content.json
def verifyFiles(self, quick_check=False, add_optional=False, add_changed=True):
bad_files = []
i = 0
if not self.site.content_manager.contents.get("content.json"): # No content.json, download it first
self.log.debug("VerifyFile content.json not exists")
self.site.needFile("content.json", update=True) # Force update to fix corrupt file
self.site.content_manager.loadContent() # Reload content.json
for content_inner_path, content in self.site.content_manager.contents.items():
i += 1
if i % 50 == 0:
time.sleep(0.0001) # Context switch to avoid gevent hangs
if not os.path.isfile(self.getPath(content_inner_path)): # Missing content.json file
self.log.debug("[MISSING] %s" % content_inner_path)
bad_files.append(content_inner_path)
for file_relative_path in content.get("files", {}).keys():
file_inner_path = helper.getDirname(content_inner_path) + file_relative_path # Relative to site dir
file_inner_path = file_inner_path.strip("/") # Strip leading /
file_path = self.getPath(file_inner_path)
if not os.path.isfile(file_path):
self.log.debug("[MISSING] %s" % file_inner_path)
bad_files.append(file_inner_path)
continue
if quick_check:
ok = os.path.getsize(file_path) == content["files"][file_relative_path]["size"]
else:
ok = self.site.content_manager.verifyFile(file_inner_path, open(file_path, "rb"))
if not ok:
self.log.debug("[CHANGED] %s" % file_inner_path)
if add_changed or content.get("cert_user_id"): # If updating own site only add changed user files
bad_files.append(file_inner_path)
# Optional files
optional_added = 0
optional_removed = 0
for file_relative_path in content.get("files_optional", {}).keys():
file_node = content["files_optional"][file_relative_path]
file_inner_path = helper.getDirname(content_inner_path) + file_relative_path # Relative to site dir
file_inner_path = file_inner_path.strip("/") # Strip leading /
file_path = self.getPath(file_inner_path)
if not os.path.isfile(file_path):
if self.site.content_manager.hashfield.hasHash(file_node["sha512"]):
self.site.content_manager.optionalRemove(file_inner_path, file_node["sha512"], file_node["size"])
if add_optional:
bad_files.append(file_inner_path)
continue
if quick_check:
ok = os.path.getsize(file_path) == content["files_optional"][file_relative_path]["size"]
else:
ok = self.site.content_manager.verifyFile(file_inner_path, open(file_path, "rb"))
if ok:
if not self.site.content_manager.hashfield.hasHash(file_node["sha512"]):
self.site.content_manager.optionalDownloaded(file_inner_path, file_node["sha512"], file_node["size"])
optional_added += 1
else:
if self.site.content_manager.hashfield.hasHash(file_node["sha512"]):
self.site.content_manager.optionalRemove(file_inner_path, file_node["sha512"], file_node["size"])
optional_removed += 1
bad_files.append(file_inner_path)
self.log.debug("[OPTIONAL CHANGED] %s" % file_inner_path)
if config.verbose:
self.log.debug(
"%s verified: %s, quick: %s, optionals: +%s -%s" %
(content_inner_path, len(content["files"]), quick_check, optional_added, optional_removed)
)
time.sleep(0.0001) # Context switch to avoid gevent hangs
return bad_files
# Check and try to fix site files integrity
def updateBadFiles(self, quick_check=True):
s = time.time()
bad_files = self.verifyFiles(
quick_check,
add_optional=self.site.isDownloadable(""),
add_changed=not self.site.settings.get("own") # Don't overwrite changed files if site owned
)
self.site.bad_files = {}
if bad_files:
for bad_file in bad_files:
self.site.bad_files[bad_file] = 1
self.log.debug("Checked files in %.2fs... Found bad files: %s, Quick:%s" % (time.time() - s, len(bad_files), quick_check))
# Delete site's all file
def deleteFiles(self):
self.log.debug("Deleting files from content.json...")
files = [] # Get filenames
for content_inner_path in self.site.content_manager.contents.keys():
content = self.site.content_manager.contents[content_inner_path]
files.append(content_inner_path)
# Add normal files
for file_relative_path in content.get("files", {}).keys():
file_inner_path = helper.getDirname(content_inner_path) + file_relative_path # Relative to site dir
files.append(file_inner_path)
# Add optional files
for file_relative_path in content.get("files_optional", {}).keys():
file_inner_path = helper.getDirname(content_inner_path) + file_relative_path # Relative to site dir
files.append(file_inner_path)
if self.isFile("dbschema.json"):
self.log.debug("Deleting db file...")
self.closeDb()
self.has_db = False
try:
schema = self.loadJson("dbschema.json")
db_path = self.getPath(schema["db_file"])
if os.path.isfile(db_path):
os.unlink(db_path)
except Exception, err:
self.log.error("Db file delete error: %s" % err)
for inner_path in files:
path = self.getPath(inner_path)
if os.path.isfile(path):
for retry in range(5):
try:
os.unlink(path)
break
except Exception, err:
self.log.error("Error removing %s: %s, try #%s" % (path, err, retry))
time.sleep(float(retry) / 10)
self.onUpdated(inner_path, False)
self.log.debug("Deleting empty dirs...")
for root, dirs, files in os.walk(self.directory, topdown=False):
for dir in dirs:
path = os.path.join(root, dir)
if os.path.isdir(path) and os.listdir(path) == []:
os.removedirs(path)
self.log.debug("Removing %s" % path)
if os.path.isdir(self.directory) and os.listdir(self.directory) == []:
os.removedirs(self.directory) # Remove sites directory if empty
if os.path.isdir(self.directory):
self.log.debug("Some unknown file remained in site data dir: %s..." % self.directory)
return False # Some files not deleted
else:
self.log.debug("Site data directory deleted: %s..." % self.directory)
return True # All clean
| mit | -2,596,954,781,683,501,000 | 41.364465 | 130 | 0.561136 | false |
izapolsk/integration_tests | cfme/tests/infrastructure/test_pxe_provisioning.py | 1 | 4876 |
import fauxfactory
import pytest
from widgetastic.utils import partial_match
from cfme import test_requirements
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.scvmm import SCVMMProvider
from cfme.infrastructure.pxe import get_pxe_server_from_config
from cfme.infrastructure.pxe import get_template_from_config
from cfme.provisioning import do_vm_provisioning
from cfme.utils import testgen
from cfme.utils.conf import cfme_data
pytestmark = [
pytest.mark.meta(server_roles="+automate +notifier"),
pytest.mark.usefixtures('uses_infra_providers'),
pytest.mark.tier(2)
]
def pytest_generate_tests(metafunc):
# Filter out providers without provisioning data or hosts defined
argnames, argvalues, idlist = testgen.providers_by_class(
metafunc, [InfraProvider],
required_fields=[
['provisioning', 'pxe_server'],
['provisioning', 'pxe_image'],
['provisioning', 'pxe_image_type'],
['provisioning', 'pxe_kickstart'],
['provisioning', 'pxe_template'],
['provisioning', 'datastore'],
['provisioning', 'host'],
['provisioning', 'pxe_root_password'],
['provisioning', 'vlan']
]
)
pargnames, pargvalues, pidlist = testgen.pxe_servers(metafunc)
argnames = argnames
pxe_server_names = [pval[0] for pval in pargvalues]
new_idlist = []
new_argvalues = []
for i, argvalue_tuple in enumerate(argvalues):
args = dict(list(zip(argnames, argvalue_tuple)))
provider = args['provider']
if provider.one_of(SCVMMProvider):
continue
provisioning_data = provider.data['provisioning']
pxe_server_name = provisioning_data['pxe_server']
if pxe_server_name not in pxe_server_names:
continue
pxe_cust_template = provisioning_data['pxe_kickstart']
if pxe_cust_template not in list(cfme_data.get('customization_templates', {}).keys()):
continue
new_idlist.append(idlist[i])
new_argvalues.append(argvalues[i])
testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
@pytest.fixture(scope='module')
def pxe_server(appliance, provider):
provisioning_data = provider.data['provisioning']
pxe_server_name = provisioning_data['pxe_server']
return get_pxe_server_from_config(pxe_server_name, appliance=appliance)
@pytest.fixture(scope='module')
def pxe_cust_template(appliance, provider):
provisioning_data = provider.data['provisioning']
pxe_cust_template = provisioning_data['pxe_kickstart']
return get_template_from_config(pxe_cust_template, create=True, appliance=appliance)
@pytest.fixture(scope="function")
def setup_pxe_servers_vm_prov(pxe_server, pxe_cust_template, provisioning):
if not pxe_server.exists():
pxe_server.create()
pxe_server.set_pxe_image_type(provisioning['pxe_image'], provisioning['pxe_image_type'])
@pytest.fixture(scope="function")
def vm_name():
vm_name = fauxfactory.gen_alphanumeric(20, start="test_pxe_prov_")
return vm_name
@pytest.mark.rhv1
@test_requirements.provision
def test_pxe_provision_from_template(appliance, provider, vm_name, setup_provider,
request, setup_pxe_servers_vm_prov):
"""Tests provisioning via PXE
Metadata:
test_flag: pxe, provision
suite: infra_provisioning
Polarion:
assignee: jhenner
casecomponent: Provisioning
initialEstimate: 1/6h
testtype: functional
upstream: yes
"""
# generate_tests makes sure these have values
(
pxe_template, host, datastore,
pxe_server, pxe_image, pxe_kickstart,
pxe_root_password, pxe_image_type, pxe_vlan
) = list(map(
provider.data['provisioning'].get,
(
'pxe_template', 'host', 'datastore',
'pxe_server', 'pxe_image', 'pxe_kickstart',
'pxe_root_password', 'pxe_image_type', 'vlan'
)
))
request.addfinalizer(
lambda: appliance.collections.infra_vms.instantiate(vm_name, provider)
.cleanup_on_provider())
provisioning_data = {
'catalog': {
'vm_name': vm_name,
'provision_type': 'PXE',
'pxe_server': pxe_server,
'pxe_image': {'name': pxe_image}},
'environment': {
'host_name': {'name': host},
'datastore_name': {'name': datastore}},
'customize': {
'custom_template': {'name': pxe_kickstart},
'root_password': pxe_root_password},
'network': {
'vlan': partial_match(pxe_vlan)}}
do_vm_provisioning(appliance, pxe_template, provider, vm_name, provisioning_data, request,
num_sec=3600)
| gpl-2.0 | 682,521,297,626,256,600 | 32.170068 | 94 | 0.640484 | false |
migasfree/migasfree-backend | migasfree/settings/base.py | 1 | 7965 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2021 Jose Antonio Chavarría <[email protected]>
# Copyright (c) 2015-2021 Alberto Gacías <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import django
import django.conf.global_settings as DEFAULT_SETTINGS
from corsheaders.defaults import default_headers
from .migasfree import BASE_DIR, MIGASFREE_TMP_DIR
if django.VERSION < (3, 1, 0, 'final'):
print('Migasfree requires Django 3.1.0 at least. Please, update it.')
exit(1)
ADMINS = (
('Your name', '[email protected]'),
)
MANAGERS = ADMINS
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Madrid'
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
FIRST_DAY_OF_WEEK = 1
DATE_FORMAT = 'Y-m-d'
DATETIME_FORMAT = 'Y-m-d H:i:s'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = False
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
)
STATIC_URL = '/static/'
MEDIA_URL = '/pub/'
FILE_UPLOAD_TEMP_DIR = MIGASFREE_TMP_DIR
LOGIN_REDIRECT_URL = '/'
LOCALE_PATHS = (
os.path.join(BASE_DIR, 'i18n'),
)
ADMIN_SITE_ROOT_URL = '/admin/'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
DEFAULT_CHARSET = 'utf-8'
ROOT_URLCONF = 'migasfree.urls'
ASGI_APPLICATION = 'migasfree.asgi.application'
WSGI_APPLICATION = 'migasfree.wsgi.application'
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.humanize',
'django.contrib.admindocs',
'django.contrib.messages',
'django.contrib.staticfiles',
'graphene_django',
'rest_framework',
'rest_framework.authtoken',
'drf_yasg',
'rest_framework_filters',
'dj_rest_auth',
'django_filters',
'corsheaders',
'djoser',
'import_export',
'markdownx',
'channels',
'migasfree.core',
'migasfree.app_catalog',
'migasfree.client',
'migasfree.stats',
'migasfree.hardware',
'migasfree.device',
'migasfree.api_v4',
)
DATA_UPLOAD_MAX_MEMORY_SIZE = 1024 * 1024 * 100 # 100 MB
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
'rest_framework_simplejwt.authentication.JWTAuthentication',
),
'DEFAULT_FILTER_BACKENDS': (
'rest_framework_filters.backends.RestFrameworkFilterBackend',
'rest_framework.filters.OrderingFilter',
'rest_framework.filters.SearchFilter',
),
'DEFAULT_PAGINATION_CLASS': 'migasfree.paginations.DefaultPagination',
'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema',
}
REST_AUTH_SERIALIZERS = {
'USER_DETAILS_SERIALIZER': 'migasfree.core.serializers.UserProfileSerializer',
}
GRAPHENE = {
'SCHEMA': 'migasfree.schema.schema'
}
CORS_ALLOW_HEADERS = list(default_headers) + [
'accept-language',
]
# http://docs.python.org/2/howto/logging-cookbook.html
# http://docs.python.org/2/library/logging.html#logrecord-attributes
LOGGING = {
'version': 1,
'formatters': {
'verbose': {
'format': '%(asctime)s - %(levelname)s - %(module)s - %(lineno)d '
'- %(funcName)s - %(message)s',
},
'simple': {
'format': '%(asctime)s - %(levelname)s - %(filename)s - %(message)s'
},
},
'handlers': {
'console': {
'level': 'ERROR',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
'file': {
'level': 'ERROR',
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'verbose',
'filename': os.path.join(MIGASFREE_TMP_DIR, 'migasfree-backend.log'),
'maxBytes': 1024 * 1024 * 10, # 10 MB
},
'celery': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(MIGASFREE_TMP_DIR, 'migasfree-celery.log'),
'formatter': 'simple',
'maxBytes': 1024 * 1024 * 100, # 100 MB
},
},
'loggers': {
'migasfree': {
'handlers': ['console', 'file'],
'level': 'ERROR',
},
'celery': {
'handlers': ['celery', 'console'],
'level': 'DEBUG',
},
},
}
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.request',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
'debug': False,
}
}
]
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
SWAGGER_SETTINGS = {
'LOGIN_URL': '/rest-auth/login/',
'LOGOUT_URL': '/rest-auth/logout/'
}
SESSION_COOKIE_NAME = 'migasfree_backend'
# CSRF_COOKIE_NAME = 'csrftoken_migasfree_backend' # issue with markdownx component :_(
| gpl-3.0 | -5,171,708,381,460,351,000 | 29.864341 | 91 | 0.656662 | false |
Spanarchie/pyRest | pyRest/lib/python2.7/site-packages/py/_path/common.py | 1 | 12572 |
"""
"""
import os, sys
import py
class Checkers:
_depend_on_existence = 'exists', 'link', 'dir', 'file'
def __init__(self, path):
self.path = path
def dir(self):
raise NotImplementedError
def file(self):
raise NotImplementedError
def dotfile(self):
return self.path.basename.startswith('.')
def ext(self, arg):
if not arg.startswith('.'):
arg = '.' + arg
return self.path.ext == arg
def exists(self):
raise NotImplementedError
def basename(self, arg):
return self.path.basename == arg
def basestarts(self, arg):
return self.path.basename.startswith(arg)
def relto(self, arg):
return self.path.relto(arg)
def fnmatch(self, arg):
return self.path.fnmatch(arg)
def endswith(self, arg):
return str(self.path).endswith(arg)
def _evaluate(self, kw):
for name, value in kw.items():
invert = False
meth = None
try:
meth = getattr(self, name)
except AttributeError:
if name[:3] == 'not':
invert = True
try:
meth = getattr(self, name[3:])
except AttributeError:
pass
if meth is None:
raise TypeError(
"no %r checker available for %r" % (name, self.path))
try:
if py.code.getrawcode(meth).co_argcount > 1:
if (not meth(value)) ^ invert:
return False
else:
if bool(value) ^ bool(meth()) ^ invert:
return False
except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY):
# EBUSY feels not entirely correct,
# but its kind of necessary since ENOMEDIUM
# is not accessible in python
for name in self._depend_on_existence:
if name in kw:
if kw.get(name):
return False
name = 'not' + name
if name in kw:
if not kw.get(name):
return False
return True
class NeverRaised(Exception):
pass
class PathBase(object):
""" shared implementation for filesystem path objects."""
Checkers = Checkers
def __div__(self, other):
return self.join(str(other))
__truediv__ = __div__ # py3k
def basename(self):
""" basename part of path. """
return self._getbyspec('basename')[0]
basename = property(basename, None, None, basename.__doc__)
def dirname(self):
""" dirname part of path. """
return self._getbyspec('dirname')[0]
dirname = property(dirname, None, None, dirname.__doc__)
def purebasename(self):
""" pure base name of the path."""
return self._getbyspec('purebasename')[0]
purebasename = property(purebasename, None, None, purebasename.__doc__)
def ext(self):
""" extension of the path (including the '.')."""
return self._getbyspec('ext')[0]
ext = property(ext, None, None, ext.__doc__)
def dirpath(self, *args, **kwargs):
""" return the directory Path of the current Path joined
with any given path arguments.
"""
return self.new(basename='').join(*args, **kwargs)
def read_binary(self):
""" read and return a bytestring from reading the path. """
with self.open('rb') as f:
return f.read()
def read_text(self, encoding):
""" read and return a Unicode string from reading the path. """
with self.open("r", encoding=encoding) as f:
return f.read()
def read(self, mode='r'):
""" read and return a bytestring from reading the path. """
with self.open(mode) as f:
return f.read()
def readlines(self, cr=1):
""" read and return a list of lines from the path. if cr is False, the
newline will be removed from the end of each line. """
if not cr:
content = self.read('rU')
return content.split('\n')
else:
f = self.open('rU')
try:
return f.readlines()
finally:
f.close()
def load(self):
""" (deprecated) return object unpickled from self.read() """
f = self.open('rb')
try:
return py.error.checked_call(py.std.pickle.load, f)
finally:
f.close()
def move(self, target):
""" move this path to target. """
if target.relto(self):
raise py.error.EINVAL(target,
"cannot move path into a subdirectory of itself")
try:
self.rename(target)
except py.error.EXDEV: # invalid cross-device link
self.copy(target)
self.remove()
def __repr__(self):
""" return a string representation of this path. """
return repr(str(self))
def check(self, **kw):
""" check a path for existence and properties.
Without arguments, return True if the path exists, otherwise False.
valid checkers::
file=1 # is a file
file=0 # is not a file (may not even exist)
dir=1 # is a dir
link=1 # is a link
exists=1 # exists
You can specify multiple checker definitions, for example::
path.check(file=1, link=1) # a link pointing to a file
"""
if not kw:
kw = {'exists' : 1}
return self.Checkers(self)._evaluate(kw)
def fnmatch(self, pattern):
"""return true if the basename/fullname matches the glob-'pattern'.
valid pattern characters::
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
If the pattern contains a path-separator then the full path
is used for pattern matching and a '*' is prepended to the
pattern.
if the pattern doesn't contain a path-separator the pattern
is only matched against the basename.
"""
return FNMatcher(pattern)(self)
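    # Hypothetical usage sketch (editor's addition; the path value is made up):
    #   p = py.path.local("/tmp/data/report.json")
    #   p.fnmatch("*.json")       # True: no separator, matched against the basename
    #   p.fnmatch("data/*.json")  # True: separator present, '*' is prepended and
    #                             # the full path is used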
def relto(self, relpath):
""" return a string which is the relative part of the path
to the given 'relpath'.
"""
if not isinstance(relpath, (str, PathBase)):
raise TypeError("%r: not a string or path object" %(relpath,))
strrelpath = str(relpath)
if strrelpath and strrelpath[-1] != self.sep:
strrelpath += self.sep
#assert strrelpath[-1] == self.sep
#assert strrelpath[-2] != self.sep
strself = str(self)
if sys.platform == "win32" or getattr(os, '_name', None) == 'nt':
if os.path.normcase(strself).startswith(
os.path.normcase(strrelpath)):
return strself[len(strrelpath):]
elif strself.startswith(strrelpath):
return strself[len(strrelpath):]
return ""
def ensure_dir(self, *args):
""" ensure the path joined with args is a directory. """
return self.ensure(*args, **{"dir": True})
def bestrelpath(self, dest):
""" return a string which is a relative path from self
(assumed to be a directory) to dest such that
self.join(bestrelpath) == dest and if not such
path can be determined return dest.
"""
try:
if self == dest:
return os.curdir
base = self.common(dest)
if not base: # can be the case on windows
return str(dest)
self2base = self.relto(base)
reldest = dest.relto(base)
if self2base:
n = self2base.count(self.sep) + 1
else:
n = 0
l = [os.pardir] * n
if reldest:
l.append(reldest)
target = dest.sep.join(l)
return target
except AttributeError:
return str(dest)
def exists(self):
return self.check()
def isdir(self):
return self.check(dir=1)
def isfile(self):
return self.check(file=1)
def parts(self, reverse=False):
""" return a root-first list of all ancestor directories
plus the path itself.
"""
current = self
l = [self]
while 1:
last = current
current = current.dirpath()
if last == current:
break
l.append(current)
if not reverse:
l.reverse()
return l
def common(self, other):
""" return the common part shared with the other path
or None if there is no common part.
"""
last = None
for x, y in zip(self.parts(), other.parts()):
if x != y:
return last
last = x
return last
def __add__(self, other):
""" return new path object with 'other' added to the basename"""
return self.new(basename=self.basename+str(other))
def __cmp__(self, other):
""" return sort value (-1, 0, +1). """
try:
return cmp(self.strpath, other.strpath)
except AttributeError:
return cmp(str(self), str(other)) # self.path, other.path)
def __lt__(self, other):
try:
return self.strpath < other.strpath
except AttributeError:
return str(self) < str(other)
def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
""" yields all paths below the current one
fil is a filter (glob pattern or callable), if not matching the
path will not be yielded, defaulting to None (everything is
returned)
rec is a filter (glob pattern or callable) that controls whether
a node is descended, defaulting to None
            ignore is an Exception class that is ignored when calling listdir()
on any of the paths (by default, all exceptions are reported)
bf if True will cause a breadthfirst search instead of the
default depthfirst. Default: False
sort if True will sort entries within each directory level.
"""
for x in Visitor(fil, rec, ignore, bf, sort).gen(self):
yield x
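    # Hypothetical usage sketch (editor's addition; directory names are made up):
    #   for p in somedir.visit(fil="*.py", rec=lambda d: d.basename != ".svn",
    #                          bf=True, sort=True):
    #       print(p)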
def _sortlist(self, res, sort):
if sort:
if hasattr(sort, '__call__'):
res.sort(sort)
else:
res.sort()
def samefile(self, other):
""" return True if other refers to the same stat object as self. """
return self.strpath == str(other)
class Visitor:
def __init__(self, fil, rec, ignore, bf, sort):
if isinstance(fil, str):
fil = FNMatcher(fil)
if isinstance(rec, str):
self.rec = FNMatcher(rec)
elif not hasattr(rec, '__call__') and rec:
self.rec = lambda path: True
else:
self.rec = rec
self.fil = fil
self.ignore = ignore
self.breadthfirst = bf
self.optsort = sort and sorted or (lambda x: x)
def gen(self, path):
try:
entries = path.listdir()
except self.ignore:
return
rec = self.rec
dirs = self.optsort([p for p in entries
if p.check(dir=1) and (rec is None or rec(p))])
if not self.breadthfirst:
for subdir in dirs:
for p in self.gen(subdir):
yield p
for p in self.optsort(entries):
if self.fil is None or self.fil(p):
yield p
if self.breadthfirst:
for subdir in dirs:
for p in self.gen(subdir):
yield p
class FNMatcher:
def __init__(self, pattern):
self.pattern = pattern
def __call__(self, path):
pattern = self.pattern
if pattern.find(path.sep) == -1:
name = path.basename
else:
name = str(path) # path.strpath # XXX svn?
if not os.path.isabs(pattern):
pattern = '*' + path.sep + pattern
return py.std.fnmatch.fnmatch(name, pattern)
| unlicense | -194,356,141,815,722,620 | 30.989822 | 82 | 0.529271 | false |
craigschmidt/rpy2-2.3.9 | setup.py | 1 | 19590 |
import os, os.path, sys, shutil, re, itertools, warnings
from collections import namedtuple
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.build import build as _build
from distutils.core import setup
from distutils.core import Extension
pack_name = 'rpy2'
pack_version = __import__('rpy').__version__
default_lib_directory = 'bin' if sys.platform=='win32' else 'lib'
package_prefix='.'
if sys.version_info >= (3,):
print("Using 2to3 to translate Python2-only idioms into Python3 code. Please wait...")
# Python 3 and we need to translate code
package_prefix = os.path.join('build', 'python3_rpy')
from distutils import filelist, dir_util, file_util, util#, log
#log.set_verbosity(1)
fl = filelist.FileList()
tmp = open("MANIFEST.in")
for line in tmp:
line = line.rstrip()
if line != '':
fl.process_template_line(line)
tmp.close()
dir_util.create_tree(package_prefix, fl.files)
outfiles_2to3 = []
#dist_script = os.path.join("build", "src", "distribute_setup.py")
for f in fl.files:
outf, copied = file_util.copy_file(f, os.path.join(package_prefix, f),
update=1)
if copied and outf.endswith(".py"): #and outf != dist_script:
outfiles_2to3.append(outf)
if copied and outf.endswith('api_tests.txt'):
# XXX support this in distutils as well
from lib2to3.main import main
main('lib2to3.fixes', ['-wd', os.path.join(package_prefix,
'tests', 'api_tests.txt')])
util.run_2to3(outfiles_2to3)
# arrange setup to use the copy
sys.path.insert(0, package_prefix)
src_root = package_prefix
print('done.')
else:
from distutils.core import setup
from distutils.core import Extension
class build(_build):
user_options = _build.user_options + \
[
#('r-autoconfig', None,
# "guess all configuration paths from " +\
# "the R executable found in the PATH " +\
# "(this overrides r-home)"),
('r-home=', None,
"full path for the R home to compile against " +\
"(see r-autoconfig for an automatic configuration)"),
('r-home-lib=', None,
"full path for the R shared lib/ directory " +\
"(<r-home>/%s otherwise)" % default_lib_directory),
('r-home-modules=', None,
"full path for the R shared modules/ directory " +\
"(<r-home>/modules otherwise)"),
('ignore-check-rversion', None, 'ignore checks for supported R versions')]
boolean_options = _build.boolean_options + \
['ignore_check_rversion', ]
def initialize_options(self):
_build.initialize_options(self)
self.r_autoconfig = None
self.r_home = None
self.r_home_lib = None
self.r_home_modules = None
self.ignore_check_rversion = False
class build_ext(_build_ext):
"""
-DRPY_STRNDUP : definition of strndup()
-DRPY_VERBOSE
-DRPY_DEBUG_PRESERV
-DRPY_DEBUG_PROMISE : evaluation of promises
-DRPY_DEBUG_OBJECTINIT : initialization of PySexpObject
-DRPY_DEBUG_CONSOLE : console I/O
-DRPY_DEBUG_COBJECT : SexpObject passed as a CObject
-DRPY_DEBUG_GRDEV
"""
user_options = _build_ext.user_options + \
[
#('r-autoconfig', None,
# "guess all configuration paths from " +\
# "the R executable found in the PATH " +\
# "(this overrides r-home)"),
('r-home=', None,
"full path for the R home to compile against " +\
"(see r-autoconfig for an automatic configuration)"),
('r-home-lib=', None,
"full path for the R shared lib/ directory" +\
"(<r-home>/%s otherwise)" % default_lib_directory),
('r-home-modules=', None,
"full path for the R shared modules/ directory" +\
"(<r-home>/modules otherwise)"),
('ignore-check-rversion', None, 'ignore checks for supported R versions')]
boolean_options = _build_ext.boolean_options + \
['ignore-check-rversion', ] #+ \
#['r-autoconfig', ]
def initialize_options(self):
_build_ext.initialize_options(self)
self.r_autoconfig = None
self.r_home = None
self.r_home_lib = None
self.r_home_modules = None
self.ignore_check_rversion = False
def finalize_options(self):
self.set_undefined_options('build',
#('r_autoconfig', 'r_autoconfig'),
('r_home', 'r_home'))
_build_ext.finalize_options(self)
if self.r_home is None:
# use the R_HOME environment variable if it exists
if 'R_HOME' in os.environ:
self.r_home = os.environ['R_HOME']
if len(self.r_home) == 0:
raise SystemExit("Error: Tried to use R_HOME environment variable, but it is empty.")
else:
# see if R is in our current path, and ask it for the value of R HOME
tmp = os.popen("R RHOME")
self.r_home = tmp.readlines()
tmp.close()
if len(self.r_home) == 0:
raise SystemExit("Error: Tried to guess R's HOME but no R command in the PATH.")
#Twist if 'R RHOME' spits out a warning
if self.r_home[0].startswith("WARNING"):
self.r_home = self.r_home[1]
else:
self.r_home = self.r_home[0]
#self.r_home = [self.r_home, ]
if self.r_home is None:
raise SystemExit("Error: --r-home not specified.")
else:
self.r_home = self.r_home.split(os.pathsep)
rversions = []
for r_home in self.r_home:
r_home = r_home.strip()
rversion = get_rversion(r_home)
if rversion[0] == 'development' or \
cmp_version(rversion[:2], [2, 8]) == -1:
if self.ignore_check_rversion:
warnings.warn("R did not seem to have the minimum required version number")
else:
raise SystemExit("Error: R >= 2.8 required (and R told '%s')." %'.'.join(rversion))
rversions.append(rversion)
config = RConfig()
for about in ('--ldflags', '--cppflags'):
config += get_rconfig(r_home, about)
for about in ('LAPACK_LIBS', 'BLAS_LIBS'):
config += get_rconfig(r_home, about, True)
print(config.__repr__())
self.include_dirs.extend(config._include_dirs)
self.libraries.extend(config._libraries)
self.library_dirs.extend(config._library_dirs)
if self.r_home_modules is None:
self.library_dirs.extend([os.path.join(r_home, 'modules'), ])
else:
self.library_dirs.extend([self.r_home_modules, ])
#for e in self.extensions:
# self.extra_link_args.extra_link_args(config.extra_link_args)
# e.extra_compile_args.extend(extra_link_args)
def run(self):
_build_ext.run(self)
def get_rversion(r_home):
r_exec = os.path.join(r_home, 'bin', 'R')
# Twist if Win32
if sys.platform == "win32":
if "64 bit" in sys.version:
r_exec = os.path.join(r_home, 'bin', 'x64', 'R')
if sys.version_info >= (3,):
import subprocess
p = subprocess.Popen('"'+r_exec+'" --version',
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=sys.platform!="win32")
rp = p.stdout
else:
rp = os.popen3('"'+r_exec+'" --version')[2]
else:
rp = os.popen('"'+r_exec+'" --version')
rversion = rp.readline()
#Twist if 'R RHOME' spits out a warning
if rversion.startswith("WARNING"):
rversion = rp.readline()
m = re.match('^R ([^ ]+) ([^ ]+) .+$', rversion)
if m is None:
rp.close()
# return dummy version 0.0
rversion = [0, 0]
else:
rversion = m.groups()[1]
if m.groups()[0] == 'version':
rversion = rversion.split('.')
rversion[0] = int(rversion[0])
rversion[1] = int(rversion[1])
else:
rversion = ['development', '']
rp.close()
return rversion
def cmp_version(x, y):
if (x[0] < y[0]):
return -1
if (x[0] > y[0]):
return 1
if (x[0] == y[0]):
if len(x) == 1 or len(y) == 1:
return 0
return cmp_version(x[1:], y[1:])
class RConfig(object):
_include_dirs = None
_libraries = None
_library_dirs = None
_extra_link_args = None
_frameworks = None
_framework_dirs = None
def __init__(self,
include_dirs = tuple(), libraries = tuple(),
library_dirs = tuple(), extra_link_args = tuple(),
frameworks = tuple(),
framework_dirs = tuple()):
for k in ('include_dirs', 'libraries',
'library_dirs', 'extra_link_args'):
v = locals()[k]
if not isinstance(v, tuple):
if isinstance(v, str):
v = [v, ]
v = tuple(set(v))
self.__dict__['_'+k] = v
# frameworks are specific to OSX
for k in ('framework_dirs', 'frameworks'):
v = locals()[k]
if not isinstance(v, tuple):
if isinstance(v, str):
v = [v, ]
v = tuple(set(v))
self.__dict__['_'+k] = v
self.__dict__['_'+'extra_link_args'] = tuple(set(v + self.__dict__['_'+'extra_link_args']))
@staticmethod
def from_string(string, allow_empty = False):
possible_patterns = ('^-L(?P<library_dirs>[^ ]+)$',
'^-l(?P<libraries>[^ ]+)$',
'^-I(?P<include_dirs>[^ ]+)$',
'^(?P<framework_dirs>-F[^ ]+?)$',
'^(?P<frameworks>-framework [^ ]+)$',
'^(?P<extra_link_args>-Wl[^ ]+)$')
pp = [re.compile(x) for x in possible_patterns]
# sanity check of what is returned into rconfig
rconfig_m = None
span = (0, 0)
rc = RConfig()
for substring in re.split('(?<!-framework) ', string):
ok = False
for pattern in pp:
rconfig_m = pattern.match(substring)
if rconfig_m is not None:
rc += RConfig(**rconfig_m.groupdict())
span = rconfig_m.span()
ok = True
break
elif rconfig_m is None:
if allow_empty:
print('\nreturned an empty string.\n')
rc += RConfig()
ok = True
break
else:
# if the configuration points to an existing library,
# use it
if os.path.exists(string):
rc += RConfig(libraries = substring)
ok = True
break
if not ok:
raise ValueError('Invalid substring\n' + substring
+ '\nin string\n' + string)
return rc
def __repr__(self):
s = 'Configuration for R as a library:' + os.linesep
s += os.linesep.join(
[' ' + x + ': ' + self.__dict__['_'+x].__repr__() \
for x in ('include_dirs', 'libraries',
'library_dirs', 'extra_link_args')])
s += os.linesep + ' # OSX-specific (included in extra_link_args)' + os.linesep
s += os.linesep.join(
[' ' + x + ': ' + self.__dict__['_'+x].__repr__() \
for x in ('framework_dirs', 'frameworks')]
)
return s
def __add__(self, config):
assert isinstance(config, RConfig)
res = RConfig(include_dirs = self._include_dirs + \
config._include_dirs,
libraries = self._libraries + config._libraries,
library_dirs = self._library_dirs + \
config._library_dirs,
extra_link_args = self._extra_link_args + \
config._extra_link_args,
frameworks = self._frameworks + config._frameworks,
framework_dirs = self._framework_dirs + config._framework_dirs)
return res
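# Illustrative sketch (editor's addition, not part of the original build script):
# RConfig.from_string() parses one line of `R CMD config` output, e.g.
#   rc = RConfig.from_string("-L/usr/lib/R/lib -lR -Wl,--export-dynamic")
# which yields library_dirs ('/usr/lib/R/lib',), libraries ('R',) and the -Wl flag
# among extra_link_args; instances are then merged with `+` inside build_ext.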
def get_rconfig(r_home, about, allow_empty = False):
if sys.platform == "win32" and "64 bit" in sys.version:
r_exec = os.path.join(r_home, 'bin', 'x64', 'R')
else:
r_exec = os.path.join(r_home, 'bin', 'R')
cmd = '"'+r_exec+'" CMD config '+about
print(cmd)
rp = os.popen(cmd)
rconfig = rp.readline()
#Twist if 'R RHOME' spits out a warning
if rconfig.startswith("WARNING"):
rconfig = rp.readline()
rconfig = rconfig.strip()
try:
rc = RConfig.from_string(rconfig, allow_empty = allow_empty)
except ValueError as ve:
print(ve)
sys.exit("Problem while running `{0}`\n".format(cmd))
rp.close()
return rc
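# Typical call (illustrative; the flags actually queried appear elsewhere in
# this script): get_rconfig(r_home, '--ldflags') runs "R CMD config --ldflags"
# and parses its output into an RConfig instance.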
def getRinterface_ext():
#r_libs = [os.path.join(RHOME, 'lib'), os.path.join(RHOME, 'modules')]
r_libs = []
extra_link_args = []
extra_compile_args = []
#FIXME: crude way (will break in many cases)
    #check how to have a configure step
define_macros = []
if sys.platform == 'win32':
define_macros.append(('Win32', 1))
if "64 bit" in sys.version:
define_macros.append(('Win64', 1))
extra_link_args.append('-m64')
extra_compile_args.append('-m64')
# MS_WIN64 only defined by pyconfig.h for MSVC.
# See http://bugs.python.org/issue4709
define_macros.append(('MS_WIN64', 1))
else:
define_macros.append(('R_INTERFACE_PTRS', 1))
define_macros.append(('HAVE_POSIX_SIGJMP', 1))
define_macros.append(('RIF_HAS_RSIGHAND', 1))
define_macros.append(('CSTACK_DEFNS', 1))
define_macros.append(('HAS_READLINE', 1))
if sys.byteorder == 'big':
define_macros.append(('RPY_BIGENDIAN', 1))
else:
pass
include_dirs = []
rinterface_ext = Extension(
name = pack_name + '.rinterface._rinterface',
sources = [ \
#os.path.join('rpy', 'rinterface', 'embeddedr.c'),
#os.path.join('rpy', 'rinterface', 'r_utils.c'),
#os.path.join('rpy', 'rinterface', 'buffer.c'),
#os.path.join('rpy', 'rinterface', 'sequence.c'),
#os.path.join('rpy', 'rinterface', 'sexp.c'),
os.path.join(package_prefix,
'rpy', 'rinterface', '_rinterface.c')
],
depends = [os.path.join(package_prefix,
'rpy', 'rinterface', 'embeddedr.h'),
os.path.join(package_prefix,
'rpy', 'rinterface', 'r_utils.h'),
os.path.join(package_prefix,
'rpy', 'rinterface', 'buffer.h'),
os.path.join(package_prefix,
'rpy', 'rinterface', 'sequence.h'),
os.path.join(package_prefix,
'rpy', 'rinterface', 'sexp.h'),
os.path.join(package_prefix,
'rpy', 'rinterface', '_rinterface.h'),
os.path.join(package_prefix,
'rpy', 'rinterface', 'rpy_device.h')
],
include_dirs = [os.path.join(package_prefix,
'rpy', 'rinterface'),] + include_dirs,
libraries = ['R', ],
library_dirs = r_libs,
define_macros = define_macros,
runtime_library_dirs = r_libs,
extra_compile_args=extra_compile_args,
#extra_compile_args=['-O0', '-g'],
extra_link_args = extra_link_args
)
rpy_device_ext = Extension(
pack_name + '.rinterface._rpy_device',
[
os.path.join(package_prefix,
'rpy', 'rinterface', '_rpy_device.c'),
],
include_dirs = include_dirs +
[os.path.join('rpy', 'rinterface'), ],
libraries = ['R', ],
library_dirs = r_libs,
define_macros = define_macros,
runtime_library_dirs = r_libs,
extra_compile_args=extra_compile_args,
#extra_compile_args=['-O0', '-g'],
extra_link_args = extra_link_args
)
return [rinterface_ext, rpy_device_ext]
if __name__ == '__main__':
rinterface_exts = []
ri_ext = getRinterface_ext()
rinterface_exts.append(ri_ext)
pack_dir = {pack_name: os.path.join(package_prefix, 'rpy')}
import distutils.command.install
for scheme in distutils.command.install.INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
setup(
#install_requires=['distribute'],
cmdclass = {'build': build,
'build_ext': build_ext},
name = pack_name,
version = pack_version,
description = "Python interface to the R language (embedded R)",
url = "http://rpy.sourceforge.net",
license = "AGPLv3.0 (except rpy2.rinterface: LGPL)",
author = "Laurent Gautier",
author_email = "[email protected]",
ext_modules = rinterface_exts[0],
package_dir = pack_dir,
packages = [pack_name,
pack_name + '.rlike',
pack_name + '.rlike.tests',
pack_name + '.rinterface',
pack_name + '.rinterface.tests',
pack_name + '.robjects',
pack_name + '.robjects.tests',
pack_name + '.robjects.lib',
pack_name + '.robjects.lib.tests',
pack_name + '.interactive',
pack_name + '.interactive.tests'
],
classifiers = ['Programming Language :: Python',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Development Status :: 5 - Production/Stable'
],
package_data = {
'rpy2': ['images/*.png', ],
'rpy2': ['doc/source/rpy2_logo.png', ]}
#[pack_name + '.rinterface_' + x for x in rinterface_rversions] + \
#[pack_name + '.rinterface_' + x + '.tests' for x in rinterface_rversions]
)
| gpl-2.0 | 3,687,832,839,798,086,000 | 37.715415 | 105 | 0.503063 | false |
jgr208/stig-fix-el6-kickstart | config/stig-fix/menu.py | 1 | 49277 | #!/usr/bin/python
# Graphical Kickstart Script
#
# This script was written by Frank Caviggia, Red Hat Consulting
# edited by Jason Ricles, Mikros Systems Corp
# Last update was 7 December 2015
# This script is NOT SUPPORTED by Red Hat Global Support Services.
# Please contact Josh Waldman for more information.
#
# Author: Frank Caviggia ([email protected])
# Copyright: Red Hat, (C) 2013
# Version: 1.3
# License: GPLv2
import os,sys,re,crypt,random
try:
os.environ['DISPLAY']
import pygtk,gtk
except:
print "Error: DISPLAY environment varible not set."
sys.exit(1)
# Class containing verification items
class Verification:
# Name/Comment Check
def check_name(self,name):
pattern = re.compile(r"^[ a-zA-Z']+$",re.VERBOSE)
if re.match(pattern,name):
return True
else:
return False
	# Check for valid Unix username
def check_username(self,username):
pattern = re.compile(r"^\w{5,255}$",re.VERBOSE)
if re.match(pattern,username):
return True
else:
return False
	# Check for valid Unix UID
def check_uid(self,uid):
pattern = re.compile(r"^\d{1,10}$",re.VERBOSE)
if re.match(pattern,uid):
return True
else:
return False
	# Check for valid IP address
def check_ip(self,ip):
pattern = re.compile(r"\b(([01]?\d?\d|2[0-4]\d|25[0-5])\.){3}([01]?\d?\d|2[0-4]\d|25[0-3])\b",re.VERBOSE)
if re.match(pattern,ip) and ip != "0.0.0.0":
return True
else:
return False
	# Check for valid system hostname
def check_hostname(self,hostname):
pattern = re.compile(r"^[a-zA-Z0-9\-\.]{1,100}$",re.VERBOSE)
if re.match(pattern,hostname):
return True
else:
return False
# Display Menu
class Display_Menu:
def __init__(self):
		# Initialize Additional Configuration Files
f = open('/tmp/stig-fix-post','w')
f.write('')
f.close()
f = open('/tmp/stig-fix-packages','w')
f.write('')
f.close()
# Data Storage
self.data = {}
# Verification Functions
self.verify = Verification()
# Create Main Window
self.window = gtk.Window()
self.window.set_title("Red Hat Enterprise Linux - DISA STIG Installation")
self.window.set_position(gtk.WIN_POS_CENTER)
self.window.connect("delete_event",gtk.main_quit)
self.display = gtk.gdk.display_get_default()
self.screen = self.display.get_default_screen()
self.hres = self.screen.get_width()
self.vres = self.screen.get_height()
self.window.connect("key-release-event",self.event_key)
# Create Main Vertical Box to Populate
self.vbox = gtk.VBox()
if self.hres == 640:
self.window.resize(640,480)
elif self.hres > 640:
self.window.resize(800,600)
# RedHat Logo
self.logo = gtk.Image()
self.logo.set_from_file("/usr/share/anaconda/pixmaps/anaconda_header.png")
self.logo.set_alignment(0,0)
self.logo.set_padding(0,0)
self.vbox.add(self.logo)
# Creates Header
self.header = gtk.HBox()
self.label = gtk.Label("<span font_family='liberation-sans' weight='bold' foreground='red' size='large'> Red Hat Enterprise Linux - DISA STIG Installation </span>")
self.label.set_use_markup(True)
self.header.add(self.label)
self.vbox.add(self.header)
# Creates Information Message
self.label = gtk.Label('This DVD installs Red Hat Enterprise Linux 6 with configurations required by the DISA STIG.')
self.vbox.add(self.label)
self.label = gtk.Label('RHEL 6 (STIG Installer v.1.3)')
self.vbox.add(self.label)
# Blank Label
self.label = gtk.Label("")
self.vbox.add(self.label)
# System Configuration
self.system = gtk.HBox()
self.label = gtk.Label(" Hostame: ")
self.system.pack_start(self.label,False,True, 0)
self.hostname = gtk.Entry(100)
self.hostname.set_size_request(225,-1)
self.system.pack_start(self.hostname,False,True,0)
try:
if os.environ['HOSTNAME'] != '':
self.hostname.set_text(os.environ['HOSTNAME'])
else:
self.hostname.set_text('localhost.localdomain')
except:
self.hostname.set_text('localhost.localdomain')
self.label = gtk.Label(" System Profile: ")
self.system.pack_start(self.label,False,True, 0)
self.system_profile = gtk.combo_box_new_text()
self.system_profile.append_text("Minimal Installation")
self.system_profile.append_text("User Workstation")
self.system_profile.append_text("Developer Workstation")
self.system_profile.append_text("RHN Satellite Server")
self.system_profile.append_text("Proprietary Database Server")
self.system_profile.append_text("RHEV-Attached KVM Server")
#self.system_profile.append_text("Standalone KVM Server")
#self.system_profile.append_text("Apache Web Server")
#self.system_profile.append_text("Tomcat Web Server")
#self.system_profile.append_text("PostgreSQL Database Server")
#self.system_profile.append_text("MySQL Database Server")
self.system_profile.set_active(0)
self.system_profile.connect('changed',self.configure_system_profile)
self.system.pack_start(self.system_profile,False,True,0)
self.vbox.add(self.system)
self.classification = gtk.HBox()
self.label = gtk.Label(" System Classification: ")
self.classification.pack_start(self.label,False,True, 0)
self.system_classification = gtk.combo_box_new_text()
self.system_classification.append_text("UNCLASSIFIED")
self.system_classification.append_text("UNCLASSIFIED//FOUO")
self.system_classification.append_text("CONFIDENTIAL")
self.system_classification.append_text("SECRET")
self.system_classification.append_text("TOP SECRET")
self.system_classification.append_text("TOP SECRET//SCI")
self.system_classification.append_text("TOP SECRET//SCI//NOFORN")
self.system_classification.set_active(0)
self.classification.pack_start(self.system_classification,False,True,0)
self.vbox.add(self.classification)
# Blank Label
self.label = gtk.Label("")
self.vbox.add(self.label)
# System Information
self.cpu_cores = 0
self.cpu_model = ''
self.cpu_arch = ''
self.system_memory = {}
with open('/proc/cpuinfo') as f:
for line in f:
if line.strip():
if line.rstrip('\n').startswith('model name'):
self.cpu_model = line.rstrip('\n').split(':')[1]
self.cpu_cores += 1
elif line.rstrip('\n').startswith('flags') or line.rstrip('\n').startswith('Features'):
if 'lm' in line.rstrip('\n').split():
self.cpu_arch = '64-bit'
else:
self.cpu_arch = '32-bit'
f.close()
with open('/proc/meminfo') as f:
for line in f:
self.system_memory[line.split(':')[0]] = line.split(':')[1].strip()
f.close()
self.cpu_information = gtk.HBox()
self.label = gtk.Label(" CPU Model: ")
self.cpu_information.pack_start(self.label,False,True, 0)
self.label = gtk.Label(" %s "%(self.cpu_model))
self.cpu_information.pack_start(self.label,False,True, 0)
self.label = gtk.Label(" CPU Threads: ")
self.cpu_information.pack_start(self.label,False,True, 0)
self.label = gtk.Label(" %d "%(self.cpu_cores))
self.cpu_information.pack_start(self.label,False,True, 0)
self.label = gtk.Label(" Architecure: ")
self.cpu_information.pack_start(self.label,False,True, 0)
self.label = gtk.Label(" %s "%(self.cpu_arch))
self.cpu_information.pack_start(self.label,False,True, 0)
self.vbox.add(self.cpu_information)
self.memory_information = gtk.HBox()
self.label = gtk.Label(" Total System Memory: ")
self.memory_information.pack_start(self.label,False,True, 0)
self.label = gtk.Label(" %s "%(self.system_memory['MemTotal']))
self.memory_information.pack_start(self.label,False,True, 0)
self.label = gtk.Label(" Free Memory: ")
self.memory_information.pack_start(self.label,False,True, 0)
self.label = gtk.Label(" %s "%(self.system_memory['MemFree']))
self.memory_information.pack_start(self.label,False,True, 0)
self.vbox.add(self.memory_information)
# Disk Partitioning Section
self.label = gtk.Label("\n<span font_family='liberation-sans' weight='bold'>Disk Partitioning</span>")
self.label.set_use_markup(True)
self.vbox.add(self.label)
# Blank Label
self.label = gtk.Label("")
self.vbox.add(self.label)
# List Disks
self.disk_list = gtk.HBox()
self.disk_info = []
self.disk_total = 0
self.output = os.popen('list-harddrives')
for self.line in self.output:
self.line = self.line.strip()
if not ('fd0' in self.line or 'sr0' in self.line):
self.disk_info.append(self.line.split(' '))
self.label = gtk.Label(" Available Disks: ")
self.disk_list.pack_start(self.label, False, True, 0)
if len(self.disk_info) == 0:
self.label = gtk.Label("No Drives Available.")
self.disk_list.pack_start(self.label,False,True,0)
else:
for i in range(len(self.disk_info)):
if len(self.disk_info) > 5:
exec("self.disk%d = gtk.CheckButton(self.disk_info[%d][0])"%(i,i))
else:
exec("self.disk%s = gtk.CheckButton(self.disk_info[%d][0] +' ('+ str(int(float(self.disk_info[%d][1]))/1024) +'Gb)')"%(i,i,i))
exec("self.disk%d.set_active(True)"%(i))
exec("self.disk_list.pack_start(self.disk%d, False, True, 0)"%(i))
self.disk_total += int(float(self.disk_info[i][1])/1024)
self.vbox.add(self.disk_list)
		# Disk Encryption (Ability to disable LUKS for self-encrypting drives)
self.encrypt = gtk.HBox()
self.core = gtk.HBox()
self.tim = gtk.HBox()
self.label = gtk.Label(" ")
self.encrypt.pack_start(self.label, False, True, 0)
self.label = gtk.Label(" ")
self.core.pack_start(self.label, False, True, 0)
self.label = gtk.Label(" ")
self.tim.pack_start(self.label, False, True, 0)
self.encrypt_disk = gtk.CheckButton('Encrypt Drives with LUKS')
self.core_install = gtk.CheckButton('CORE')
self.tim_install = gtk.CheckButton('TIM')
self.encrypt_disk.set_active(True)
self.core_install.set_active(False)
self.tim_install.set_active(False)
self.encrypt.pack_start(self.encrypt_disk, False, True, 0)
self.core.pack_start(self.core_install, False, True, 0)
self.tim.pack_start(self.tim_install, False, True, 0)
self.tim_install.connect("clicked",self.choose)
self.core_install.connect("clicked",self.choose)
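		# self.choose() below prevents TIM and CORE from being selected at the same time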
self.vbox.add(self.encrypt)
self.vbox.add(self.core)
self.vbox.add(self.tim)
# Minimal Installation Warning
if self.disk_total < 8:
self.MessageBox(self.window,"<b>Recommended minimum of 8Gb disk space for a Minimal Install!</b>\n\n You have "+str(self.disk_total)+"Gb available.",gtk.MESSAGE_WARNING)
# Blank Label
self.label = gtk.Label("")
self.vbox.add(self.label)
# Partitioning
self.label = gtk.Label('Required LVM Partitioning Percentage')
self.vbox.add(self.label)
self.partitioning1 = gtk.HBox()
self.label = gtk.Label(" ROOT (/) ")
self.partitioning1.pack_start(self.label,False,True,0)
self.root_range = gtk.Adjustment(45,1,95,1,0, 0)
self.root_partition = gtk.SpinButton(adjustment=self.root_range,climb_rate=1,digits=0)
self.root_partition.connect('value-changed',self.lvm_check)
self.partitioning1.pack_start(self.root_partition,False,True,0)
self.label = gtk.Label("% HOME (/home) ")
self.partitioning1.pack_start(self.label,False,True,0)
self.home_range = gtk.Adjustment(15,1,95,1,0, 0)
self.home_partition = gtk.SpinButton(adjustment=self.home_range,climb_rate=1,digits=0)
self.home_partition.connect('value-changed',self.lvm_check)
self.partitioning1.pack_start(self.home_partition,False,True,0)
self.label = gtk.Label("% TMP (/tmp) ")
self.partitioning1.pack_start(self.label,False,True,0)
self.tmp_range = gtk.Adjustment(10,1,60,1,0, 0)
self.tmp_partition = gtk.SpinButton(adjustment=self.tmp_range,climb_rate=1,digits=0)
self.tmp_partition.connect('value-changed',self.lvm_check)
self.partitioning1.pack_start(self.tmp_partition,False,True,0)
self.label = gtk.Label("% VAR (/var) ")
self.partitioning1.pack_start(self.label,False,True,0)
self.var_range = gtk.Adjustment(10,1,95,1,0, 0)
self.var_partition = gtk.SpinButton(adjustment=self.var_range,climb_rate=1,digits=0)
self.var_partition.connect('value-changed',self.lvm_check)
self.partitioning1.pack_start(self.var_partition,False,True,0)
self.label = gtk.Label("%")
self.partitioning1.pack_start(self.label,False,True,0)
self.vbox.add(self.partitioning1)
self.partitioning2 = gtk.HBox()
self.label = gtk.Label(" LOG (/var/log) ")
self.partitioning2.pack_start(self.label,False,True,0)
self.log_range = gtk.Adjustment(10,1,75,1,0, 0)
self.log_partition = gtk.SpinButton(adjustment=self.log_range,climb_rate=1,digits=0)
self.log_partition.connect('value-changed',self.lvm_check)
self.partitioning2.pack_start(self.log_partition,False,True,0)
self.label = gtk.Label("% AUDIT (/var/log/audit) ")
self.partitioning2.pack_start(self.label,False,True,0)
self.audit_range = gtk.Adjustment(10,1,75,1,0, 0)
self.audit_partition = gtk.SpinButton(adjustment=self.audit_range,climb_rate=1,digits=0)
self.audit_partition.connect('value-changed',self.lvm_check)
self.partitioning2.pack_start(self.audit_partition,False,True,0)
self.label = gtk.Label("% SWAP ")
self.partitioning2.pack_start(self.label,False,True,0)
self.swap_range = gtk.Adjustment(0,0,25,1,0, 0)
self.swap_partition = gtk.SpinButton(adjustment=self.swap_range,climb_rate=1,digits=0)
self.swap_partition.connect('value-changed',self.lvm_check)
self.partitioning2.pack_start(self.swap_partition,False,True,0)
self.label = gtk.Label("%")
self.partitioning2.pack_start(self.label,False,True,0)
self.vbox.add(self.partitioning2)
# Blank Label
self.label = gtk.Label("")
self.vbox.add(self.label)
self.label = gtk.Label('Optional LVM Partitioning Percentage')
self.vbox.add(self.label)
self.partitioning3 = gtk.HBox()
self.label = gtk.Label(" WWW (/var/www) ")
self.partitioning3.pack_start(self.label,False,True,0)
self.www_range = gtk.Adjustment(0,0,90,1,0, 0)
self.www_partition = gtk.SpinButton(adjustment=self.www_range,climb_rate=1,digits=0)
self.www_partition.connect('value-changed',self.lvm_check)
self.partitioning3.pack_start(self.www_partition,False,True,0)
self.label = gtk.Label("% OPT (/opt) ")
self.partitioning3.pack_start(self.label,False,True,0)
self.opt_range = gtk.Adjustment(0,0,90,1,0, 0)
self.opt_partition = gtk.SpinButton(adjustment=self.opt_range,climb_rate=1,digits=0)
self.opt_partition.connect('value-changed',self.lvm_check)
self.partitioning3.pack_start(self.opt_partition,False,True,0)
self.label = gtk.Label("%")
self.partitioning3.pack_start(self.label,False,True,0)
self.vbox.add(self.partitioning3)
# Blank Label
self.label = gtk.Label("")
self.vbox.add(self.label)
self.partition_message = gtk.HBox()
self.label = gtk.Label(' Note: LVM Partitions should add up to 100% or less before proceeding. <b>Currently Used:</b> ')
self.label.set_use_markup(True)
self.partition_message.pack_start(self.label,False,True,0)
self.partition_used = gtk.Label('100%')
self.partition_message.pack_start(self.partition_used,False,True,0)
self.vbox.add(self.partition_message)
# Button Bar at the Bottom of the Window
self.label = gtk.Label("")
self.vbox.add(self.label)
self.button_bar = gtk.HBox()
# Apply Configurations
self.button1 = gtk.Button(None,gtk.STOCK_OK)
self.button1.connect("clicked",self.apply_configuration)
self.button_bar.pack_end(self.button1,False,True,0)
# Help
self.button2 = gtk.Button(None,gtk.STOCK_HELP)
self.button2.connect("clicked",self.show_help_main)
self.button_bar.pack_end(self.button2,False,True,0)
self.vbox.add(self.button_bar)
self.window.add(self.vbox)
self.window.show_all()
## STOCK CONFIGURATIONS (Minimal Install)
# Post Configuration (nochroot)
f = open('/tmp/stig-fix-post-nochroot','w')
f.write('')
f.close()
# Post Configuration
f = open('/tmp/stig-fix-post','w')
# Run Hardening Script
f.write('/sbin/stig-fix -q &> /dev/null')
f.close()
# Package Selection
f = open('/tmp/stig-fix-packages','w')
f.write('-telnet-server\n')
f.write('-java-1.7.0-openjdk-devel\n')
f.write('-java-1.6.0-openjdk-devel\n')
f.write('gcc-c++\n')
f.write('dos2unix\n')
f.write('kernel-devel\n')
f.write('gcc\n')
f.write('dialog\n')
f.write('dmidecode\n')
f.write('aide\n')
f.close()
# Key Press Event
def event_key(self,args,event):
if event.keyval == gtk.keysyms.F12:
self.apply_configuration(args)
elif event.keyval == gtk.keysyms.F1:
self.show_help_main(args)
# Shows Help for Main Install
def show_help_main(self,args):
		self.help_text = ("<b>Install Help</b>\n\n- All LVM partitions together must use no more than 100% of the LVM Volume Group.\n\n- Pressing OK prompts for a password used for disk encryption (LUKS) and for the root account. GRUB is installed with a randomly generated password. Use the 'grubby' command to modify the grub configuration and the 'grub-crypt' command to generate a new grub password.\n\n- To access root remotely via ssh, create a user and add them to the wheel and sshusers groups.\n\n- The minimum password length is 15 characters; using a strong password is recommended.\n")
self.MessageBox(self.window,self.help_text,gtk.MESSAGE_INFO)
# System Profile Configuration
def configure_system_profile(self,args):
# Zero out partitioning
self.opt_partition.set_value(0)
self.www_partition.set_value(0)
self.swap_partition.set_value(0)
self.tmp_partition.set_value(0)
self.var_partition.set_value(0)
self.log_partition.set_value(0)
self.audit_partition.set_value(0)
self.home_partition.set_value(0)
self.root_partition.set_value(0)
################################################################################################################
		# Minimal (Defaults to Kickstart)
################################################################################################################
if int(self.system_profile.get_active()) == 0:
# Partitioning
if self.disk_total < 8:
self.MessageBox(self.window,"<b>Recommended minimum of 8Gb disk space for a Minimal Install!</b>\n\n You have "+str(self.disk_total)+"Gb available.",gtk.MESSAGE_WARNING)
self.opt_partition.set_value(0)
self.www_partition.set_value(0)
self.swap_partition.set_value(0)
self.tmp_partition.set_value(10)
self.var_partition.set_value(10)
self.log_partition.set_value(10)
self.audit_partition.set_value(10)
self.home_partition.set_value(15)
self.root_partition.set_value(45)
# Post Configuration (nochroot)
f = open('/tmp/stig-fix-post-nochroot','w')
f.write('')
f.close()
# Post Configuration
f = open('/tmp/stig-fix-post','w')
# Run Hardening Script
f.write('/sbin/stig-fix -q &> /dev/null')
f.close()
# Package Selection
f = open('/tmp/stig-fix-packages','w')
f.write('-telnet-server\n')
f.write('-java-1.7.0-openjdk-devel\n')
f.write('-java-1.6.0-openjdk-devel\n')
f.write('gcc-c++\n')
f.write('dos2unix\n')
f.write('kernel-devel\n')
f.write('gcc\n')
f.write('dialog\n')
f.write('dmidecode\n')
f.write('aide\n')
f.close()
################################################################################################################
# User Workstation
################################################################################################################
if int(self.system_profile.get_active()) == 1:
# Partitioning
if self.disk_total < 12:
self.MessageBox(self.window,"<b>Recommended minimum of 12Gb disk space for a User Workstation!</b>\n\n You have "+str(self.disk_total)+"Gb available.",gtk.MESSAGE_WARNING)
self.opt_partition.set_value(0)
self.www_partition.set_value(0)
self.swap_partition.set_value(5)
self.tmp_partition.set_value(10)
self.var_partition.set_value(10)
self.log_partition.set_value(10)
self.audit_partition.set_value(10)
self.home_partition.set_value(10)
self.root_partition.set_value(45)
# Post Configuration (nochroot)
f = open('/tmp/stig-fix-post-nochroot','w')
f.write('cp /mnt/source/stig-fix/classification-banner.py /mnt/sysimage/usr/local/bin/\n')
f.write('chmod a+rx /mnt/sysimage/usr/local/bin/classification-banner.py\n')
f.close()
# Post Configuration
f = open('/tmp/stig-fix-post','w')
# Run Hardening Script
f.write('/sbin/stig-fix -q &> /dev/null')
f.close()
# Package Selection
f = open('/tmp/stig-fix-packages','w')
f.write('@additional-devel\n')
f.write('@basic-desktop\n')
f.write('@desktop-platform\n')
f.write('@directory-client\n')
f.write('@general-desktop\n')
f.write('@graphical-admin-tools\n')
f.write('@input-methods\n')
f.write('@internet-browser\n')
f.write('@legacy-x\n')
f.write('@x11\n')
f.write('pcsc*\n')
f.write('aide\n')
f.write('coolkey\n')
f.write('liberation-*\n')
f.write('dejavu-*\n')
f.write('krb5-auth-dialog\n')
f.write('seahorse-plugins\n')
f.write('vim-X11\n')
f.write('gcc-c++\n')
f.write('dos2unix\n')
f.write('kernel-devel\n')
f.write('gcc\n')
f.write('dialog\n')
f.write('dmidecode\n')
f.write('policycoreutils-gui\n')
f.write('system-config-lvm\n')
f.write('audit-viewer\n')
f.write('openmotif\n')
f.write('libXmu\n')
f.write('libXp\n')
f.write('openmotif22\n')
f.write('-samba-winbind\n')
f.write('-certmonger\n')
f.write('-gnome-applets\n')
f.write('-vino\n')
f.write('-ypbind\n')
f.write('-cheese\n')
f.write('-gnome-backgrounds\n')
f.write('-compiz-gnome\n')
f.write('-gnome-bluetooth\n')
f.write('-gnome-user-share\n')
f.write('-sound-juicer\n')
f.write('-rhythmbox\n')
f.write('-brasero\n')
f.write('-brasero-nautilus\n')
f.write('-brasero-libs\n')
f.write('-NetworkManager\n')
f.write('-NetworkManager-gnome\n')
f.write('-evolution-data-server\n')
f.write('-NetworkManager-glib\n')
f.write('-m17n-contrib-bengali\n')
f.write('-m17n-contrib-punjabi\n')
f.write('-ibus-sayura\n')
f.write('-m17n-contrib-assamese\n')
f.write('-m17n-contrib-oriya\n')
f.write('-m17n-contrib-kannada\n')
f.write('-m17n-contrib-telugu\n')
f.write('-m17n-contrib-hindi\n')
f.write('-m17n-contrib-maithili\n')
f.write('-m17n-db-sinhala\n')
f.write('-m17n-contrib-marathi\n')
f.write('-m17n-db-thai\n')
f.write('-ibus-pinyin\n')
f.write('-m17n-contrib-urdu\n')
f.write('-m17n-contrib-tamil\n')
f.write('-ibus-chewing\n')
f.write('-ibus-hangul\n')
f.write('-ibus-anthy\n')
f.write('-m17n-contrib-malayalam\n')
f.write('-m17n-contrib-gujarati\n')
f.write('-telnet-server\n')
f.write('-java-1.7.0-openjdk-devel\n')
f.write('-java-1.6.0-openjdk-devel\n')
f.close()
################################################################################################################
# Developer Workstation
################################################################################################################
if int(self.system_profile.get_active()) == 2:
# Partitioning
if self.disk_total < 16:
self.MessageBox(self.window,"<b>Recommended minimum 16Gb disk space for a Developer Workstation!</b>\n\n You have "+str(self.disk_total)+"Gb available.",gtk.MESSAGE_WARNING)
self.opt_partition.set_value(0)
self.www_partition.set_value(0)
self.swap_partition.set_value(0)
self.tmp_partition.set_value(10)
self.var_partition.set_value(10)
self.log_partition.set_value(10)
self.audit_partition.set_value(10)
self.home_partition.set_value(25)
self.root_partition.set_value(30)
# Post Configuration (nochroot)
f = open('/tmp/stig-fix-post-nochroot','w')
f.write('cp /mnt/source/stig-fix/classification-banner.py /mnt/sysimage/usr/local/bin/\n')
f.write('chmod a+rx /mnt/sysimage/usr/local/bin/classification-banner.py\n')
f.close()
# Post Configuration
f = open('/tmp/stig-fix-post','w')
# Run Hardening Script
f.write('/sbin/stig-fix -q &> /dev/null')
f.close()
# Package Selection
f = open('/tmp/stig-fix-packages','w')
f.write('@additional-devel\n')
f.write('@basic-desktop\n')
f.write('@desktop-platform\n')
f.write('@desktop-platform-devel\n')
f.write('@development\n')
f.write('@directory-client\n')
f.write('@eclipse\n')
f.write('@general-desktop\n')
f.write('@graphical-admin-tools\n')
f.write('@input-methods\n')
f.write('@internet-browser\n')
f.write('@legacy-x\n')
f.write('@server-platform-devel\n')
f.write('@x11\n')
f.write('pcsc*\n')
f.write('coolkey\n')
f.write('liberation-*\n')
f.write('dejavu-*\n')
f.write('libXinerama-devel\n')
f.write('openmotif-devel\n')
f.write('libXmu-devel\n')
f.write('xorg-x11-proto-devel\n')
f.write('startup-notification-devel\n')
f.write('libgnomeui-devel\n')
f.write('libbonobo-devel\n')
f.write('junit\n')
f.write('libXau-devel\n')
f.write('libgcrypt-devel\n')
f.write('popt-devel\n')
f.write('gnome-python2-desktop\n')
f.write('libdrm-devel\n')
f.write('libXrandr-devel\n')
f.write('libxslt-devel\n')
f.write('libglade2-devel\n')
f.write('gnutls-devel\n')
f.write('desktop-file-utils\n')
f.write('ant\n')
f.write('rpmdevtools\n')
f.write('jpackage-utils\n')
f.write('rpmlint\n')
f.write('krb5-auth-dialog\n')
f.write('seahorse-plugins\n')
f.write('vim-X11\n')
f.write('system-config-lvm\n')
f.write('audit-viewer\n')
f.write('openmotif\n')
f.write('libXmu\n')
f.write('libXp\n')
f.write('openmotif22\n')
f.write('-samba-winbind\n')
f.write('-certmonger\n')
f.write('-gnome-applets\n')
f.write('-vino\n')
f.write('-ypbind\n')
f.write('-cheese\n')
f.write('-gnome-backgrounds\n')
f.write('-compiz-gnome\n')
f.write('-gnome-bluetooth\n')
f.write('-gnome-user-share\n')
f.write('-sound-juicer\n')
f.write('-rhythmbox\n')
f.write('-brasero\n')
f.write('-brasero-nautilus\n')
f.write('-brasero-libs\n')
f.write('-NetworkManager\n')
f.write('-NetworkManager-gnome\n')
f.write('-evolution-data-server\n')
f.write('-evolution-data-server-devel\n')
f.write('-NetworkManager-glib\n')
f.write('-m17n-contrib-bengali\n')
f.write('-m17n-contrib-punjabi\n')
f.write('-ibus-sayura\n')
f.write('-m17n-contrib-assamese\n')
f.write('-m17n-contrib-oriya\n')
f.write('-m17n-contrib-kannada\n')
f.write('-m17n-contrib-telugu\n')
f.write('-m17n-contrib-hindi\n')
f.write('-m17n-contrib-maithili\n')
f.write('-m17n-db-sinhala\n')
f.write('-m17n-contrib-marathi\n')
f.write('-m17n-db-thai\n')
f.write('-ibus-pinyin\n')
f.write('-m17n-contrib-urdu\n')
f.write('-m17n-contrib-tamil\n')
f.write('-ibus-chewing\n')
f.write('-ibus-hangul\n')
f.write('-ibus-anthy\n')
f.write('-m17n-contrib-malayalam\n')
f.write('-m17n-contrib-gujarati\n')
f.close()
################################################################################################################
# RHN Satellite Install
################################################################################################################
if int(self.system_profile.get_active()) == 3:
# Partitioning
if self.disk_total < 120:
self.MessageBox(self.window,"<b>Recommended minimum of 120Gb disk space for a RHN Satelite Server!</b>\n\n You have "+str(self.disk_total)+"Gb available.",gtk.MESSAGE_WARNING)
self.opt_partition.set_value(0)
self.www_partition.set_value(3)
self.swap_partition.set_value(0)
self.tmp_partition.set_value(2)
self.var_partition.set_value(80)
self.log_partition.set_value(3)
self.audit_partition.set_value(3)
self.home_partition.set_value(3)
self.root_partition.set_value(5)
# Post Configuration (nochroot)
f = open('/tmp/stig-fix-post-nochroot','w')
f.write('')
f.close()
# Post Configuration
f = open('/tmp/stig-fix-post','w')
# Run Hardening Script
			f.write('/sbin/stig-fix -q &> /dev/null\n')
# RHN Satellite requires umask of 022 for installation
f.write('sed -i "/umask/ c\umask 022" /etc/profile\n')
f.close()
# Package Selection
f = open('/tmp/stig-fix-packages','w')
f.write('')
f.close()
################################################################################################################
# Proprietary Database
################################################################################################################
if int(self.system_profile.get_active()) == 4:
# Partitioning
if self.disk_total < 60:
self.MessageBox(self.window,"<b>Recommended minimum of 60Gb disk space for a Proprietary Database Server!</b>\n\n You have "+str(self.disk_total)+"Gb available.",gtk.MESSAGE_WARNING)
self.www_partition.set_value(0)
self.home_partition.set_value(5)
self.swap_partition.set_value(0)
self.var_partition.set_value(7)
self.log_partition.set_value(10)
self.audit_partition.set_value(10)
self.tmp_partition.set_value(15)
self.opt_partition.set_value(30)
self.root_partition.set_value(18)
# Post Configuration (nochroot)
f = open('/tmp/stig-fix-post-nochroot','w')
f.write('cp /mnt/source/stig-fix/classification-banner.py /mnt/sysimage/usr/local/bin/\n')
f.write('chmod a+rx /mnt/sysimage/usr/local/bin/classification-banner.py\n')
f.close()
# Post Configuration
f = open('/tmp/stig-fix-post','w')
# Run Hardening Script
f.write('/sbin/stig-fix -q &> /dev/null')
f.close()
# Package Selection
f = open('/tmp/stig-fix-packages','w')
f.write('xorg-x11-server-Xorg\n')
f.write('xorg-x11-xinit\n')
f.write('xterm\n')
f.write('twm\n')
f.write('liberation-*\n')
f.write('dejavu-*\n')
f.write('openmotif\n')
f.write('libXmu\n')
f.write('libXp\n')
f.write('openmotif22\n')
f.write('kernel-devel\n')
f.write('kernel-headers\n')
f.write('gcc\n')
f.write('gcc-c++\n')
f.write('libgcc\n')
f.write('autoconf\n')
f.write('make\n')
f.write('libstdc++\n')
f.write('compat-libstdc++\n')
f.write('libaio\n')
f.write('libaio-devel\n')
f.write('unixODBC\n')
f.write('unixODBC-devel\n')
f.write('sysstat\n')
f.write('ksh\n')
f.close()
################################################################################################################
# RHEV-Attached KVM Server (HARDENING SCRIPT NOT RUN UNTIL AFTER CONNECTION TO RHEVM SERVER)
################################################################################################################
if int(self.system_profile.get_active()) == 5:
			# WARNING - HARDENING SCRIPT NOT RUN!
self.MessageBox(self.window,"<b>THIS PROFILE WILL NOT RUN THE HARDENING SCRIPT!</b>\n\nPlease run the system hardening script after system has been attached to the RHEV-M server using the following command:\n\n # stig-fix",gtk.MESSAGE_WARNING)
# Partitioning
if self.disk_total < 60:
self.MessageBox(self.window,"<b>Recommended minimum of 60Gb disk space for a RHEV-Attached KVM Server Install!</b>\n\n You have "+str(self.disk_total)+"Gb available.",gtk.MESSAGE_WARNING)
self.opt_partition.set_value(0)
self.www_partition.set_value(0)
self.swap_partition.set_value(0)
self.tmp_partition.set_value(10)
self.var_partition.set_value(10)
self.log_partition.set_value(10)
self.audit_partition.set_value(10)
self.home_partition.set_value(25)
self.root_partition.set_value(30)
# Post Configuration (nochroot)
f = open('/tmp/stig-fix-post-nochroot','w')
f.write('')
f.close()
# Post Configuration
f = open('/tmp/stig-fix-post','w')
# Allow 'root' to login via SSH - Required by RHEV-M
f.write('sed -i "/^PermitRootLogin/ c\PermitRootLogin yes" /etc/ssh/sshd_config')
f.close()
# Package Selection
f = open('/tmp/stig-fix-packages','w')
f.write('')
f.close()
################################################################################################################
# Standalone KVM Installation
################################################################################################################
if int(self.system_profile.get_active()) == 6:
# Partitioning
if self.disk_total < 60:
self.MessageBox(self.window,"<b>Recommended minimum 60Gb disk space for a RHEL/KVM Server!</b>\n\n You have "+str(self.disk_total)+"Gb available.",gtk.MESSAGE_WARNING)
self.opt_partition.set_value(0)
self.www_partition.set_value(0)
self.swap_partition.set_value(0)
self.tmp_partition.set_value(3)
self.var_partition.set_value(65)
self.log_partition.set_value(5)
self.audit_partition.set_value(5)
self.home_partition.set_value(5)
self.root_partition.set_value(15)
# Post Configuration (nochroot)
f = open('/tmp/stig-fix-post-nochroot','w')
f.write('')
f.close()
# Post Configuration
f = open('/tmp/stig-fix-post','w')
# Run Hardening Script
f.write('/sbin/stig-fix -q &> /dev/null')
f.close()
# Package Selection
f = open('/tmp/stig-fix-packages','w')
f.write('@storage-client-iscsi\n')
f.write('@virtualization\n')
f.write('@virtualization-client\n')
f.write('@virtualization-platform\n')
f.write('@virtualization-tools\n')
f.write('perl-Sys-Virt\n')
f.write('qemu-kvm-tools\n')
f.write('fence-virtd-libvirt\n')
f.write('virt-v2v\n')
f.write('libguestfs-tools\n')
f.close()
################################################################################################################
# Apache HTTP (Web Server)
################################################################################################################
if int(self.system_profile.get_active()) == 7:
# Partitioning
if self.disk_total < 10:
self.MessageBox(self.window,"<b>Recommended minimum of 10Gb disk space for a Web Server!</b>\n\n You have "+str(self.disk_total)+"Gb available.",gtk.MESSAGE_WARNING)
self.opt_partition.set_value(0)
self.www_partition.set_value(0)
self.swap_partition.set_value(0)
self.tmp_partition.set_value(10)
self.var_partition.set_value(10)
self.log_partition.set_value(10)
self.audit_partition.set_value(10)
self.home_partition.set_value(25)
self.root_partition.set_value(30)
# Post Configuration (nochroot)
f = open('/tmp/stig-fix-post-nochroot','w')
f.write('')
f.close()
# Post Configuration
f = open('/tmp/stig-fix-post','w')
# Run Hardening Script
f.write('/sbin/stig-fix -q &> /dev/null')
f.close()
# Package Selection
f = open('/tmp/stig-fix-packages','w')
f.write('httpd\n')
f.close()
################################################################################################################
# Apache Tomcat
################################################################################################################
if int(self.system_profile.get_active()) == 8:
# Partitioning
if self.disk_total < 10:
self.MessageBox(self.window,"<b>Recommended minimum of 10Gb disk space for an Apache Tomcat Web Server!</b>\n\n You have "+str(self.disk_total)+"Gb available.",gtk.MESSAGE_WARNING)
self.opt_partition.set_value(0)
self.www_partition.set_value(0)
self.swap_partition.set_value(0)
self.tmp_partition.set_value(10)
self.var_partition.set_value(10)
self.log_partition.set_value(10)
self.audit_partition.set_value(10)
self.home_partition.set_value(25)
self.root_partition.set_value(30)
# Post Configuration (nochroot)
f = open('/tmp/stig-fix-post-nochroot','w')
f.write('')
f.close()
# Post Configuration
f = open('/tmp/stig-fix-post','w')
# Run Hardening Script
f.write('/sbin/stig-fix -q &> /dev/null')
f.close()
# Package Selection
f = open('/tmp/stig-fix-packages','w')
f.write('tomcat6\n')
f.close()
################################################################################################################
# PostgreSQL Database
################################################################################################################
if int(self.system_profile.get_active()) == 9:
# Partitioning
if self.disk_total < 16:
self.MessageBox(self.window,"<b>Recommended minimum of 16Gb disk space for a PostgreSQL Database Server!</b>\n\n You have "+str(self.disk_total)+"Gb available.",gtk.MESSAGE_WARNING)
self.opt_partition.set_value(0)
self.www_partition.set_value(0)
self.swap_partition.set_value(0)
self.tmp_partition.set_value(10)
self.var_partition.set_value(10)
self.log_partition.set_value(10)
self.audit_partition.set_value(10)
self.home_partition.set_value(25)
self.root_partition.set_value(30)
# Post Configuration (nochroot)
f = open('/tmp/stig-fix-post-nochroot','w')
f.write('')
f.close()
# Post Configuration
f = open('/tmp/stig-fix-post','w')
# Run Hardening Script
f.write('/sbin/stig-fix -q &> /dev/null')
f.close()
# Package Selection
f = open('/tmp/stig-fix-packages','w')
f.write('postgresql\n')
f.close()
################################################################################################################
# MySQL Database
################################################################################################################
if int(self.system_profile.get_active()) == 10:
# Partitioning
if self.disk_total < 16:
self.MessageBox(self.window,"<b>Recommended minimum of 16Gb disk space for a MariaDB Database Server!</b>\n\n You have "+str(self.disk_total)+"Gb available.",gtk.MESSAGE_WARNING)
self.opt_partition.set_value(0)
self.www_partition.set_value(0)
self.swap_partition.set_value(0)
self.tmp_partition.set_value(10)
self.var_partition.set_value(10)
self.log_partition.set_value(10)
self.audit_partition.set_value(10)
self.home_partition.set_value(25)
self.root_partition.set_value(30)
# Post Configuration (nochroot)
f = open('/tmp/stig-fix-post-nochroot','w')
f.write('')
f.close()
# Post Configuration
f = open('/tmp/stig-fix-post','w')
# Run Hardening Script
f.write('/sbin/stig-fix -q &> /dev/null')
f.close()
# Package Selection
f = open('/tmp/stig-fix-packages','w')
f.write('mysql-server\n')
f.close()
# Check LVM Partitioning
def lvm_check(self,args):
self.lvm = self.root_partition.get_value_as_int()+self.home_partition.get_value_as_int()+self.tmp_partition.get_value_as_int()+self.var_partition.get_value_as_int()+self.log_partition.get_value_as_int()+self.audit_partition.get_value_as_int()+self.swap_partition.get_value_as_int()+self.www_partition.get_value_as_int()+self.opt_partition.get_value_as_int()
self.partition_used.set_label(str(self.lvm)+'%')
if int(self.lvm) > 100:
self.MessageBox(self.window,"<b>Verify that LVM configuration is not over 100%!</b>",gtk.MESSAGE_ERROR)
return False
else:
return True
def choose(self, widget):
if self.tim_install.get_active() == True and self.core_install.get_active():
self.MessageBox(self.window,"<b>Can not have both TIM and CORE install!</b>",gtk.MESSAGE_ERROR)
self.tim_install.set_active(False)
self.core_install.set_active(False)
# Display Message Box (e.g. Help Screen, Warning Screen, etc.)
def MessageBox(self,parent,text,type=gtk.MESSAGE_INFO):
message = gtk.MessageDialog(parent,0,type,gtk.BUTTONS_OK)
message.set_markup(text)
response = message.run()
if response == gtk.RESPONSE_OK:
message.destroy()
# Get Password
def get_password(self,parent):
dialog = gtk.Dialog("Configure System Password",parent,gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT,(gtk.STOCK_CANCEL,gtk.RESPONSE_REJECT,gtk.STOCK_OK,gtk.RESPONSE_ACCEPT))
self.pass1 = gtk.HBox()
		self.label1 = gtk.Label(" Password: ")
self.pass1.pack_start(self.label1,False,True,0)
self.password1 = gtk.Entry()
self.password1.set_visibility(False)
self.pass1.pack_start(self.password1,False,True,0)
dialog.vbox.add(self.pass1)
self.pass2 = gtk.HBox()
self.label2 = gtk.Label(" Verify Password: ")
self.pass2.pack_start(self.label2,False,True,0)
self.password2 = gtk.Entry()
self.password2.set_visibility(False)
self.pass2.pack_start(self.password2,False,True,0)
dialog.vbox.add(self.pass2)
dialog.show_all()
response = dialog.run()
if response == gtk.RESPONSE_ACCEPT:
self.a = self.password1.get_text()
self.b = self.password2.get_text()
dialog.destroy()
else:
self.a = ''
self.b = ''
dialog.destroy()
	# Apply Configurations to Kickstart File
def apply_configuration(self,args):
# Set system password
while True:
self.get_password(self.window)
if self.a == self.b:
if len(self.a) == 0:
return
elif len(self.a) >= 15:
self.passwd = self.a
break
else:
self.MessageBox(self.window,"<b>Password too short! 15 Characters Required.</b>",gtk.MESSAGE_ERROR)
else:
self.MessageBox(self.window,"<b>Passwords Don't Match!</b>",gtk.MESSAGE_ERROR)
self.error = 0
if self.verify.check_hostname(self.hostname.get_text()) == False:
self.MessageBox(self.window,"<b>Invalid Hostname!</b>",gtk.MESSAGE_ERROR)
self.error = 1
# Check Install Disks
self.install_disks = ""
self.ignore_disks = ""
for i in range(len(self.disk_info)):
if eval("self.disk%d.get_active()"%(i)) == True:
self.install_disks += self.disk_info[i][0]+","
else:
self.ignore_disks += self.disk_info[i][0]+","
self.data["INSTALL_DRIVES"] = self.install_disks[:-1]
self.data["IGNORE_DRIVES"] = self.ignore_disks[:-1]
if self.install_disks == "":
self.MessageBox(self.window,"<b>Please select at least one install disk!</b>",gtk.MESSAGE_ERROR)
self.error = 1
# Check LVM Partitioning
if self.lvm_check(args) == False:
self.error = 1
# Write Kickstart File
if self.error == 0:
# Generate Salt
self.salt = ''
self.alphabet = '.abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
for self.i in range(16):
self.index = random.randrange(len(self.alphabet))
self.salt = self.salt+self.alphabet[self.index]
# Encrypt Password
self.salt = '$6$'+self.salt
self.password = crypt.crypt(self.passwd,self.salt)
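			# The '$6$' prefix selects SHA-512 crypt, so self.password has the
			# form $6$<salt>$<hash> accepted by 'rootpw --iscrypted' below.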
# Write Classification Banner Settings
if int(self.system_profile.get_active()) == 1 or int(self.system_profile.get_active()) == 2:
f = open('/tmp/classification-banner','w')
f.write('message = "'+str(self.system_classification.get_active_text())+'"\n')
if int(self.system_classification.get_active()) == 0 or int(self.system_classification.get_active()) == 1:
f.write('fgcolor = "#000000"\n')
f.write('bgcolor = "#00CC00"\n')
elif int(self.system_classification.get_active()) == 2:
f.write('fgcolor = "#000000"\n')
f.write('bgcolor = "#33FFFF"\n')
elif int(self.system_classification.get_active()) == 3:
f.write('fgcolor = "#FFFFFF"\n')
f.write('bgcolor = "#FF0000"\n')
elif int(self.system_classification.get_active()) == 4:
f.write('fgcolor = "#FFFFFF"\n')
f.write('bgcolor = "#FF9900"\n')
elif int(self.system_classification.get_active()) == 5:
f.write('fgcolor = "#000000"\n')
f.write('bgcolor = "#FFFF00"\n')
elif int(self.system_classification.get_active()) == 6:
f.write('fgcolor = "#000000"\n')
f.write('bgcolor = "#FFFF00"\n')
else:
f.write('fgcolor = "#000000"\n')
f.write('bgcolor = "#FFFFFF"\n')
f.close()
# Write Kickstart Configuration
f = open('/tmp/stig-fix','w')
if int(self.system_profile.get_active()) > 0:
f.write('network --device eth0 --bootproto dhcp --noipv6 --hostname '+self.hostname.get_text()+'\n')
else:
f.write('network --device eth0 --bootproto static --ip=192.168.1.101 --netmask=255.255.255.0 --onboot=on --noipv6 --hostname '+self.hostname.get_text()+'\n')
f.write('rootpw --iscrypted '+str(self.password)+'\n')
f.write('bootloader --location=mbr --driveorder='+str(self.data["INSTALL_DRIVES"])+' --append="crashkernel=auto rhgb quiet audit=1" --password='+str(self.password)+'\n')
#f.close()
# Write Kickstart Configuration (Hostname/Passwords)
#f = open('/tmp/partitioning','w')
if self.data["IGNORE_DRIVES"] != "":
f.write('ignoredisk --drives='+str(self.data["IGNORE_DRIVES"])+'\n')
f.write('zerombr\n')
f.write('clearpart --all --drives='+str(self.data["INSTALL_DRIVES"])+'\n')
if self.encrypt_disk.get_active() == True:
f.write('part pv.01 --grow --size=200 --encrypted --cipher=\'aes-xts-plain64\' --passphrase='+str(self.passwd)+'\n')
else:
f.write('part pv.01 --grow --size=200\n')
f.write('part /boot --fstype=ext4 --size=300\n')
f.write('volgroup vg1 --pesize=4096 pv.01\n')
f.write('logvol / --fstype=ext4 --name=lv_root --vgname=vg1 --size=2048 --grow --percent='+str(self.root_partition.get_value_as_int())+'\n')
f.write('logvol /home --fstype=ext4 --name=lv_home --vgname=vg1 --size=1024 --grow --percent='+str(self.home_partition.get_value_as_int())+'\n')
f.write('logvol /tmp --fstype=ext4 --name=lv_tmp --vgname=vg1 --size=512 --grow --percent='+str(self.tmp_partition.get_value_as_int())+'\n')
f.write('logvol /var --fstype=ext4 --name=lv_var --vgname=vg1 --size=512 --grow --percent='+str(self.var_partition.get_value_as_int())+'\n')
f.write('logvol /var/log --fstype=ext4 --name=lv_log --vgname=vg1 --size=512 --grow --percent='+str(self.log_partition.get_value_as_int())+'\n')
f.write('logvol /var/log/audit --fstype=ext4 --name=lv_audit --vgname=vg1 --size=512 --grow --percent='+str(self.audit_partition.get_value_as_int())+'\n')
if self.swap_partition.get_value_as_int() >= 1:
f.write('logvol swap --fstype=swap --name=lv_swap --vgname=vg1 --size=256 --maxsize=4096 --percent='+str(self.swap_partition.get_value_as_int())+'\n')
if self.opt_partition.get_value_as_int() >= 1:
f.write('logvol /opt --fstype=ext4 --name=lv_opt --vgname=vg1 --size=512 --grow --percent='+str(self.opt_partition.get_value_as_int())+'\n')
if self.www_partition.get_value_as_int() >= 1:
f.write('logvol /var/www --fstype=ext4 --name=lv_www --vgname=vg1 --size=512 --grow --percent='+str(self.www_partition.get_value_as_int())+'\n')
f.close()
f = open('/tmp/system-choice','w')
if self.tim_install.get_active() == True:
				f.write('echo Installing tim config\n')
f.write('/opt/tim_config/install\n')
if self.core_install.get_active() == True:
				f.write('echo Installing core config\n')
f.write('/opt/core_config/install\n')
f.close()
gtk.main_quit()
# Executes Window Display
if __name__ == "__main__":
window = Display_Menu()
gtk.main()
| apache-2.0 | 9,098,609,482,478,488,000 | 40.409244 | 586 | 0.604542 | false |
spooky/lobby | src/widgets.py | 1 | 6282 | import logging
import os
import re
import asyncio
from collections import OrderedDict
from PyQt5.QtCore import QObject, QCoreApplication, QUrl, pyqtSignal, pyqtSlot
from PyQt5.QtQml import QQmlApplicationEngine
from PyQt5.QtGui import QGuiApplication, QIcon
from PyQt5.QtQuick import QQuickItem
import settings
import factories
from utils.async import asyncSlot
from view_models.chrome import MainWindowViewModel, LoginViewModel, TaskStatusViewModel
class Application(QGuiApplication):
logChanged = pyqtSignal(str)
initComplete = pyqtSignal()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
try:
self.setWindowIcon(QIcon('views/icons/faf.ico'))
except AttributeError: # setWindowIcon is available on windows only
pass
self.mapLookup = {}
self.modLookup = {}
@asyncio.coroutine
def __initMapLookup(self):
local = yield from factories.localMapLookup(settings.getMapDirs())
self.mapLookup.update(local)
@asyncio.coroutine
def __initModLookup(self):
local = yield from factories.localModLookup(settings.getModDirs())
self.modLookup.update(local)
@asyncio.coroutine
def __queueTask(self, asyncCoroutine, text='', indefinite=True, progress=0.0, running=False):
with self.report(text, indefinite, progress, running):
yield from asyncCoroutine()
@asyncSlot
def start(self):
logger = logging.getLogger(__name__)
try:
self.mainWindow = MainWindow(self)
self.mainWindow.show()
except Exception as e:
logger.critical('error during init: {}'.format(e))
self.quit()
else:
try:
logger.info('loading maps')
yield from self.__queueTask(self.__initMapLookup, QCoreApplication.translate('Application', 'loading maps'))
logger.info('loading mods')
yield from self.__queueTask(self.__initModLookup, QCoreApplication.translate('Application', 'loading mods'))
except Exception as e:
logger.error(e)
finally:
logger.debug('init complete')
self.initComplete.emit()
# Required for QtHandler to propagate log messages to client 'console'
def log(self, msg):
self.logChanged.emit(msg)
def report(self, text='', indefinite=True, progress=0.0, running=False):
status = TaskStatusViewModel(text, indefinite, progress, running)
self.mainWindow.windowModel.taskList.append(status)
return status
class MainWindow(QObject):
def __init__(self, parent=None):
super().__init__(parent)
self.log = logging.getLogger(__name__)
self.app = Application.instance()
self.windowModel = MainWindowViewModel(parent=self)
self.windowModel.switchView.connect(self._onSwitchView)
self.loginModel = LoginViewModel(self.app, parent=self)
self.loginModel.readCredentials()
self.loginModel.panelVisible = not self.loginModel.remember
if self.loginModel.remember:
self.loginModel.autologin()
self.engine = QQmlApplicationEngine(self)
self.engine.rootContext().setContextProperty('windowModel', self.windowModel)
self.engine.rootContext().setContextProperty('loginModel', self.loginModel)
self.engine.quit.connect(parent.quit)
self.engine.load(QUrl.fromLocalFile('views/Chrome.qml'))
self.viewManager = ViewManager(self.engine.rootContext(), self.windowModel, parent=self)
first = self._registerViews(settings.MODULES, self.app)
self.viewManager.loadView(first)
self.window = self.engine.rootObjects()[0]
# wire up logging console
self.console = self.window.findChild(QQuickItem, 'console')
parent.logChanged.connect(self._onLogChanged)
def show(self):
if not self.windowModel.currentView:
raise Exception('currentView not set')
self.window.show()
self.log.debug('client up')
def _registerViews(self, views, app):
for view in views:
self.viewManager.registerView(view)
        # TODO need nicer solution - would be nice if the list was notifiable
self.windowModel.registeredViews = list(self.viewManager.views)
return views[0]
@pyqtSlot(str)
def _onSwitchView(self, name):
self.viewManager.loadView(name)
@pyqtSlot(str)
def _onLogChanged(self, msg):
# replace with collections.deque binding(ish)?
if self.console.property('lineCount') == settings.LOG_BUFFER_SIZE:
line_end = self.console.property('text').find('\n') + 1
self.console.remove(0, line_end)
self.console.append(msg)
class ViewManager(QObject):
def __init__(self, context, windowViewModel, parent=None):
super().__init__(parent)
self._context = context
self._window = windowViewModel
self._views = OrderedDict()
def registerView(self, name, *args, **kwargs):
'''
        Works on a convention. The view requires 2 things:
1) the ui file which should be the camel cased .qml file in the ui directory. Path should be relative to Chrome.qml
2) the view model which should be a class in the view_models module
'''
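        # Example with a hypothetical module name: registerView('games')
        # imports games.view_models.GamesViewModel and pairs it with the
        # view later resolved by loadView() as ../games/views/Games.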
if self._views.get(name):
raise Exception('{} already registered'.format(name))
n = self._convertName(name)
vm_name = '{}ViewModel'.format(n)
# equivalent of from <name>.view_models import <vm_name>
vm = __import__(name + '.view_models', globals(), locals(), [vm_name], 0)
self._views[name] = (n, (getattr(vm, vm_name))(*args, parent=self, **kwargs))
def getView(self, name):
return self._views[name]
def loadView(self, name):
viewName, viewModel = self.getView(name)
self._context.setContextProperty('contentModel', viewModel)
self._window.currentView = os.path.join('..', name, 'views', viewName)
@property
def views(self):
return self._views
def _convertName(self, name):
return re.sub('([_\s]?)([A-Z]?[a-z]+)', lambda m: m.group(2).title(), name)
| gpl-3.0 | -6,388,136,681,272,199,000 | 34.491525 | 124 | 0.646769 | false |
albertomr86/python-logging-rabbitmq | setup.py | 1 | 1451 | from setuptools import setup
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
]
INSTALL_REQUIRES = [
'pika>=0.13'
]
TEST_REQUIRES = [
"pytest>=4.3.0",
"pytest-cov>=2.6.1",
"pytest-mock>=1.10.1",
]
setup(name='python-logging-rabbitmq',
version='2.0.0',
url='https://github.com/albertomr86/python-logging-rabbitmq',
description='Send logs to RabbitMQ from Python/Django',
keywords='logging rabbitmq logs',
license='MIT',
author='Alberto Menendez Romero',
author_email="[email protected]",
classifiers=CLASSIFIERS,
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*",
install_requires=INSTALL_REQUIRES,
packages=['python_logging_rabbitmq'],
extras_require={
'dev': ['check-manifest']
},
setup_requires=['pytest-runner'],
test_suite='tests',
tests_require=TEST_REQUIRES,
zip_safe=True)
| mit | -5,433,406,266,106,302,000 | 27.45098 | 82 | 0.620262 | false |
aristanetworks/arista-ovs-nova | nova/virt/netutils.py | 1 | 3524 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Network-releated utilities for supporting libvirt connection code."""
import netaddr
from nova.openstack.common import cfg
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.config')
CONF.import_opt('injected_network_template', 'nova.virt.disk.api')
Template = None
def _late_load_cheetah():
global Template
if Template is None:
t = __import__('Cheetah.Template', globals(), locals(),
['Template'], -1)
Template = t.Template
def get_net_and_mask(cidr):
net = netaddr.IPNetwork(cidr)
return str(net.ip), str(net.netmask)
def get_net_and_prefixlen(cidr):
net = netaddr.IPNetwork(cidr)
return str(net.ip), str(net._prefixlen)
def get_ip_version(cidr):
net = netaddr.IPNetwork(cidr)
return int(net.version)
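# For example, with cidr='192.168.1.0/24' the helpers above return
# ('192.168.1.0', '255.255.255.0'), ('192.168.1.0', '24') and 4 respectively.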
def get_injected_network_template(network_info, use_ipv6=CONF.use_ipv6,
template=CONF.injected_network_template):
"""
return a rendered network template for the given network_info
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
Note: this code actually depends on the legacy network_info, but will
convert the type itself if necessary.
"""
# the code below depends on the legacy 'network_info'
if hasattr(network_info, 'legacy'):
network_info = network_info.legacy()
nets = []
ifc_num = -1
have_injected_networks = False
for (network_ref, mapping) in network_info:
ifc_num += 1
if not network_ref['injected']:
continue
have_injected_networks = True
address = mapping['ips'][0]['ip']
netmask = mapping['ips'][0]['netmask']
address_v6 = None
gateway_v6 = None
netmask_v6 = None
if use_ipv6:
address_v6 = mapping['ip6s'][0]['ip']
netmask_v6 = mapping['ip6s'][0]['netmask']
gateway_v6 = mapping['gateway_v6']
net_info = {'name': 'eth%d' % ifc_num,
'address': address,
'netmask': netmask,
'gateway': mapping['gateway'],
'broadcast': mapping['broadcast'],
'dns': ' '.join(mapping['dns']),
'address_v6': address_v6,
'gateway_v6': gateway_v6,
'netmask_v6': netmask_v6}
nets.append(net_info)
if have_injected_networks is False:
return None
if not template:
return None
_late_load_cheetah()
ifc_template = open(template).read()
return str(Template(ifc_template,
searchList=[{'interfaces': nets,
'use_ipv6': use_ipv6}]))
| apache-2.0 | -1,055,264,008,260,136,000 | 29.37931 | 78 | 0.61975 | false |
Outernet-Project/tekhenu | tekhenu/routes/content_list.py | 1 | 5442 | """
content_list.py: Content list request handler, and suggestion form handler
Tekhenu
(c) 2014, Outernet Inc
All rights reserved.
This software is free software licensed under the terms of GPLv3. See COPYING
file that comes with the source code, or http://www.gnu.org/licenses/gpl.txt.
"""
from __future__ import unicode_literals, division
import math
import logging
from urlparse import urlparse
from bottle_utils import csrf
from bottle_utils.i18n import i18n_path
from google.appengine.ext import ndb
from bottle_utils.i18n import lazy_gettext as _
from bottle import view, default_app, request, response, redirect
from db.models import Content
from . import QueryResult
app = default_app()
PREFIX = '/'
def get_content_list(per_page=20):
"""
Create a query over ``Content`` objects using query string parameters.
:param per_page: number of items to return per page
:returns: ``QueryResult`` object
"""
search = request.params.getunicode('q', '').strip()
status = request.params.get('status')
license = request.params.get('license')
votes = request.params.get('votes')
page = int(request.params.get('p', '1'))
q = Content.query()
if search:
keywords = Content.get_keywords(search)
if len(keywords) > 1:
q = q.filter(ndb.AND(*[Content.keywords == kw for kw in keywords]))
if len(keywords) == 1:
q = q.filter(Content.keywords == keywords[0])
if status:
q = q.filter(Content.status == status)
if license == 'free':
q = q.filter(Content.is_free == True)
elif license == 'nonfree':
q = q.filter(Content.is_free == False)
elif license == 'unknown':
q = q.filter(Content.license == None)
if votes == 'asc':
q = q.order(+Content.votes)
elif votes == 'desc':
q = q.order(-Content.votes)
q = q.order(-Content.updated)
count = q.count()
if not count:
return QueryResult([], count, 1, 1)
npages = int(math.ceil(count / per_page))
if page * per_page > count:
page = npages
offset = int(per_page * (page - 1))
return QueryResult(q.fetch(per_page, offset=offset), count, page, npages)
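# Worked example (illustrative numbers, not from the original code): with
# per_page=20 and count=45, npages = ceil(45 / 20) = 3. A request for p=5
# gives page * per_page = 100 > 45, so page is clamped to npages (3), the
# offset becomes 20 * (3 - 1) = 40, and q.fetch(20, offset=40) returns the
# last 5 items.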
@app.get(PREFIX)
@csrf.csrf_token
@view('content_list', errors={}, Content=Content)
def show_content_list():
"""
Show a list of 10 last-updated pieces of content and a suggestion form.
"""
return dict(vals=request.params, content=get_content_list())
@app.post(PREFIX)
@csrf.csrf_protect
@view('content_list', Content=Content)
def add_content_suggestion():
"""
Handle a content suggestion request.
"""
# TODO: Handle Unicode URLs
url = Content.validate_url(request.forms.get('url', ''))
license = request.forms.get('license') or None
errors = {}
if not url:
# Translators, used as error message on failure to submit suggestion
errors['url'] = _('This URL is invalid')
if license:
license = license.strip().upper()
if license not in Content.LICENSE_CHOICES:
# Translators, used as error message on failure to submit
# suggestion
errors['license'] = _('Please select a license from provided '
'choices')
if not errors:
try:
content = Content.create(url=url, license=license)
logging.info("Created content for '%s' (real url: '%s')", url,
content.url)
response.flash(_('Your suggestion has been added'))
redirect(i18n_path(content.path))
except Content.InvalidURLError as err:
logging.debug("URL error while parsing '%s': %s", url, err)
# Translators, used as error message on failure to submit suggestion
errors['url'] = _('This URL is invalid')
except Content.FetchError as err:
logging.debug("Fetch error while parsing '%s': %s (%s)",
url, err, err.error)
# Translators, used as error message on failure to submit suggestion
errors['url'] = _('The page at specified URL does not exist or '
'the domain cannot be reached.')
except Content.NotAllowedError as err:
logging.debug("Access error while parsing '%s': %s", url, err)
# Translators, used as error message on failure submit suggestion
errors['url'] = _('The page must be accessible to robots')
except Content.ContentError as err:
logging.debug("Content error while parsing '%s': %s (%s)", url,
err, err.error)
# Translators, used as error message on failure submit suggestion
errors['url'] = _('The content on the page could not be '
'understood, please provide and URL to a valid '
'web page')
except Exception as err:
logging.debug("Unknown error fetching '%s': %s", url, err)
# Translators, used as error message on failure to submit suggestion
errors['url'] = _('There was an unknown error with the URL')
return dict(vals=request.forms, errors=errors, Content=Content,
content=get_content_list())
| gpl-3.0 | 883,379,827,784,138,000 | 34.109677 | 79 | 0.610437 | false |
pearpai/TensorFlow-action | deep_learning_with_tensorFlow/Chapter04/p7902.py | 1 | 1719 | import tensorflow as tf
from numpy.random import RandomState
batch_size = 8
# Two input nodes
x = tf.placeholder(tf.float32, shape=(None, 2), name='x-input')
# A regression problem generally has only one output node
y_ = tf.placeholder(tf.float32, shape=(None, 1), name='y-input')
# Define the forward pass of a single-layer neural network; here it is just a simple weighted sum
w1 = tf.Variable(tf.random_normal([2, 1], stddev=1, seed=1))
y = tf.matmul(x, w1)
# Define the cost of predicting too much and of predicting too little
loss_less = 10
loss_more = 1
loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less))
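# Worked example (illustrative numbers): with loss_less=10 and loss_more=1,
# predicting y=1.2 against a true y_=1.0 costs (1.2 - 1.0) * 1 = 0.2, while
# predicting y=0.8 costs (1.0 - 0.8) * 10 = 2.0, so the optimiser is pushed
# towards over-predicting rather than under-predicting.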
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)
# Generate a simulated dataset from random numbers
rdm = RandomState(1)
dataset_size = 128
X = rdm.rand(dataset_size, 2)
# Set the regression target to the sum of the two inputs plus a small random
# quantity. The noise is added because otherwise comparing different loss
# functions would be of little interest: every loss function is minimised when
# the prediction is exactly right. The noise is normally a small zero-mean
# value, so here it is a random number in the range -0.05 ~ 0.05
Y = [[x1 + x2 + (rdm.rand() / 10.0 - 0.05)] for (x1, x2) in X]
# Train the neural network
with tf.Session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
STEPS = 5000
for i in range(STEPS):
start = (i * batch_size) % 128
end = (i * batch_size) % 128 + batch_size
sess.run(train_step, feed_dict={x: X[start:end], y_: Y[start:end]})
if i % 1000 == 0:
print("After %d training step(s), w1 is: " % (i))
print(sess.run(w1), "\n")
print("Final w1 is: \n", sess.run(w1))
| apache-2.0 | -4,511,052,033,741,601,000 | 29.813953 | 93 | 0.638491 | false |
xunxunzgq/open-hackathon-bak_01 | open-hackathon-server/src/hackathon/user/login.py | 1 | 13490 | # -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------------
# Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------------
# -*- coding:utf8 -*-
# encoding = utf-8
import sys
sys.path.append("..")
import json
from hackathon.constants import OAUTH_PROVIDER
from hackathon import RequiredFeature, Component
import urllib2
class LoginProviderBase(Component):
user_manager = RequiredFeature("user_manager")
def login(self, args):
pass
def logout(self, user):
return self.user_manager.db_logout(user)
def user_display_info_with_token(self, user_with_token):
user = user_with_token["user"]
login_result = {
"user": self.user_manager.user_display_info(user),
"token": user_with_token["token"].token
}
self.log.debug("user login successfully:" + repr(login_result))
return login_result
def get_remote(self, url, headers=None):
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(url, None, headers)
resp = opener.open(request)
return resp.read()
class QQLogin(LoginProviderBase):
def login(self, args):
access_token = args['access_token']
# get openID.
openid_resp = self.get_remote(self.util.get_config("login.qq.openid_url") + access_token)
self.log.debug("get access_token from qq:" + access_token)
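# The openid endpoint replies with a JSONP-style wrapper, roughly
# 'callback( {"client_id":"...","openid":"..."} );' (assumed from the
# slicing below), so [10:-4] strips the wrapper before parsing the JSON.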
info = json.loads(openid_resp[10:-4])
openid = info['openid']
self.log.debug("get openid from qq:" + openid)
client_id = info['client_id']
self.log.debug("get client_id from qq:" + client_id)
# get user info
url = self.util.get_config("login.qq.user_info_url") % (access_token, client_id, openid)
user_info_resp = self.get_remote(url)
self.log.debug("get user info from qq:" + user_info_resp)
user_info = self.util.convert(json.loads(user_info_resp))
email_list = []
user_with_token = self.user_manager.db_login(openid,
provider=OAUTH_PROVIDER.QQ,
name=user_info["nickname"],
nickname=user_info["nickname"],
access_token=access_token,
email_list=email_list,
avatar_url=user_info["figureurl"])
return self.user_display_info_with_token(user_with_token)
class GithubLogin(LoginProviderBase):
def login(self, args):
access_token = args.get('access_token')
# get user info
user_info_resp = self.get_remote(self.util.get_config('login.github.user_info_url') + access_token)
self.log.debug("get user info from github:" + user_info_resp + '\n')
# example:
#
# {"login":"juniwang","id":8814383,"avatar_url":"https://avatars.githubusercontent.com/u/8814383?v=3","gravatar_id":"",
# "url":"https://api.github.com/users/juniwang","html_url":"https://github.com/juniwang",
# "followers_url":"https://api.github.com/users/juniwang/followers",
# "following_url":"https://api.github.com/users/juniwang/following{/other_user}",
# "gists_url":"https://api.github.com/users/juniwang/gists{/gist_id}",
# "starred_url":"https://api.github.com/users/juniwang/starred{/owner}{/repo}",
# "subscriptions_url":"https://api.github.com/users/juniwang/subscriptions",
# "organizations_url":"https://api.github.com/users/juniwang/orgs","repos_url":"https://api.github.com/users/juniwang/repos",
# "events_url":"https://api.github.com/users/juniwang/events{/privacy}",
# "received_events_url":"https://api.github.com/users/juniwang/received_events","type":"User","site_admin":false,
# "name":"Junbo Wang","company":"","blog":"","location":"Shanghai China",
# "email":"[email protected]","hireable":false,"bio":null,"public_repos":12,"public_gists":0,"followers":0,
# "following":1,"created_at":"2014-09-18T01:30:30Z","updated_at":"2014-11-25T09:00:37Z","private_gists":0,
# "total_private_repos":0,"owned_private_repos":0,"disk_usage":14179,"collaborators":0,
# "plan":{"name":"free","space":307200,"collaborators":0,"private_repos":0}}
#
user_info = json.loads(user_info_resp)
name = user_info["login"]
nickname = user_info["name"] if "name" in user_info else name
openid = str(user_info["id"])
avatar = user_info["avatar_url"]
# get user primary email
email_info_resp = self.get_remote(self.util.get_config('login.github.emails_info_url') + access_token)
self.log.debug("get email from github:" + email_info_resp + '\n')
# email_info include all user email provided by github
email_list = json.loads(email_info_resp)
user_with_token = self.user_manager.db_login(openid,
provider=OAUTH_PROVIDER.GITHUB,
name=name,
nickname=nickname,
access_token=access_token,
email_list=email_list,
avatar_url=avatar)
return self.user_display_info_with_token(user_with_token)
class GitcafeLogin(LoginProviderBase):
def login(self, args):
token = args.get('access_token')
value = "Bearer " + token
header = {"Authorization": value}
user_info = self.get_remote(self.util.get_config("login.gitcafe.user_info_url"), header)
self.log.debug("get user info from GitCafe:" + user_info + "\n")
info = json.loads(user_info)
name = info['username']
email = info['email']
id = info['id']
nickname = info['fullname']
if nickname is None:
nickname = name
if info['avatar_url'].startswith('https'):
avatar_url = info['avatar_url']
else:
avatar_url = "https" + info['avatar_url'][4:]
email_list = [
{
'name': name,
'email': email,
'verified': 1,
'primary': 1
}
]
user_with_token = self.user_manager.db_login(id,
provider=OAUTH_PROVIDER.GITCAFE,
name=name,
nickname=nickname,
access_token=token,
email_list=email_list,
avatar_url=avatar_url)
return self.user_display_info_with_token(user_with_token)
class WeiboLogin(LoginProviderBase):
def login(self, args):
access_token = args.get('access_token')
uid = args.get('uid')
# get user info
# https://api.weibo.com/2/users/show.json?access_token=2.005RDjXC0rYD8d39ca83156aLZWgZE&uid=1404376560
user_info_resp = self.get_remote(
self.util.get_config('login.weibo.user_info_url') + access_token + "&uid=" + uid)
user_info = json.loads(user_info_resp)
self.log.debug("get user base info from Weibo:" + user_info_resp)
# {"id":2330622122,"idstr":"2330622122","class":1,"screen_name":"test name","name":"test name",
# "province":"31","city":"10","location":"shanghai yangpu","description":"","url":"",
# "profile_image_url":"http://tp3.sinaimg.cn/2330622122/50/5629035320/1",
# "profile_url":"u/2330622122","domain":"","weihao":"","gender":"m","followers_count":34,
# "friends_count":42,"pagefriends_count":0,"statuses_count":0,"favourites_count":1,
# "created_at":"Mon Aug 22 17:58:15 +0800 2011","following":false,"allow_all_act_msg":false,
# "geo_enabled":true,"verified":false,"verified_type":-1,"remark":"","ptype":0,"allow_all_comment":true,
# "avatar_large":"http://tp3.sinaimg.cn/2330622122/180/5629035320/1","avatar_hd":"http://tp3.sinaimg.cn/2330622122/180/5629035320/1",
# "verified_reason":"","verified_trade":"","verified_reason_url":"","verified_source":"","verified_source_url":"",
# "follow_me":false,"online_status":0,"bi_followers_count":8,"lang":"zh-cn","star":0,"mbtype":0,"mbrank":0,
# "block_word":0,"block_app":0,"credit_score":80,"urank":6}
openid = user_info['id']
name = user_info['name']
nickname = user_info['screen_name']
avatar_url = user_info['avatar_hd']
# get user email
email_list = []
try:
email_info_resp = self.get_remote(self.util.get_config('login.weibo.email_info_url') + access_token)
self.log.debug("get email from weibo:" + email_info_resp)
email_info_resp_json = json.loads(email_info_resp)
email = email_info_resp_json['email']
email_list = [
{
'name': name,
'email': email,
'verified': 1,
'primary': 1
}
]
except Exception as e:
self.log.debug("fail to get user email from weibo")
self.log.error(e)
user_with_token = self.user_manager.db_login(openid,
provider=OAUTH_PROVIDER.WEIBO,
name=name,
nickname=nickname,
access_token=access_token,
email_list=email_list,
avatar_url=avatar_url)
return self.user_display_info_with_token(user_with_token)
class LiveLogin(LoginProviderBase):
def login(self, args):
access_token = args.get('access_token')
self.log.debug("access_token is following")
self.log.debug(access_token)
self.log.debug(self.util.get_config('login.live.user_info_url'))
user_info_resp = self.get_remote(self.util.get_config('login.live.user_info_url') + access_token)
self.log.debug("get user info from live:" + user_info_resp)
# user.info
# {u'first_name': u'Ice', u'last_name': u'Shi', u'name': u'Ice Shi', u'locale': u'en_US', \
# u'gender': None,\
# u'emails': {u'personal': None, u'account': u'[email protected]', u'business': None, u'preferred': u'[email protected]'}, \
# u'link': u'https://profile.live.com/', \
# u'updated_time': u'2015-05-13T02:28:32+0000',\
# u'id': u'655c03b1b314b5ee'}
user_info = json.loads(user_info_resp)
self.log.debug(user_info)
name = user_info["name"]
openid = str(args.get('user_id'))
# avatar = user_info["avatar_url"]
email = user_info["emails"]["account"]
email_list = [
{
'name': name,
'email': email,
'verified': 1,
'primary': 1
}
]
user_with_token = self.user_manager.db_login(openid,
provider=OAUTH_PROVIDER.LIVE,
name=name,
nickname=name,
access_token=access_token,
email_list=email_list,
avatar_url=None)
return self.user_display_info_with_token(user_with_token)
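# Hedged usage sketch (not part of the original module): a caller that has
# already obtained an OAuth access token would look up the handler in the
# registry defined below and delegate to it, along the lines of
#
#   provider = login_providers[OAUTH_PROVIDER.GITHUB]
#   login_result = provider.login({'access_token': access_token})
#   # login_result -> {"user": {...}, "token": "..."}
#
# The exact argument keys vary per provider (e.g. WeiboLogin also expects
# 'uid' and LiveLogin expects 'user_id'), as the login() methods above show.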
login_providers = {
OAUTH_PROVIDER.GITHUB: GithubLogin(),
OAUTH_PROVIDER.QQ: QQLogin(),
OAUTH_PROVIDER.GITCAFE: GitcafeLogin(),
OAUTH_PROVIDER.WEIBO: WeiboLogin(),
OAUTH_PROVIDER.LIVE: LiveLogin()
} | mit | 5,231,581,005,753,723,000 | 46.671378 | 141 | 0.544922 | false |
ThoriumGroup/thorium | thorium/utils/flags.py | 1 | 13209 | #!/usr/bin/env python
"""
Thorium Utils Flags
===================
Nuke Knob Flags which can be difficult to access due to Nuke not storing
readily available variables for them, forcing the use the integer values as
seen below.
Any of these flags can now be used with:
::
from thorium.utils import flags
And then when needed:
::
gain = nuke.Array_Knob('gain')
gain.setFlag(flags.SLIDER)
gain.setFlag(flags.LOG_SLIDER)
Non-PEP8 Styling is used within this script to preserve readability.
## Version Restrictions
Flags new in 6.3:
* KNOB_CHANGED_RECURSIVE
* MODIFIES_GEOMETRY
* OUTPUT_ONLY
* NO_KNOB_CHANGED_FINISHED
* SET_SIZE_POLICY
* EXPAND_TO_WIDTH
Flags new in 6.2:
* READ_ONLY
* GRANULARY_UNDO
* NO_RECURSIVE_PATHS
## License
The MIT License (MIT)
Flags
Copyright (c) 2010-2014 John R.A. Benson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# =============================================================================
# GLOBALS
# =============================================================================
# General Flags ===============================================================
# Must not intersect any class-specific flags
DISABLED = 0x0000000000000080 # DISABLED Set by disable(), cleared by enable().
NO_ANIMATION = 0x0000000000000100 # NO_ANIMATION Prevent the value from being animated.
# This removes any animation or view buttons, and
# it stops tcl expressions from being evaluated in
# string knobs, and may make it ignore attempts to
# set expressions or key frames (nyi).
DO_NOT_WRITE = 0x0000000000000200 # DO_NOT_WRITE Don't ever save this knob to a script
# (including copy & paste!)
INVISIBLE = 0x0000000000000400 # INVISIBLE The knob does not appear in the panels.
# No widgets are created. This is not the same
# as hide(), and show() will not undo it!
RESIZABLE = 0x0000000000000800 # RESIZABLE The knob can stretch in the panel so
# that it fills up all the remaining space in the line.
# Defaults to true for most of the complex knobs,
# but off for buttons, checkmarks, and pulldown lists.
STARTLINE = 0x0000000000001000 # STARTLINE This knob starts a new row in the panel.
# The default is true unless a zero-length (not NULL)
# string is passed as the label. Currently the default
# is false for checkmarks and buttons but this may
# change in future versions.
ENDLINE = 0x0000000000002000 # ENDLINE This knob will end a row, acts exactly
# like STARTLINE was set on the next knob.
# Set true for divider lines.
NO_RERENDER = 0x0000000000004000 # NO_RERENDER This knob does not contribute to the
# hash value for the op. This should be used on knobs
# that have no effect on the op's output.
NO_HANDLES = 0x0000000000008000 # NO_HANDLES Don't draw anything in the viewer,
# this is useful if the Op draws it's own indicators.
KNOB_CHANGED_ALWAYS = 0x0000000000010000 # KNOB_CHANGED_ALWAYS will call node()->knob_changed()
# every time the value of the knob changes. Normally
# it is only called if the user changes the value with
# the panel open. This allows you to track all changes to
# the value. Be careful as knob_changed() will be called
# without storing the new values into your structure.
NO_KNOB_CHANGED = 0x0000000000020000 # NO_KNOB_CHANGED: Don't bother calling Op::knob_changed()
# with this knob. This is turned on automatically
# if the knob_changed() returns false.
HIDDEN = 0x0000000000040000 # HIDDEN Set by hide(), cleared by show().
NO_UNDO = 0x0000000000080000 # NO_UNDO Don't undo/redo any changes to this knob.
# May be replaced with "output knob" in the future.
ALWAYS_SAVE = 0x0000000000100000 # ALWAYS_SAVE save the knob to a script even if not_default()
# returns false. *Deprecated*, instead override
# not_default() and make it return true!
NODE_KNOB = 0x0000000000200000 # NODE_KNOB is used by Nuke internally for controls on
# the DAG appearance such as xpos and ypos.
HANDLES_ANYWAY = 0x0000000000400000 # HANDLES_ANYWAY makes the handles appear in the viewer when
# the panel is open even if a different tab is selected.
READ_ONLY = 0x0000000010000000 # knob cannot be modified by UI intervention but can
# still be copied from etc
# Internal Use Flags ==========================================================
INDETERMINATE = 0x0000000000800000
COLOURCHIP_HAS_UNSET = 0x0000000001000000 # whether a color chip can be in the 'unset' state,
# DEFAULTS TO FALSE
SMALL_UI = 0x0000000002000000
NO_NUMERIC_FIELDS = 0x0000000004000000
NO_CURVE_EDITOR = 0x0000000020000000
NO_MULTIVIEW = 0x0000000040000000
EARLY_STORE = 0x0000000080000000
KNOB_CHANGED_RECURSIVE = 0x0000000008000000 # 6.3 recursive knobChanged calls are guarded against.
# To override the non-recursion on a particular knob,
# specify this flag
MODIFIES_GEOMETRY = 0x0000000100000000 # 6.3 MODIFIES_GEOMETRY should be set for any knob
# that modifies geometry, either by affecting the
# internal geometry directly or by changing its transform
OUTPUT_ONLY = 0x0000000200000000 # 6.3
NO_KNOB_CHANGED_FINISHED = 0x0000000400000000 # 6.3
SET_SIZE_POLICY = 0x0000000800000000 # 6.3
EXPAND_TO_WIDTH = 0x0000001000000000 # 6.3 Just for enum knobs currently
# Numeric Knob Flags ==========================================================
MAGNITUDE = 0x0000000000000001 # MAGNITUDE If there are several numbers, this enables a
# button to only show a single number, and all are set
# equal to this number. Default is true for WH_knob()
# and Color_knob().
SLIDER = 0x0000000000000002 # SLIDER Turns on the slider. Currently this only works if
# the size is 1 or MAGNITUDE is enabled and it is set
# to single numbers.
# Defaults to on for most non-integer numerical controls.
LOG_SLIDER = 0x0000000000000004 # LOG_SLIDER Tick marks on the slider (if enabled with SLIDER)
# are spaced logarithmically. This is turned on for
# WH_knob() and Color_knob(), and if the range has both
# ends greater than zero. If you turn this on and the
# range passes through zero, the scale is actually the cube
# root of the number, not the logarithm.
STORE_INTEGER = 0x0000000000000008 # STORE_INTEGER Only integer values should be displayed/stored
FORCE_RANGE = 0x0000000000000010 # FORCE_RANGE Clamps the value to the range when storing.
ANGLE = 0x0000000000000020 # ANGLE Turn on a widget depicting this number as an angle.
NO_PROXYSCALE = 0x0000000000000040 # NO_PROXYSCALE disables proxy scaling for XY or WH knobs.
# Useful if you just want two numbers called "x" and "y"
# that are not really a position.
# You probably also want to do NO_HANDLES.
# String Knob Flags ===========================================================
GRANULAR_UNDO = 0x0000000000000001
NO_RECURSIVE_PATHS = 0x0000000000000002
# Enumeration Knob Flags ======================================================
SAVE_MENU = 0x0000000002000000 # SAVE_MENU writes the contents of the menu to the saved
# script. Useful if your plugin modifies the list of items.
# BeginGroup Knob Flags =======================================================
CLOSED = 0x0000000000000001 # CLOSED True for a BeginGroup knob that is closed
TOOLBAR_GROUP = 0x0000000000000002 # Make the group into a viewer toolbar
TOOLBAR_LEFT = 0x0000000000000000 # Position in the viewer. Only set one of these:
TOOLBAR_TOP = 0x0000000000000010
TOOLBAR_BOTTOM = 0x0000000000000020
TOOLBAR_RIGHT = 0x0000000000000030
TOOLBAR_POSITION = 0x0000000000000030 # A mask for the position part of the flags
# ChannelSet/Channel Knob Flags ===============================================
NO_CHECKMARKS = 0x0000000000000001 # NO_CHECKMARKS Get rid of the individual channel checkmarks.
NO_ALPHA_PULLDOWN = 0x0000000000000002 # NO_ALPHA_PULLDOWN Get rid of the extra pulldown that lets
# you set the 4th channel to an arbitrary different layer
# than the first 3.
# Format Knob Flags ===========================================================
PROXY_DEFAULT = 0x0000000000000001 # PROXY_DEFAULT makes the default value be the
# root.proxy_format rather than the root.format.
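# Hedged example (not part of the original module; assumes Nuke's standard
# Knob.setFlag()/clearFlag() API, as in the docstring above):
#
#   import nuke
#   from thorium.utils import flags
#
#   size = nuke.Array_Knob('size')
#   size.setFlag(flags.SLIDER)         # show a slider for the value
#   size.setFlag(flags.FORCE_RANGE)    # clamp stored values to the range
#   size.setFlag(flags.STORE_INTEGER)  # store whole numbers only
#   size.clearFlag(flags.SLIDER)       # flags can be removed the same way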
# =============================================================================
# EXPORTS
# =============================================================================
__all__ = [
'ALWAYS_SAVE',
'ANGLE',
'CLOSED',
'COLOURCHIP_HAS_UNSET',
'DISABLED',
'DO_NOT_WRITE',
'EARLY_STORE',
'ENDLINE',
'EXPAND_TO_WIDTH',
'FORCE_RANGE',
'GRANULAR_UNDO',
'HANDLES_ANYWAY',
'HIDDEN',
'INDETERMINATE',
'INVISIBLE',
'KNOB_CHANGED_ALWAYS',
'KNOB_CHANGED_RECURSIVE',
'LOG_SLIDER',
'MAGNITUDE',
'MODIFIES_GEOMETRY',
'NODE_KNOB',
'NO_ALPHA_PULLDOWN',
'NO_ANIMATION',
'NO_CHECKMARKS',
'NO_CURVE_EDITOR',
'NO_HANDLES',
'NO_KNOB_CHANGED',
'NO_KNOB_CHANGED_FINISHED',
'NO_MULTIVIEW',
'NO_NUMERIC_FIELDS',
'NO_PROXYSCALE',
'NO_RECURSIVE_PATHS',
'NO_RERENDER',
'NO_UNDO',
'OUTPUT_ONLY',
'PROXY_DEFAULT',
'READ_ONLY',
'RESIZABLE',
'SAVE_MENU',
'SET_SIZE_POLICY',
'SLIDER',
'SMALL_UI',
'STARTLINE',
'STORE_INTEGER',
'TOOLBAR_BOTTOM',
'TOOLBAR_GROUP',
'TOOLBAR_LEFT',
'TOOLBAR_POSITION',
'TOOLBAR_RIGHT',
'TOOLBAR_TOP',
]
| mit | -3,900,856,096,993,762,300 | 44.705882 | 106 | 0.532516 | false |
codl/forget | app.py | 1 | 3007 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import MetaData
from flask_migrate import Migrate
import version
from libforget.cachebust import cachebust
import mimetypes
import libforget.brotli
import libforget.img_proxy
from werkzeug.middleware.proxy_fix import ProxyFix
app = Flask(__name__)
default_config = {
"SQLALCHEMY_TRACK_MODIFICATIONS": False,
"SQLALCHEMY_DATABASE_URI": "postgresql+psycopg2:///forget",
"HTTPS": True,
"SENTRY_CONFIG": {},
"REPO_URL": "https://github.com/codl/forget",
"CHANGELOG_URL": "https://github.com/codl/forget/blob/{hash}/CHANGELOG.markdown",
"REDIS_URI": "redis://",
}
app.config.update(default_config)
app.config.from_pyfile('config.py', True)
metadata = MetaData(naming_convention={
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
})
db = SQLAlchemy(app, metadata=metadata)
migrate = Migrate(app, db)
if 'CELERY_BROKER' not in app.config:
uri = app.config['REDIS_URI']
if uri.startswith('unix://'):
uri = uri.replace('unix', 'redis+socket', 1)
app.config['CELERY_BROKER'] = uri
sentry = None
if 'SENTRY_DSN' in app.config:
from raven.contrib.flask import Sentry
app.config['SENTRY_CONFIG']['release'] = version.get_versions()['version']
sentry = Sentry(app, dsn=app.config['SENTRY_DSN'])
url_for = cachebust(app)
@app.context_processor
def inject_static():
def static(filename, **kwargs):
return url_for('static', filename=filename, **kwargs)
return {'st': static}
@app.after_request
def install_security_headers(resp):
csp = ("default-src 'none';"
"img-src 'self';"
"style-src 'self' 'unsafe-inline';"
"frame-ancestors 'none';"
)
if 'SENTRY_DSN' in app.config:
csp += "script-src 'self' https://cdn.ravenjs.com/;"
csp += "connect-src 'self' https://sentry.io/;"
else:
csp += "script-src 'self' 'unsafe-eval';"
csp += "connect-src 'self';"
if 'CSP_REPORT_URI' in app.config:
csp += "report-uri " + app.config.get('CSP_REPORT_URI')
if app.config.get('HTTPS'):
resp.headers.set('strict-transport-security',
'max-age={}'.format(60*60*24*365))
csp += "; upgrade-insecure-requests"
resp.headers.set('Content-Security-Policy', csp)
resp.headers.set('referrer-policy', 'no-referrer')
resp.headers.set('x-content-type-options', 'nosniff')
resp.headers.set('x-frame-options', 'DENY')
resp.headers.set('x-xss-protection', '1')
return resp
mimetypes.add_type('image/webp', '.webp')
libforget.brotli.brotli(app)
imgproxy = (
libforget.img_proxy.ImgProxyCache(redis_uri=app.config.get('REDIS_URI')))
app.wsgi_app = ProxyFix(app.wsgi_app, x_proto=1, x_host=1)
| isc | -8,197,995,419,473,210,000 | 29.373737 | 89 | 0.63984 | false |
jianmingtang/PIC-tools | Python/PIC/DistNASA.py | 1 | 4991 | # Copyright (C) 2014 Jian-Ming Tang <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Distribution
------------
"""
import numpy
class DistNASA:
"""
This class is used to store data in ndarray from a NASA PIC data file.
Methods for data slicing and summation are provided.
"""
data_t = {}
def __init__(self, fname, grid, nss=4):
"""
fname: data filename
grid: number of grid points
nss: number of species
"""
self.grid = grid
self.nss = nss
datatype = numpy.dtype([
('pad1', 'i4'),
('axes', 'f4', (nss, grid)),
('xlo', 'f4'), ('xhi', 'f4'), ('zlo', 'f4'), ('zhi', 'f4'),
('ic', 'i4', (nss,)),
('fxyz', 'f4', (nss, grid, grid, grid)),
('fxy', 'f4', (nss, grid, grid)),
('fxz', 'f4', (nss, grid, grid)),
('fyz', 'f4', (nss, grid, grid)),
('vxa', 'f4', (nss,)),
('vya', 'f4', (nss,)),
('vza', 'f4', (nss,)),
('pad2', 'i4')
])
self.data = numpy.fromfile(fname, datatype)[0]
self.truncate([0, grid])
def __getitem__(self, key):
return self.data_t[key]
def __str__(self):
"""
"""
s = '\n'
s += 'Bin location: '
s += 'x=(%4g,%4g), z=(%4g,%4g)\n' % (
self.data['xlo'], self.data['xhi'],
self.data['zlo'], self.data['zhi'])
# This is broken due to truncation
# This is hard coded to species 1
# s += '(Hard coded) Axes max: %4g\n' % self['axes'][1][-1]
# s += '\n'
# for i in range(self.nss):
# s += 'v['+str(i)+'] = ({0:g}, {1:g}, {2:g})\n'.format(
# self['vxa'][i], self['vya'][i], self['vza'][i])
return s
def truncate(self, r):
""" We do basic slicing here, so that no copies are made.
"""
b = r[0]
e = r[1]
for k in ['fxy', 'fxz', 'fyz']:
self.data_t[k] = self.data[k][:, b:e, b:e]
self.data_t['fxyz'] = self.data['fxyz'][:, b:e, b:e, b:e]
self.data_t['axes'] = self.data['axes'][:, b:e]
# print help(dict(self.data))
# print self.data.has_key('cut')
# if self.data.has_key('cut'):
# self.data_t['cut'] = self.data['cut'][:,b:e,b:e]
def cut(self, p):
"""
Cut out a 2D slice from the 3D data
p = [dir,rmin,rmax]
"""
rmin = int(p[1])
rmax = int(p[2])
A = self['fxyz']
if p[0] == 'x':
self.dataCUT = A[:,:,:, rmin]
for i in range(rmin+1, rmax+1):
self.dataCUT += A[:,:,:, i]
elif p[0] == 'y':
self.dataCUT = A[:,:, rmin,:]
for i in range(rmin+1, rmax+1):
self.dataCUT += A[:,:, i,:]
elif p[0] == 'z':
self.dataCUT = A[:, rmin,:,:]
for i in range(rmin+1, rmax+1):
self.dataCUT += A[:, i,:,:]
else:
raise IndexError
self.data_t['cut'] = self.dataCUT
def _check_add(self, sps):
# Check the ranges of velocities are consistent.
allowed_error = [1.e-6] * self.grid
self.axes = self['axes'][int(sps[0])]
for s in sps[1:]:
diff = self['axes'][int(s)] - self.axes
if numpy.any(diff > allowed_error):
print sps, ' cannot be combined!!!'
raise IndexError
def add2D(self, sps):
"""
Combine species for a 2D slice
sps = [s1,s2,...]
"""
self._check_add(sps)
self.data2D = self.dataCUT[int(sps[0])]
for s in sps[1:]:
self.data2D += self.dataCUT[int(s)]
def add_reduced(self, sps):
"""
Combine species for reduced data sets
sps = [s1,s2,...]
"""
self._check_add(sps)
self.dataR = {}
for f in ['fxy', 'fxz', 'fyz']:
self.dataR[f] = self[f][int(sps[0])]
for s in sps[1:]:
self.dataR[f] += self[f][int(s)]
def add3D(self, sps):
"""
Combine species for 3D data
sps = [s1,s2,...]
"""
self._check_add(sps)
self.data3D = self['fxyz'][int(sps[0])]
for s in sps[1:]:
self.data3D += self['fxyz'][int(s)]
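# Hedged usage sketch (the file name, grid size, and bin choices below are
# made up for illustration; they are not part of the original module):
#
#   dist = DistNASA("distributions.dat", grid=101, nss=4)
#   print dist                    # bin location summary
#   dist.cut(['y', 40, 60])       # sum slabs 40..60 along y into dataCUT
#   dist.add2D(['0', '2'])        # combine species 0 and 2 -> data2D
#   dist.add_reduced(['0', '2'])  # combine reduced fxy/fxz/fyz -> dataR
#   dist.add3D(['0', '2'])        # combine full 3D data -> data3D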
| gpl-3.0 | 4,494,715,066,244,990,000 | 31.620915 | 75 | 0.47766 | false |
porolakka/motioneye-jp | src/v4l2ctl.py | 1 | 11358 |
# Copyright (c) 2013 Calin Crisan
# This file is part of motionEye.
#
# motionEye is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import fcntl
import logging
import os.path
import re
import stat
import subprocess
import time
_resolutions_cache = {}
_ctrls_cache = {}
_ctrl_values_cache = {}
_DEV_V4L_BY_ID = '/dev/v4l/by-id/'
def find_v4l2_ctl():
try:
return subprocess.check_output('which v4l2-ctl', shell=True).strip()
except subprocess.CalledProcessError: # not found
return None
def list_devices():
global _resolutions_cache, _ctrls_cache, _ctrl_values_cache
logging.debug('listing v4l devices...')
try:
output = ''
started = time.time()
p = subprocess.Popen('v4l2-ctl --list-devices', shell=True, stdout=subprocess.PIPE, bufsize=1)
fd = p.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while True:
try:
data = p.stdout.read(1024)
if not data:
break
except IOError:
data = ''
time.sleep(0.01)
output += data
if len(output) > 10240:
logging.warn('v4l2-ctl command returned more than 10k of output')
break
if time.time() - started > 3:
logging.warn('v4l2-ctl command ran for more than 3 seconds')
break
except subprocess.CalledProcessError:
logging.debug('failed to list devices (probably no devices installed)')
return []
try:
# try to kill the v4l2-ctl subprocess
p.kill()
except:
pass # nevermind
name = None
devices = []
for line in output.split('\n'):
if line.startswith('\t'):
device = line.strip()
device = find_persistent_device(device)
devices.append((device, name))
logging.debug('found device %(name)s: %(device)s' % {
'name': name, 'device': device})
else:
name = line.split('(')[0].strip()
# clear the cache
_resolutions_cache = {}
_ctrls_cache = {}
_ctrl_values_cache = {}
return devices
def list_resolutions(device):
global _resolutions_cache
if device in _resolutions_cache:
return _resolutions_cache[device]
logging.debug('listing resolutions of device %(device)s...' % {'device': device})
resolutions = set()
output = ''
started = time.time()
p = subprocess.Popen('v4l2-ctl -d %(device)s --list-formats-ext | grep -vi stepwise | grep -oE "[0-9]+x[0-9]+" || true' % {
'device': device}, shell=True, stdout=subprocess.PIPE, bufsize=1)
fd = p.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while True:
try:
data = p.stdout.read(1024)
if not data:
break
except IOError:
data = ''
time.sleep(0.01)
output += data
if len(output) > 10240:
logging.warn('v4l2-ctl command returned more than 10k of output')
break
if time.time() - started > 3:
logging.warn('v4l2-ctl command ran for more than 3 seconds')
break
try:
# try to kill the v4l2-ctl subprocess
p.kill()
except:
pass # nevermind
for pair in output.split('\n'):
pair = pair.strip()
if not pair:
continue
width, height = pair.split('x')
width = int(width)
height = int(height)
if (width, height) in resolutions:
continue # duplicate resolution
if width < 96 or height < 96: # some reasonable minimal values
continue
if width % 16 or height % 16: # ignore non-modulo 16 resolutions
continue
resolutions.add((width, height))
logging.debug('found resolution %(width)sx%(height)s for device %(device)s' % {
'device': device, 'width': width, 'height': height})
if not resolutions:
logging.debug('no resolutions found for device %(device)s, adding the defaults' % {'device': device})
# no resolution returned by v4l2-ctl call, add common default resolutions
resolutions.add((320, 240))
resolutions.add((640, 480))
resolutions.add((800, 480))
resolutions.add((1024, 576))
resolutions.add((1024, 768))
resolutions.add((1280, 720))
resolutions.add((1280, 800))
resolutions.add((1280, 960))
resolutions.add((1280, 1024))
resolutions.add((1440, 960))
resolutions.add((1440, 1024))
resolutions.add((1600, 1200))
resolutions = list(sorted(resolutions, key=lambda r: (r[0], r[1])))
_resolutions_cache[device] = resolutions
return resolutions
def device_present(device):
try:
st = os.stat(device)
return stat.S_ISCHR(st.st_mode)
except:
return False
def find_persistent_device(device):
try:
devs_by_id = os.listdir(_DEV_V4L_BY_ID)
except OSError:
return device
for p in devs_by_id:
p = os.path.join(_DEV_V4L_BY_ID, p)
if os.path.realpath(p) == device:
return p
return device
def get_brightness(device):
return _get_ctrl(device, 'brightness')
def set_brightness(device, value):
_set_ctrl(device, 'brightness', value)
def get_contrast(device):
return _get_ctrl(device, 'contrast')
def set_contrast(device, value):
_set_ctrl(device, 'contrast', value)
def get_saturation(device):
return _get_ctrl(device, 'saturation')
def set_saturation(device, value):
_set_ctrl(device, 'saturation', value)
def get_hue(device):
return _get_ctrl(device, 'hue')
def set_hue(device, value):
_set_ctrl(device, 'hue', value)
def _get_ctrl(device, control):
global _ctrl_values_cache
if not device_present(device):
return None
if device in _ctrl_values_cache and control in _ctrl_values_cache[device]:
return _ctrl_values_cache[device][control]
controls = _list_ctrls(device)
properties = controls.get(control)
if properties is None:
logging.warn('control %(control)s not found for device %(device)s' % {
'control': control, 'device': device})
return None
value = int(properties['value'])
# adjust the value range
if 'min' in properties and 'max' in properties:
min_value = int(properties['min'])
max_value = int(properties['max'])
value = int(round((value - min_value) * 100.0 / (max_value - min_value)))
else:
logging.warn('min and max values not found for control %(control)s of device %(device)s' % {
'control': control, 'device': device})
logging.debug('control %(control)s of device %(device)s is %(value)s%%' % {
'control': control, 'device': device, 'value': value})
return value
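# Worked example (illustrative numbers, not taken from a real device): if
# v4l2-ctl reports brightness with min=-64, max=64 and value=0, the code
# above stores round((0 - -64) * 100.0 / (64 - -64)) = round(6400 / 128) = 50,
# i.e. 50%. _set_ctrl() below applies the inverse mapping when writing back.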
def _set_ctrl(device, control, value):
global _ctrl_values_cache
if not device_present(device):
return
controls = _list_ctrls(device)
properties = controls.get(control)
if properties is None:
logging.warn('control %(control)s not found for device %(device)s' % {
'control': control, 'device': device})
return
_ctrl_values_cache.setdefault(device, {})[control] = value
# adjust the value range
if 'min' in properties and 'max' in properties:
min_value = int(properties['min'])
max_value = int(properties['max'])
value = int(round(min_value + value * (max_value - min_value) / 100.0))
else:
logging.warn('min and max values not found for control %(control)s of device %(device)s' % {
'control': control, 'device': device})
logging.debug('setting control %(control)s of device %(device)s to %(value)s' % {
'control': control, 'device': device, 'value': value})
output = ''
started = time.time()
p = subprocess.Popen('v4l2-ctl -d %(device)s --set-ctrl %(control)s=%(value)s' % {
'device': device, 'control': control, 'value': value}, shell=True, stdout=subprocess.PIPE, bufsize=1)
fd = p.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while True:
try:
data = p.stdout.read(1024)
if not data:
break
except IOError:
data = ''
time.sleep(0.01)
output += data
if len(output) > 10240:
logging.warn('v4l2-ctl command returned more than 10k of output')
break
if time.time() - started > 3:
logging.warn('v4l2-ctl command ran for more than 3 seconds')
break
try:
# try to kill the v4l2-ctl subprocess
p.kill()
except:
pass # nevermind
def _list_ctrls(device):
global _ctrls_cache
if device in _ctrls_cache:
return _ctrls_cache[device]
output = ''
started = time.time()
p = subprocess.Popen('v4l2-ctl -d %(device)s --list-ctrls' % {
'device': device}, shell=True, stdout=subprocess.PIPE, bufsize=1)
fd = p.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while True:
try:
data = p.stdout.read(1024)
if not data:
break
except IOError:
data = ''
time.sleep(0.01)
output += data
if len(output) > 10240:
logging.warn('v4l2-ctl command returned more than 10k of output')
break
if time.time() - started > 3:
logging.warn('v4l2-ctl command ran for more than 3 seconds')
break
try:
# try to kill the v4l2-ctl subprocess
p.kill()
except:
pass # nevermind
controls = {}
for line in output.split('\n'):
if not line:
continue
match = re.match('^\s*(\w+)\s+\(\w+\)\s+\:\s*(.+)', line)
if not match:
continue
(control, properties) = match.groups()
properties = dict([v.split('=', 1) for v in properties.split(' ') if v.count('=')])
controls[control] = properties
_ctrls_cache[device] = controls
return controls
| gpl-3.0 | -6,950,108,781,523,003,000 | 26.302885 | 127 | 0.569202 | false |
intelxed/xed | pysrc/regmap.py | 1 | 3763 | #!/usr/bin/env python
#BEGIN_LEGAL
#
#Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
from __future__ import print_function
import re
import sys
def die(s):
sys.stderr.write(s+"\n")
sys.exit(1)
class regmap_t(object):
"""This converts register indices to register enumerations. And
vice versa. This replaces some clunkier register lookup machinery
in XED2."""
def __init__(self, dst, ntname,base,index):
self.name = dst
self.ntname = ntname
self.base_reg = base
self.index = index
self.decode_output = ''
self.encode_output = ''
def activate(self):
self.emit_decoder_code()
self.emit_encoder_code()
def dump(self):
print(" DECODER OUTPUT: ", self.decode_output)
print(" ENCODER OUTPUT: ", self.encode_output)
def emit_decoder_code(self):
self.decode_preamble()
self.decode_emit()
self.decode_epilogue()
def emit_encoder_code(self):
self.encode_preamble()
self.encode_emit()
self.encode_epilogue()
def decode_preamble(self):
pass
def decode_emit(self):
d = {}
d['base_reg'] = self.base_reg
d['index'] = self.index
d['name'] = self.name # bypass OUTREG!
c = 'ov[XED_OPERAND_%(name)s]= %(base_reg)s + %(index)s'
self.decode_output += (c%d)
def decode_epilogue(self):
self.decode_output += ";"
def encode_preamble(self):
pass
def encode_emit(self):
d = {}
d['operand_name'] = self.name
d['base_reg'] = self.base_reg
d['index_name'] = self.index
c = "ov[XED_OPERAND_%(index_name)s]= ov[XED_OPERAND_%(operand_name)s] - %(base_reg)s;"
self.encode_output += (c%d)
def encode_epilogue(self):
pass
class parse_regmap_t(object):
def __init__(self):
self.regmaps = {}
def read_line(self,line):
""" Lines have the following very simple format
XMM_1 XMM0 REGINDEX1
"""
a = line.split()
if len(a) != 3:
die("Wrong number of fields on line: " + line)
try:
(ntname, base, index) = a
except:
die("Could not parse " + line)
regmap = regmap_t('OUTREG', ntname, 'XED_REG_'+base, index)
regmap.activate()
if ntname in self.regmaps:
die("Attempting to duplicate regmap " + ntname)
self.regmaps[ntname] = regmap
def read_lines(self,lines):
for line in lines:
line = re.sub(r'#.*','',line)
line = line.strip()
if line:
self.read_line(line)
def dump(self):
for g,v in self.regmaps.items():
print(g, ": ")
v.dump()
print("\n\n")
if __name__ == "__main__":
o = regmap_t('OUTREG', 'XMM_1','XED_REG_XMM0','REGIDX1')
o.activate()
o.dump()
p = parse_regmap_t()
lines = ['XMM_1 XMM0 REGIDX1',
'XMM_2 XMM0 REGIDX2',
'YMM_1 YMM0 REGIDX1',
'YMM_2 YMM0 REGIDX2' ]
p.read_lines(lines)
p.dump()
| apache-2.0 | -6,261,212,164,540,289,000 | 27.725191 | 97 | 0.560723 | false |
gaoxiaofeng/troubleShooting | templates/keywords/Disk.py | 1 | 1949 | # -*- coding: utf-8 -*-
from troubleshooting.framework.libraries.library import singleton
import re
from troubleshooting.framework.template.Keyword import *
import traceback
@singleton
class Disk(Keyword):
def __init__(self):
super(self.__class__,self).__init__()
self._diskSize = {}
self._diskInodes = {}
def _listSize(self):
command = "df -hP | awk '{print $5 $NF}'"
stdout = self.execute_command(command)
pattern = re.compile(r"(^\d+)%(\S+)",re.I|re.M)
_list = pattern.findall(stdout)
__list = []
for _tuple in _list:
if len(_tuple) != 2:
continue
__tuple = (_tuple[1],_tuple[0])
__list.append(__tuple)
self._diskSize = dict(__list)
def _listInodes(self):
command = "df -iP | awk '{print $5 $NF}'"
stdout = self.execute_command(command)
pattern = re.compile(r"(^\d+)%(\S+)",re.I|re.M)
_list = pattern.findall(stdout)
__list = []
for _tuple in _list:
if len(_tuple) != 2:
continue
__tuple = (_tuple[1],_tuple[0])
__list.append(__tuple)
self._diskInodes = dict(__list)
def _list(self):
if self._diskInodes == {}:
self._listInodes()
if self._diskSize == {}:
self._listSize()
def get_disk_usage_size(self):
self._list()
return self._diskSize
def get_disk_usage_inodes(self):
self._list()
return self._diskInodes
def is_exist_file(self,path):
command = "ls %s"%path
try:
stdout = self.execute_command(command,checkerr = True)
except Exception,e:
print "caught exception: %s" % traceback.format_exc()
# file does not exist
return False
else:
# file exists
return True
if __name__ == "__main__":
disk = Disk()
| apache-2.0 | 1,576,557,107,511,169,300 | 30.435484 | 66 | 0.518214 | false |
bbc/ebu-tt-live-toolkit | testing/bdd/test_ebuttd_multiple_active_regions_overlapping.py | 1 | 5826 | import pytest
from pytest_bdd import scenarios, when, then, parsers
from ebu_tt_live.errors import OverlappingActiveElementsError, RegionExtendingOutsideDocumentError
from ebu_tt_live.documents.converters import EBUTT3EBUTTDConverter
from ebu_tt_live.documents import EBUTT3Document
from ebu_tt_live.documents import EBUTTDDocument
scenarios('features/timing/ebuttd_multiple_active_regions_overlapping.feature')
@when(parsers.parse('it has region "{region_id}"'))
def when_it_contains_region(test_context, template_dict, region_id):
if 'regions' not in template_dict:
template_dict['regions'] = list()
region = {"id": region_id}
template_dict['regions'].append(region)
test_context[region_id] = region
@when(parsers.parse('it has p_element "{p_id}"'))
def when_it_contains_p_element(test_context, template_dict, p_id):
if 'p_elements' not in template_dict:
template_dict['p_elements'] = list()
p_element = {"id": p_id}
template_dict['p_elements'].append(p_element)
test_context[p_id] = p_element
@when(parsers.parse('p_element "{p_id}" has attribute "{attribute}" set to <p1_begin>'))
def when_p1_has_attribute_begin(test_context, p_id, attribute ,p1_begin):
test_context[p_id][attribute] = p1_begin
@when(parsers.parse('p_element "{p_id}" has attribute "{attribute}" set to <p1_end>'))
def when_p1_has_attribute_end(test_context, p_id, attribute ,p1_end):
test_context[p_id][attribute] = p1_end
@when(parsers.parse('p_element "{p_id}" has attribute "{attribute}" set to <p2_begin>'))
def when_p2_has_attribute_begin(test_context, p_id, attribute ,p2_begin):
test_context[p_id][attribute] = p2_begin
@when(parsers.parse('p_element "{p_id}" has attribute "{attribute}" set to <p2_end>'))
def when_p2_has_attribute_end(test_context, p_id, attribute ,p2_end):
test_context[p_id][attribute] = p2_end
@when(parsers.parse('p_element "{p_id}" has attribute "{attribute}" set to "{value}"'))
def when_p_element_has_attribute(test_context, p_id, attribute ,value):
test_context[p_id][attribute] = value
@when(parsers.parse('region "{region_id}" has attribute "{attribute}" set to <r1_origin>'))
def when_region1_has_attribute_origin(test_context, region_id, attribute ,r1_origin):
test_context[region_id][attribute] = r1_origin
@when(parsers.parse('region "{region_id}" has attribute "{attribute}" set to <r1_extent>'))
def when_region1_has_attribute_extent(test_context, region_id, attribute ,r1_extent):
test_context[region_id][attribute] = r1_extent
@when(parsers.parse('region "{region_id}" has attribute "{attribute}" set to <r2_origin>'))
def when_region2_has_attribute_origin(test_context, region_id, attribute ,r2_origin):
test_context[region_id][attribute] = r2_origin
@when(parsers.parse('region "{region_id}" has attribute "{attribute}" set to <r2_extent>'))
def when_region2_has_attribute_extent(test_context, region_id, attribute ,r2_extent):
test_context[region_id][attribute] = r2_extent
@when(parsers.parse('region "{region_id}" has attribute "{attribute}" set to <r3_origin>'))
def when_region3_has_attribute_origin(test_context, region_id, attribute ,r3_origin):
test_context[region_id][attribute] = r3_origin
@when(parsers.parse('region "{region_id}" has attribute "{attribute}" set to <r3_extent>'))
def when_region3_has_attribute_extent(test_context, region_id, attribute ,r3_extent):
test_context[region_id][attribute] = r3_extent
@when(parsers.parse('it contains element with region1 "{region_id}"'))
def when_element_has_attribute_region1(template_dict, region_id):
template_dict['text_region1'] = region_id
@when(parsers.parse('it contains element with region2 "{region_id}"'))
def when_element_has_attribute_region2(template_dict, region_id):
template_dict['text_region2'] = region_id
@then(parsers.parse('application should exit with error OverlappingActiveElementsError'))
def then_application_should_exit_overlapping_active_region_error(
test_context, template_dict):
match_string = "The EBU-TT-D spec forbids overlapping active areas. " \
"Element {elem1_id} references region" \
"id={region1_id}, origin={region1_origin}, extent={region1_extent}" \
" and Element {elem2_id} references region" \
"id={region2_id}, origin={region2_origin}, extent={region2_extent}.".format(
elem1_id=template_dict['p_elements'][0]['id'],
elem2_id=template_dict['p_elements'][1]['id'],
region1_id=template_dict['regions'][0]['id'],
region2_id=template_dict['regions'][1]['id'],
region1_origin=template_dict['regions'][0]['origin'],
region1_extent=template_dict['regions'][0]['extent'],
region2_origin=template_dict['regions'][1]['origin'],
region2_extent=template_dict['regions'][1]['extent'],
)
with pytest.raises(
OverlappingActiveElementsError,
match=match_string):
ebuttd_document = EBUTTDDocument.create_from_raw_binding(
test_context["converted_bindings"])
ebuttd_document.validate()
@when('the EBU-TT-Live document is converted to a EBU-TT-D')
def convert_to_ebuttd(test_context):
ebuttd_converter = EBUTT3EBUTTDConverter(None)
converted_bindings = ebuttd_converter.convert_document(test_context['document'].binding)
test_context["converted_bindings"] = converted_bindings
@then(parsers.parse('application should exit with error RegionExtendingOutsideDocumentError'))
def then_application_should_exit_with_region_error(test_context, template_dict, template_file):
with pytest.raises(RegionExtendingOutsideDocumentError) as e:
ebuttd_document = EBUTTDDocument.create_from_raw_binding(test_context["converted_bindings"])
ebuttd_document.validate()
| bsd-3-clause | -1,952,068,880,815,068,000 | 50.105263 | 100 | 0.706831 | false |
wji/plenarnavi_backend | data/UUID.py | 1 | 1035 | from sqlalchemy.types import TypeDecorator, CHAR
from sqlalchemy.dialects.postgresql import UUID
import uuid
class GUID(TypeDecorator):
"""Platform-independent GUID type.
Uses PostgreSQL's UUID type, otherwise uses
CHAR(32), storing as stringified hex values.
"""
impl = CHAR
def load_dialect_impl(self, dialect):
if dialect.name == 'postgresql':
return dialect.type_descriptor(UUID())
else:
return dialect.type_descriptor(CHAR(32))
def process_bind_param(self, value, dialect):
if value is None:
return value
elif dialect.name == 'postgresql':
return str(value)
else:
if not isinstance(value, uuid.UUID):
return "%.32x" % uuid.UUID(value).int
else:
# hexstring
return "%.32x" % value.int
def process_result_value(self, value, dialect):
if value is None:
return value
else:
return uuid.UUID(value) | gpl-3.0 | 666,983,228,280,939,600 | 27.777778 | 53 | 0.590338 | false |
HPCGISLab/NAWS | workflow.py | 1 | 7938 | #!/usr/bin/python
"""
Copyright (c) 2014 High-Performance Computing and GIS (HPCGIS) Laboratory. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
Authors and contributors: Eric Shook ([email protected])
"""
import os
import datetime
import time
import re
import subprocess
from Queue import Queue
#from threading import Thread
import threading
import sys,getopt
'''
The workflow script accepts a tasklist file, which contains a list of taskfiles.
A task may represent a simulation of an ABM or climate model. Tasks can be run
simultaneously if there are no dependencies or ordered in the case of
dependencies. Tasks may also include pre-processing or post-processing tasks.
'''
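# Hedged example (paths and contents are illustrative, not shipped with the
# repository): a tasklist file simply lists one taskfile path per line, e.g.
#
#   tasks/task0001.txt
#   tasks/task0002.txt
#
# and each taskfile names a program and its parameters, e.g.
#
#   program: /path/to/model_executable
#   parameters: -inputdir data/ -steps 100
#
# The digits in each taskfile name are what taskworker() uses for dependency
# ordering, and the workflow is launched with something like:
#   python workflow.py -n 4 -t tasklist.txt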
# TODO: Logging may be useful if the workflow becomes long
# TODO: Currently num_threads is user-defined, which controls the number of threads to launch tasks
# However, it would be better to include in the taskfile the number of cores needed
# and define the number of cores available, enabling the workflow system to manage core allocation
# Global variables
# The number of threads used to handle tasks is passed as a parameter
num_threads=0
# Array of threads (so they can be killed if needed)
threads=[]
# Array of task workflow numbers (one per thread/worker)
threadtasknums=[]
# Task queue
taskqueue=Queue()
# This function handles executing a task defined by a taskfile
def runtask(taskfile):
# Read and parse the taskfile with the following format
# Note additional parameters will likely be added based on need (e.g., CWD, data-dir)
'''
program: /path/to/executable_with_a_name
parameters: param1 -Optionalconfiguration param2 -AnotherParameter
'''
with open(taskfile,'r') as f:
# Set the required parameters as None for error checking at the end
program=None
parameters=None
for line in f:
if line.startswith("program:"):
# Extract the program path from after the colon (split(":",1)[1]) with whitespace removed (strip())
program=line.split(":",1)[1].strip()
#print "Program="+program
if line.startswith("parameters:"):
# Extract the parameter string from after the colon (split(":",1)[1]) with whitespace removed (strip())
parameters=line.split(":",1)[1].strip()
#print "Parameters="+parameters
# Error checking for required parameters
if program==None:
raise Exception("program missing in taskfile",taskfile)
if parameters==None:
raise Exception("parameters missing in taskfile",taskfile)
print "Calling program="+program,parameters
'''
In future versions that have defined input,output,stdout,etc.
there could be more logic here to:
- run each model in a defined directory
- output stdout,stderr in the directory
- package up output files for easier transfer
- ...
'''
returncode=subprocess.check_call(program+" "+parameters,shell=True)
# A task worker loops while there are tasks left in the taskqueue
# Input parameter is a thread id (tid)
def taskworker(tid):
while not taskqueue.empty():
taskfile=taskqueue.get()
tasknum=taskfile.split("/",1)[1].split(".",1)[0].strip()
tasknum=re.sub("\D", "", tasknum)
#print "tid=",tid
threadtasknums[tid]=int(tasknum)
# While there is a dependency problem (lower order task numbers are still being processed)
# then spintwait
mintasknum=min(threadtasknums)
while threadtasknums[tid]>mintasknum:
#print "min=",minthreadtasknum,"min(array)=",min(*threadtasknums),"nums[",i,"]=",threadtasknums[i]
#if(threadtasknums[tid]<=min(*threadtasknums)): # If this task number is less than or equal to the minimum
# break # then there are no dependencies, so you can break out of this infinite loop
time.sleep(1) # this is a spin-wait loop
mintasknum=min(*threadtasknums)
print "Thread",tid,"running",taskfile,"at",str(datetime.datetime.now())
try:
runtask(taskfile)
except:
exit(1)
taskqueue.task_done()
threadtasknums[tid]=999999 # Set the tasknum for tid to 9999 so it doesn't influence dependencies
print "Thread",tid,"quitting, because taskqueue is empty"
# Main program code
def main():
print "Starting node workflow"
try:
opts,args=getopt.getopt(sys.argv[1:],"n:t:",["numthreads=","tasklist="])
except getopt.GetoptError:
print "workflow.py -n <number of threads to launch> -t <tasklistfile>"
sys.exit(1)
# Set model filename and experiment name based on command-line parameter
num_threads=0
tasklistfile=""
for opt, arg in opts:
if opt in ("-n", "--numthreads"):
num_threads=int(arg)
if opt in ("-t", "--tasklist"):
tasklistfile=arg
err=0
if num_threads<=0:
print " [ ERROR ] Number of threads must be greater than 0"
err=1
if tasklistfile=="":
print " [ ERROR ] Must provide tasklistfile"
err=1
if err==1:
print "workflow.py -n <number of threads to launch> -t <tasklistfile>"
sys.exit(1)
print "Executing in current directory :",os.getcwd()
print "Reading tasklist file"
with open(tasklistfile,'r') as f:
taskfiles = f.readlines()
f.close()
# tasksdir = 'tasks/'
# taskfiles = os.listdir(tasksdir) # Contains a list of task files to process
taskfiles.sort()
print "Starting task queue"
for taskfile in taskfiles:
taskqueue.put(taskfile.strip())
print "Task queue contains ",taskqueue.qsize()," tasks"
# Start the workflow engine
# Currently the logic is simple -> one task==one thread==one core but that will need
# to be modified to account for multithreaded models (agent-based and climate)
# so eventually this will need to parse the task to determine the number of cores
# needed by the task and dynamically manage the number of tasks running simultaneously
print "Starting ",num_threads," threads"
for i in range(num_threads):
threadtasknums.append(-1)
t=threading.Thread(target=taskworker,args=(i,))
t.daemon=True
t.setDaemon(True)
t.start()
threads.append(t)
# Now we wait until all of the tasks are finished.
print "Waiting for threads to finish"
# Normally you can use a blocking .join, but then you cannot kill the process
# So instead we spin-wait and catch ^C so a user can kill this process.
# while threading.activeCount() > 0:
# time.sleep(20)
while taskqueue.qsize()>0:
time.sleep(1)
print "taskqueue size",taskqueue.qsize()
''' # FIXME: Need to clean up this code, which was used for testing ^C
try:
time.sleep(5) # Wait 5 seconds before checking again
# FIXME: In production this should be changed to 30
# If Ctrl+C or other error, kill all of the threads
except:
while not taskqueue.empty(): # Empty the queue
taskqueue.get()
for i in threads:
i.kill_received=True
i.kill()
exit(1)
'''
print "Joining taskqueue"
# At this point all of the tasks should be finished so we join them
notfinished=1
while notfinished==1:
notfinished=0
for i in range(num_threads):
if threadtasknums[i]<999999:
notfinished=1
time.sleep(1)
#while not taskqueue.join(1):
# time.sleep(1)
print "Finished node workflow"
# Run main
if __name__=="__main__":
main()
| bsd-3-clause | 6,810,888,699,355,148,000 | 35.75 | 120 | 0.644621 | false |
squarebracket/star | scheduler_site/wsgi.py | 1 | 1475 | """
WSGI config for scheduler_site project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "scheduler_site.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "scheduler_site.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| gpl-2.0 | 6,664,730,830,470,159,000 | 44.09375 | 79 | 0.776949 | false |
ecolell/aquire | setup.py | 1 | 2553 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import os
import subprocess
from setuptools.command import easy_install
def parse_requirements(filename):
return list(filter(lambda line: (line.strip())[0] != '#',
[line.strip() for line in open(filename).readlines()]))
def calculate_version():
# Fetch version from git tags, and write to version.py.
# Also, when git is not available (PyPi package), use stored version.py.
version_py = os.path.join(os.path.dirname(__file__), 'version.py')
try:
version_git = subprocess.check_output(["git", "describe"]).rstrip()
except Exception:
with open(version_py, 'r') as fh:
version_git = (open(version_py).read()
.strip().split('=')[-1].replace('"', ''))
version_msg = ('# Do not edit this file, pipeline versioning is '
'governed by git tags')
with open(version_py, 'w') as fh:
fh.write(version_msg + os.linesep + "__version__=" + version_git)
return version_git
requirements = parse_requirements('requirements.txt')
version_git = calculate_version()
def get_long_description():
readme_file = 'README.md'
if not os.path.isfile(readme_file):
return ''
# Try to transform the README from Markdown to reStructuredText.
try:
easy_install.main(['-U', 'pyandoc==0.0.1'])
import pandoc
pandoc.core.PANDOC_PATH = 'pandoc'
doc = pandoc.Document()
doc.markdown = open(readme_file).read()
description = doc.rst
except Exception:
description = open(readme_file).read()
return description
setup(
name='aquire',
version=version_git,
author=u'Eloy Adonis Colell',
author_email='[email protected]',
packages=['aquire'],
url='https://github.com/ecolell/aquire',
license='MIT',
description=('A python library that allow to download files '
'from internet and show progress to the console.'),
long_description=get_long_description(),
zip_safe=True,
install_requires=requirements,
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Topic :: Internet :: File Transfer Protocol (FTP)",
],
)
| mit | 4,310,986,713,263,461,400 | 33.04 | 78 | 0.614179 | false |
crispycret/crispys_webkit | tests/urls.py | 1 | 3789 | import unittest
from crispys_webkit.urls import LazyUrl
stackoverflow_url = 'http://stackoverflow.com/'
def create_stackoverflow_lazyurl():
return LazyUrl(stackoverflow_url)
class LazyUrlMixin(object):
def check_stackoverflow_url(self, url):
self.assertEqual(url.scheme, 'http')
self.assertEqual(url.host, 'stackoverflow.com')
self.assertEqual(url.path, '/')
self.assertEqual(str(url), 'http://stackoverflow.com/')
class LazyUrlCreationTests(LazyUrlMixin, unittest.TestCase):
#### Object Instantiation ################################
def test_create_lazy_url(self):
""" Create a normal LazyUrl """
url = LazyUrl('http://stackoverflow.com/')
self.check_stackoverflow_url(url)
def test_create_lazy_url_with_bad_scheme(self):
""" use a scheme that is not allowed """
url = LazyUrl('ftp://stackoverflow.com')
self.check_stackoverflow_url(url)
def test_create_lazy_url_with_no_scheme(self):
""" don't use a scheme """
url = LazyUrl('stackoverflow.com')
self.check_stackoverflow_url(url)
##########################################################
class LazyUrlGetSetTests(LazyUrlMixin, unittest.TestCase):
#### Set Methods #########################################
def test_set_scheme_with_bad_scheme(self):
url = create_stackoverflow_lazyurl()
self.check_stackoverflow_url(url)
url.set_scheme('ssh')
self.assertEqual(url.scheme, 'http')
self.assertEqual(str(url), 'http://stackoverflow.com/')
def test_set_scheme_with_good_scheme(self):
url = create_stackoverflow_lazyurl()
self.check_stackoverflow_url(url)
url.set_scheme('https')
self.assertEqual(url.scheme, 'https')
self.assertEqual(str(url), 'https://stackoverflow.com/')
def test_set_host(self):
url = create_stackoverflow_lazyurl()
self.check_stackoverflow_url(url)
url.set_host('news.ycombinator.com')
self.assertEqual(str(url), 'http://news.ycombinator.com/')
def test_set_path(self):
url = create_stackoverflow_lazyurl()
self.check_stackoverflow_url(url)
url.set_path('/user/1234/crispycret')
self.assertIn(stackoverflow_url, str(url))
self.assertEqual(url.path, '/user/1234/crispycret')
self.assertEqual(str(url), 'http://stackoverflow.com/user/1234/crispycret')
def test_set_params(self):
url = create_stackoverflow_lazyurl()
self.check_stackoverflow_url(url)
url.set_params('price')
self.assertEqual(str(url), 'http://stackoverflow.com/;price')
def test_set_query(self):
url = create_stackoverflow_lazyurl()
self.check_stackoverflow_url(url)
url.set_query('id=123')
self.assertEqual(str(url), 'http://stackoverflow.com/?id=123')
def test_set_fragment(self):
url = create_stackoverflow_lazyurl()
self.check_stackoverflow_url(url)
url.set_fragment('someLabel')
self.assertIn(stackoverflow_url, str(url))
self.assertEqual(url.fragment, 'someLabel')
self.assertEqual(str(url), 'http://stackoverflow.com/#someLabel')
##########################################################
class LazyUrlMethodTests(LazyUrlMixin, unittest.TestCase):
def test_get_full_path(self):
url = create_stackoverflow_lazyurl()
self.check_stackoverflow_url(url)
url.set_path('question/55555/SomeQuestion')
url.set_fragment('bookmark')
self.assertEqual(url.get_full_path(), '/question/55555/SomeQuestion#bookmark')
def test_clear_full_path(self):
url = create_stackoverflow_lazyurl()
self.check_stackoverflow_url(url)
url.set_scheme('https')
url.set_path('question/55555/SomeQuestion')
url.set_params('details')
url.set_query('id=1')
url.set_fragment('bookmark')
self.assertEqual(str(url), 'https://stackoverflow.com/question/55555/SomeQuestion;details?id=1#bookmark')
url.clear_full_path()
self.assertEqual(str(url), 'https://stackoverflow.com/')
if __name__ == '__main__':
unittest.main() | mit | 4,662,887,234,070,625,000 | 31.393162 | 107 | 0.691475 | false |
Pointedstick/ReplicatorG | skein_engines/skeinforge-44/fabmetheus_utilities/xml_simple_reader.py | 1 | 25544 | """
The xml_simple_reader.py script is an xml parser that can parse a line separated xml text.
This xml parser will read a line seperated xml text and produce a tree of the xml with a document element. Each element can have an attribute table, childNodes, a class name, parentNode, text and a link to the document element.
This example gets an xml tree for the xml file boolean.xml. This example is run in a terminal in the folder which contains boolean.xml and xml_simple_reader.py.
> python
Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31)
[GCC 4.2.1 (SUSE Linux)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> fileName = 'boolean.xml'
>>> file = open(fileName, 'r')
>>> xmlText = file.read()
>>> file.close()
>>> from xml_simple_reader import DocumentNode
>>> xmlParser = DocumentNode(fileName, xmlText)
>>> print( xmlParser )
?xml, {'version': '1.0'}
ArtOfIllusion, {'xmlns:bf': '//babelfiche/codec', 'version': '2.0', 'fileversion': '3'}
Scene, {'bf:id': 'theScene'}
materials, {'bf:elem-type': 'java.lang.Object', 'bf:list': 'collection', 'bf:id': '1', 'bf:type': 'java.util.Vector'}
..
many more lines of the xml tree
..
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.geometry.geometry_utilities import matrix
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import xml_simple_writer
import cStringIO
__author__ = 'Enrique Perez ([email protected])'
__credits__ = 'Nophead <http://hydraraptor.blogspot.com/>\nArt of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
globalGetAccessibleAttributeSet = set('getPaths getPreviousVertex getPreviousElementNode getVertexes parentNode'.split())
def createAppendByText(parentNode, xmlText):
'Create and append the child nodes from the xmlText.'
monad = OpenMonad(parentNode)
for character in xmlText:
monad = monad.getNextMonad(character)
def createAppendByTextb(parentNode, xmlText):
'Create and append the child nodes from the xmlText.'
monad = OpenMonad(parentNode)
for character in xmlText:
monad = monad.getNextMonad(character)
def getDocumentNode(fileName):
'Get the document from the file name.'
xmlText = getFileText('test.xml')
return DocumentNode(fileName, xmlText)
def getFileText(fileName, printWarning=True, readMode='r'):
'Get the entire text of a file.'
try:
file = open(fileName, readMode)
fileText = file.read()
file.close()
return fileText
except IOError:
if printWarning:
print('The file ' + fileName + ' does not exist.')
return ''
class CDATASectionMonad:
'A monad to handle a CDATASection node.'
def __init__(self, input, parentNode):
'Initialize.'
self.input = input
self.parentNode = parentNode
def getNextMonad(self, character):
'Get the next monad.'
self.input.write(character)
if character == '>':
inputString = self.input.getvalue()
if inputString.endswith(']]>'):
textContent = '<%s\n' % inputString
self.parentNode.childNodes.append(CDATASectionNode(self.parentNode, textContent))
return OpenMonad(self.parentNode)
return self
class CDATASectionNode:
'A CDATASection node.'
def __init__(self, parentNode, textContent=''):
'Initialize.'
self.parentNode = parentNode
self.textContent = textContent
def __repr__(self):
'Get the string representation of this CDATASection node.'
return self.textContent
def addToIdentifierDictionaries(self):
'Add the element to the owner document identifier dictionaries.'
pass
def addXML(self, depth, output):
'Add xml for this CDATASection node.'
output.write(self.textContent)
def appendSelfToParent(self):
'Append self to the parentNode.'
self.parentNode.appendChild(self)
def copyXMLChildNodes(self, idSuffix, parentNode):
'Copy the xml childNodes.'
pass
def getAttributes(self):
'Get the attributes.'
return {}
def getChildNodes(self):
'Get the empty set.'
return []
def getCopy(self, idSuffix, parentNode):
'Copy the xml element, set its dictionary and add it to the parentNode.'
copy = self.getCopyShallow()
copy.parentNode = parentNode
copy.appendSelfToParent()
return copy
def getCopyShallow(self, attributes=None):
'Copy the node and set its parentNode.'
return CDATASectionNode(self.parentNode, self.textContent)
def getNodeName(self):
'Get the node name.'
return '#cdata-section'
def getNodeType(self):
'Get the node type.'
return 4
def getOwnerDocument(self):
'Get the owner document.'
return self.parentNode.getOwnerDocument()
def getTextContent(self):
'Get the text content.'
return self.textContent
def removeChildNodesFromIDNameParent(self):
'Remove the childNodes from the id and name dictionaries and the childNodes.'
pass
def removeFromIDNameParent(self):
'Remove this from the id and name dictionaries and the childNodes of the parentNode.'
if self.parentNode != None:
self.parentNode.childNodes.remove(self)
def setParentAddToChildNodes(self, parentNode):
'Set the parentNode and add this to its childNodes.'
self.parentNode = parentNode
if self.parentNode != None:
self.parentNode.childNodes.append(self)
attributes = property(getAttributes)
childNodes = property(getChildNodes)
nodeName = property(getNodeName)
nodeType = property(getNodeType)
ownerDocument = property(getOwnerDocument)
class CommentMonad(CDATASectionMonad):
'A monad to handle a comment node.'
def getNextMonad(self, character):
'Get the next monad.'
self.input.write(character)
if character == '>':
inputString = self.input.getvalue()
if inputString.endswith('-->'):
textContent = '<%s\n' % inputString
self.parentNode.childNodes.append(CommentNode(self.parentNode, textContent))
return OpenMonad(self.parentNode)
return self
class CommentNode(CDATASectionNode):
'A comment node.'
def getCopyShallow(self, attributes=None):
'Copy the node and set its parentNode.'
return CommentNode(self.parentNode, self.textContent)
def getNodeName(self):
'Get the node name.'
return '#comment'
def getNodeType(self):
'Get the node type.'
return 8
nodeName = property(getNodeName)
nodeType = property(getNodeType)
class DocumentNode:
'A class to parse an xml text and store the elements.'
def __init__(self, fileName, xmlText):
'Initialize.'
self.childNodes = []
self.fileName = fileName
self.idDictionary = {}
self.nameDictionary = {}
self.parentNode = None
self.tagDictionary = {}
self.xmlText = xmlText
createAppendByText(self, xmlText)
def __repr__(self):
'Get the string representation of this xml document.'
output = cStringIO.StringIO()
for childNode in self.childNodes:
childNode.addXML(0, output)
return output.getvalue()
def appendChild(self, elementNode):
'Append child elementNode to the child nodes.'
self.childNodes.append(elementNode)
elementNode.addToIdentifierDictionaries()
return elementNode
def getAttributes(self):
'Get the attributes.'
return {}
def getCascadeBoolean(self, defaultBoolean, key):
'Get the cascade boolean.'
return defaultBoolean
def getCascadeFloat(self, defaultFloat, key):
'Get the cascade float.'
return defaultFloat
def getDocumentElement(self):
'Get the document element.'
if len(self.childNodes) == 0:
return None
return self.childNodes[-1]
def getImportNameChain(self, suffix=''):
'Get the import name chain with the suffix at the end.'
return suffix
def getNodeName(self):
'Get the node name.'
return '#document'
def getNodeType(self):
'Get the node type.'
return 9
def getOriginalRoot(self):
'Get the original reparsed document element.'
if evaluate.getEvaluatedBoolean(True, self.documentElement, 'getOriginalRoot'):
return DocumentNode(self.fileName, self.xmlText).documentElement
return None
def getOwnerDocument(self):
'Get the owner document.'
return self
attributes = property(getAttributes)
documentElement = property(getDocumentElement)
nodeName = property(getNodeName)
nodeType = property(getNodeType)
ownerDocument = property(getOwnerDocument)
class DocumentTypeMonad(CDATASectionMonad):
'A monad to handle a document type node.'
def getNextMonad(self, character):
'Get the next monad.'
self.input.write(character)
if character == '>':
inputString = self.input.getvalue()
if inputString.endswith('?>'):
textContent = '%s\n' % inputString
self.parentNode.childNodes.append(DocumentTypeNode(self.parentNode, textContent))
return OpenMonad(self.parentNode)
return self
class DocumentTypeNode(CDATASectionNode):
'A document type node.'
def getCopyShallow(self, attributes=None):
'Copy the node and set its parentNode.'
return DocumentTypeNode(self.parentNode, self.textContent)
def getNodeName(self):
'Get the node name.'
return '#forNowDocumentType'
def getNodeType(self):
'Get the node type.'
return 10
nodeName = property(getNodeName)
nodeType = property(getNodeType)
class ElementEndMonad:
'A monad to look for the end of an ElementNode tag.'
def __init__(self, parentNode):
'Initialize.'
self.parentNode = parentNode
def getNextMonad(self, character):
'Get the next monad.'
if character == '>':
return TextMonad(self.parentNode)
return self
class ElementLocalNameMonad:
'A monad to set the local name of an ElementNode.'
def __init__(self, character, parentNode):
'Initialize.'
self.input = cStringIO.StringIO()
self.input.write(character)
self.parentNode = parentNode
def getNextMonad(self, character):
'Get the next monad.'
if character == '[':
if (self.input.getvalue() + character).startswith('![CDATA['):
self.input.write(character)
return CDATASectionMonad(self.input, self.parentNode)
if character == '-':
if (self.input.getvalue() + character).startswith('!--'):
self.input.write(character)
return CommentMonad(self.input, self.parentNode)
if character.isspace():
self.setLocalName()
return ElementReadMonad(self.elementNode)
if character == '/':
self.setLocalName()
self.elementNode.appendSelfToParent()
return ElementEndMonad(self.elementNode.parentNode)
if character == '>':
self.setLocalName()
self.elementNode.appendSelfToParent()
return TextMonad(self.elementNode)
self.input.write(character)
return self
def setLocalName(self):
'Set the class name.'
self.elementNode = ElementNode(self.parentNode)
self.elementNode.localName = self.input.getvalue().lower().strip()
class ElementNode:
'An xml element.'
def __init__(self, parentNode=None):
'Initialize.'
self.attributes = {}
self.childNodes = []
self.localName = ''
self.parentNode = parentNode
self.xmlObject = None
def __repr__(self):
'Get the string representation of this xml document.'
return '%s\n%s\n%s' % (self.localName, self.attributes, self.getTextContent())
def _getAccessibleAttribute(self, attributeName):
'Get the accessible attribute.'
global globalGetAccessibleAttributeSet
if attributeName in globalGetAccessibleAttributeSet:
return getattr(self, attributeName, None)
return None
def addSuffixToID(self, idSuffix):
'Add the suffix to the id.'
if 'id' in self.attributes:
self.attributes['id'] += idSuffix
def addToIdentifierDictionaries(self):
'Add the element to the owner document identifier dictionaries.'
ownerDocument = self.getOwnerDocument()
importNameChain = self.getImportNameChain()
idKey = self.getStrippedAttributesValue('id')
if idKey != None:
ownerDocument.idDictionary[importNameChain + idKey] = self
nameKey = self.getStrippedAttributesValue('name')
if nameKey != None:
euclidean.addElementToListDictionaryIfNotThere(self, importNameChain + nameKey, ownerDocument.nameDictionary)
for tagKey in self.getTagKeys():
euclidean.addElementToListDictionaryIfNotThere(self, tagKey, ownerDocument.tagDictionary)
def addXML(self, depth, output):
'Add xml for this elementNode.'
innerOutput = cStringIO.StringIO()
xml_simple_writer.addXMLFromObjects(depth + 1, self.childNodes, innerOutput)
innerText = innerOutput.getvalue()
xml_simple_writer.addBeginEndInnerXMLTag(self.attributes, depth, innerText, self.localName, output, self.getTextContent())
def appendChild(self, elementNode):
'Append child elementNode to the child nodes.'
self.childNodes.append(elementNode)
elementNode.addToIdentifierDictionaries()
return elementNode
def appendSelfToParent(self):
'Append self to the parentNode.'
self.parentNode.appendChild(self)
def copyXMLChildNodes(self, idSuffix, parentNode):
'Copy the xml childNodes.'
for childNode in self.childNodes:
childNode.getCopy(idSuffix, parentNode)
def getCascadeBoolean(self, defaultBoolean, key):
'Get the cascade boolean.'
if key in self.attributes:
value = evaluate.getEvaluatedBoolean(None, self, key)
if value != None:
return value
return self.parentNode.getCascadeBoolean(defaultBoolean, key)
def getCascadeFloat(self, defaultFloat, key):
'Get the cascade float.'
if key in self.attributes:
value = evaluate.getEvaluatedFloat(None, self, key)
if value != None:
return value
return self.parentNode.getCascadeFloat(defaultFloat, key)
def getChildNodesByLocalName(self, localName):
'Get the childNodes which have the given class name.'
childNodesByLocalName = []
for childNode in self.childNodes:
if localName.lower() == childNode.getNodeName():
childNodesByLocalName.append(childNode)
return childNodesByLocalName
def getChildNodesByLocalNameRecursively(self, localName):
'Get the childNodes which have the given class name recursively.'
childNodesByLocalName = self.getChildNodesByLocalName(localName)
for childNode in self.childNodes:
childNodesByLocalName += childNode.getChildNodesByLocalNameRecursively(localName)
return childNodesByLocalName
def getCopy(self, idSuffix, parentNode):
'Copy the xml element, set its dictionary and add it to the parentNode.'
matrix4X4 = matrix.getBranchMatrixSetElementNode(self)
attributesCopy = self.attributes.copy()
attributesCopy.update(matrix4X4.getAttributes('matrix.'))
copy = self.getCopyShallow(attributesCopy)
copy.setParentAddToChildNodes(parentNode)
copy.addSuffixToID(idSuffix)
copy.addToIdentifierDictionaries()
self.copyXMLChildNodes(idSuffix, copy)
return copy
def getCopyShallow(self, attributes=None):
'Copy the xml element and set its dictionary and parentNode.'
if attributes == None: # to evade default initialization bug where a dictionary is initialized to the last dictionary
attributes = {}
copyShallow = ElementNode(self.parentNode)
copyShallow.attributes = attributes
copyShallow.localName = self.localName
return copyShallow
def getDocumentElement(self):
'Get the document element.'
return self.getOwnerDocument().getDocumentElement()
def getElementNodeByID(self, idKey):
'Get the xml element by id.'
idDictionary = self.getOwnerDocument().idDictionary
idKey = self.getImportNameChain() + idKey
if idKey in idDictionary:
return idDictionary[idKey]
return None
def getElementNodesByName(self, nameKey):
'Get the xml elements by name.'
nameDictionary = self.getOwnerDocument().nameDictionary
nameKey = self.getImportNameChain() + nameKey
if nameKey in nameDictionary:
return nameDictionary[nameKey]
return None
def getElementNodesByTag(self, tagKey):
'Get the xml elements by tag.'
tagDictionary = self.getOwnerDocument().tagDictionary
if tagKey in tagDictionary:
return tagDictionary[tagKey]
return None
def getFirstChildByLocalName(self, localName):
'Get the first childNode which has the given class name.'
for childNode in self.childNodes:
if localName.lower() == childNode.getNodeName():
return childNode
return None
def getIDSuffix(self, elementIndex=None):
'Get the id suffix from the dictionary.'
suffix = self.localName
if 'id' in self.attributes:
suffix = self.attributes['id']
if elementIndex == None:
return '_%s' % suffix
return '_%s_%s' % (suffix, elementIndex)
def getImportNameChain(self, suffix=''):
'Get the import name chain with the suffix at the end.'
importName = self.getStrippedAttributesValue('_importName')
if importName != None:
suffix = '%s.%s' % (importName, suffix)
return self.parentNode.getImportNameChain(suffix)
def getNodeName(self):
'Get the node name.'
return self.localName
def getNodeType(self):
'Get the node type.'
return 1
def getOwnerDocument(self):
'Get the owner document.'
return self.parentNode.getOwnerDocument()
def getParser(self):
'Get the parser.'
return self.getOwnerDocument()
def getPaths(self):
'Get all paths.'
if self.xmlObject == None:
return []
return self.xmlObject.getPaths()
def getPreviousElementNode(self):
'Get previous ElementNode if it exists.'
if self.parentNode == None:
return None
previousElementNodeIndex = self.parentNode.childNodes.index(self) - 1
if previousElementNodeIndex < 0:
return None
return self.parentNode.childNodes[previousElementNodeIndex]
def getPreviousVertex(self, defaultVector3=None):
'Get previous vertex if it exists.'
if self.parentNode == None:
return defaultVector3
if self.parentNode.xmlObject == None:
return defaultVector3
if len(self.parentNode.xmlObject.vertexes) < 1:
return defaultVector3
return self.parentNode.xmlObject.vertexes[-1]
def getStrippedAttributesValue(self, keyString):
'Get the stripped attribute value if the length is at least one, otherwise return None.'
if keyString in self.attributes:
strippedAttributesValue = self.attributes[keyString].strip()
if len(strippedAttributesValue) > 0:
return strippedAttributesValue
return None
def getSubChildWithID( self, idReference ):
'Get the childNode which has the idReference.'
for childNode in self.childNodes:
if 'bf:id' in childNode.attributes:
if childNode.attributes['bf:id'] == idReference:
return childNode
subChildWithID = childNode.getSubChildWithID( idReference )
if subChildWithID != None:
return subChildWithID
return None
def getTagKeys(self):
'Get stripped tag keys.'
if 'tags' not in self.attributes:
return []
tagKeys = []
tagString = self.attributes['tags']
if tagString.startswith('='):
tagString = tagString[1 :]
if tagString.startswith('['):
tagString = tagString[1 :]
if tagString.endswith(']'):
tagString = tagString[: -1]
for tagWord in tagString.split(','):
tagKey = tagWord.strip()
if tagKey != '':
tagKeys.append(tagKey)
return tagKeys
def getTextContent(self):
'Get the text from the child nodes.'
if len(self.childNodes) == 0:
return ''
firstNode = self.childNodes[0]
if firstNode.nodeType == 3:
return firstNode.textContent
return ''
def getValueByKey( self, key ):
'Get value by the key.'
if key in evaluate.globalElementValueDictionary:
return evaluate.globalElementValueDictionary[key](self)
if key in self.attributes:
return evaluate.getEvaluatedLinkValue(self, self.attributes[key])
return None
def getVertexes(self):
'Get the vertexes.'
if self.xmlObject == None:
return []
return self.xmlObject.getVertexes()
def getXMLProcessor(self):
'Get the xmlProcessor.'
return self.getDocumentElement().xmlProcessor
def linkObject(self, xmlObject):
'Link self to xmlObject and add xmlObject to archivableObjects.'
self.xmlObject = xmlObject
self.xmlObject.elementNode = self
self.parentNode.xmlObject.archivableObjects.append(self.xmlObject)
def printAllVariables(self):
'Print all variables.'
print('attributes')
print(self.attributes)
print('childNodes')
print(self.childNodes)
print('localName')
print(self.localName)
print('parentNode')
print(self.parentNode.getNodeName())
print('text')
print(self.getTextContent())
print('xmlObject')
print(self.xmlObject)
print('')
def printAllVariablesRoot(self):
'Print all variables and the document element variables.'
self.printAllVariables()
documentElement = self.getDocumentElement()
if documentElement != None:
print('')
print('Root variables:')
documentElement.printAllVariables()
def removeChildNodesFromIDNameParent(self):
'Remove the childNodes from the id and name dictionaries and the childNodes.'
childNodesCopy = self.childNodes[:]
for childNode in childNodesCopy:
childNode.removeFromIDNameParent()
def removeFromIDNameParent(self):
'Remove this from the id and name dictionaries and the childNodes of the parentNode.'
self.removeChildNodesFromIDNameParent()
idKey = self.getStrippedAttributesValue('id')
if idKey != None:
idDictionary = self.getOwnerDocument().idDictionary
idKey = self.getImportNameChain() + idKey
if idKey in idDictionary:
del idDictionary[idKey]
nameKey = self.getStrippedAttributesValue('name')
if nameKey != None:
euclidean.removeElementFromListTable(self, self.getImportNameChain() + nameKey, self.getOwnerDocument().nameDictionary)
for tagKey in self.getTagKeys():
euclidean.removeElementFromListTable(self, tagKey, self.getOwnerDocument().tagDictionary)
if self.parentNode != None:
self.parentNode.childNodes.remove(self)
def setParentAddToChildNodes(self, parentNode):
'Set the parentNode and add this to its childNodes.'
self.parentNode = parentNode
if self.parentNode != None:
self.parentNode.childNodes.append(self)
def setTextContent(self, textContent=''):
'Get the text from the child nodes.'
if len(self.childNodes) == 0:
self.childNodes.append(TextNode(self, textContent))
return
firstNode = self.childNodes[0]
if firstNode.nodeType == 3:
firstNode.textContent = textContent
self.childNodes.append(TextNode(self, textContent))
nodeName = property(getNodeName)
nodeType = property(getNodeType)
ownerDocument = property(getOwnerDocument)
textContent = property(getTextContent)
class ElementReadMonad:
'A monad to read the attributes of the ElementNode tag.'
def __init__(self, elementNode):
'Initialize.'
self.elementNode = elementNode
def getNextMonad(self, character):
'Get the next monad.'
if character.isspace():
return self
if character == '/':
self.elementNode.appendSelfToParent()
return ElementEndMonad(self.elementNode.parentNode)
if character == '>':
self.elementNode.appendSelfToParent()
return TextMonad(self.elementNode)
return KeyMonad(character, self.elementNode)
class KeyMonad:
'A monad to set the key of an attribute of an ElementNode.'
def __init__(self, character, elementNode):
'Initialize.'
self.input = cStringIO.StringIO()
self.input.write(character)
self.elementNode = elementNode
def getNextMonad(self, character):
'Get the next monad.'
if character == '=':
return ValueMonad(self.elementNode, self.input.getvalue().strip())
self.input.write(character)
return self
class OpenChooseMonad(ElementEndMonad):
'A monad to choose the next monad.'
def getNextMonad(self, character):
'Get the next monad.'
if character.isspace():
return self
if character == '?':
input = cStringIO.StringIO()
input.write('<?')
return DocumentTypeMonad(input, self.parentNode)
if character == '/':
return ElementEndMonad(self.parentNode.parentNode)
return ElementLocalNameMonad(character, self.parentNode)
class OpenMonad(ElementEndMonad):
'A monad to handle the open tag character.'
def getNextMonad(self, character):
'Get the next monad.'
if character == '<':
return OpenChooseMonad(self.parentNode)
return self
class TextMonad:
'A monad to handle the open tag character and set the text.'
def __init__(self, parentNode):
'Initialize.'
self.input = cStringIO.StringIO()
self.parentNode = parentNode
def getNextMonad(self, character):
'Get the next monad.'
if character == '<':
inputString = self.input.getvalue().strip()
if len(inputString) > 0:
self.parentNode.childNodes.append(TextNode(self.parentNode, inputString))
return OpenChooseMonad(self.parentNode)
self.input.write(character)
return self
class TextNode(CDATASectionNode):
'A text node.'
def addXML(self, depth, output):
'Add xml for this text node.'
pass
def getCopyShallow(self, attributes=None):
'Copy the node and set its parentNode.'
return TextNode(self.parentNode, self.textContent)
def getNodeName(self):
'Get the node name.'
return '#text'
def getNodeType(self):
'Get the node type.'
return 3
nodeName = property(getNodeName)
nodeType = property(getNodeType)
class ValueMonad:
'A monad to set the value of an attribute of an ElementNode.'
def __init__(self, elementNode, key):
'Initialize.'
self.elementNode = elementNode
self.input = cStringIO.StringIO()
self.key = key
self.quoteCharacter = None
def getNextMonad(self, character):
'Get the next monad.'
if self.quoteCharacter == None:
if character == '"' or character == "'":
self.quoteCharacter = character
return self
if self.quoteCharacter == character:
self.elementNode.attributes[self.key] = self.input.getvalue()
return ElementReadMonad(self.elementNode)
self.input.write(character)
return self
| gpl-2.0 | -4,695,995,734,451,366,000 | 29.555024 | 228 | 0.743541 | false |
fablabnbg/inkscape-chain-paths | setup.py | 1 | 1767 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# sudo zypper in python-setuptools
# http://docs.python.org/2/distutils/setupscript.html#installing-additional-files
#
from __future__ import print_function
import sys,os,glob,re
from distutils.core import setup
from setuptools.command.test import test as TestCommand
import chain_paths # for author(), version()
e = chain_paths.ChainPaths()
m = re.match('(.*)\s+<(.*)>', e.author())
# print('.',['Makefile'] + glob.glob('chain_paths*'))
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(name='chain-paths',
version = e.version(),
description='Inkscape extension making long continuous paths',
author=m.groups()[0],
author_email=m.groups()[1],
url='https://github.com/jnweiger/inkscape-chain-paths',
scripts=filter(os.path.isfile, ['chain_paths.py', 'chain_paths.inx', 'README.md' ] ),
packages=['chain-paths'],
license='GPL-2.0',
classifiers=[
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Environment :: Console',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
cmdclass={'test': PyTest},
long_description="".join(open('README.md').readlines()),
# tests_require=['pytest', 'scipy'],
#packages=['pyPdf','reportlab.pdfgen','reportlab.lib.colors','pygame.font' ],
#
)
| gpl-2.0 | 4,223,029,791,495,990,000 | 31.127273 | 91 | 0.624222 | false |
haandol/algorithm_in_python | tree/check_full_bin_tree.py | 1 | 1126 | # http://www.geeksforgeeks.org/check-whether-binary-tree-full-binary-tree-not/
from __init__ import Node
def solution(root):
if not root:
return True
if root.left and not root.right:
return False
if root.right and not root.left:
return False
return solution(root.left) and solution(root.right)
if __name__ == '__main__':
root = Node(1)
print(solution(root))
root = Node(1)
root.left = Node(2)
print(solution(root))
root = Node(1)
root.left = Node(2)
root.right = Node(3)
root.left.left = Node(4)
print(solution(root))
root = Node(10)
root.left = Node(20)
root.right = Node(30)
root.left.right = Node(40)
root.left.left = Node(50)
root.right.left = Node(60)
root.right.right = Node(70)
root.left.left.left = Node(80)
root.left.left.right = Node(90)
root.left.right.left = Node(80)
root.left.right.right = Node(90)
root.right.left.left = Node(80)
root.right.left.right = Node(90)
root.right.right.left = Node(80)
root.right.right.right = Node(90)
print(solution(root))
| mit | 7,824,998,618,597,669,000 | 20.653846 | 78 | 0.615453 | false |
mmcauliffe/python-praat-scripts | tests/test_short.py | 1 | 2226 | import os
import pytest
from pyraat import PraatAnalysisFunction
from pyraat.parse_outputs import parse_track_script_output, parse_point_script_output
from pyraat.exceptions import PyraatError
def test_short_formant_track(praat_path, praat_script_test_dir, vowel_sound_file):
script_path = os.path.join(praat_script_test_dir, 'formant_track.praat')
func = PraatAnalysisFunction(script_path, praat_path, arguments=[0.01, 0.025, 5, 5500])
assert not func.point_measure
assert not func.uses_long
assert func._output_parse_function == parse_track_script_output
output = func(vowel_sound_file)
header = ['F1', 'B1', 'F2', 'B2', 'F3', 'B3', 'F4', 'B4', 'F5', 'B5']
assert all(isinstance(x, float) for x in output.keys())
for k, v in output.items():
assert isinstance(k, float)
assert sorted(v.keys()) == sorted(header)
for k2, v2 in v.items():
assert isinstance(k2, str)
assert isinstance(v2, (float, type(None)))
def test_short_formant_point(praat_path, praat_script_test_dir, vowel_sound_file):
script_path = os.path.join(praat_script_test_dir, 'formant_point.praat')
func = PraatAnalysisFunction(script_path, praat_path, arguments=[0.33, 5, 5500])
assert func.point_measure
assert not func.uses_long
assert func._output_parse_function == parse_point_script_output
header = ['F1', 'B1', 'F2', 'B2', 'F3', 'B3', 'F4', 'B4', 'F5', 'B5']
output = func(vowel_sound_file, 0.33, 5, 5000)
with pytest.raises(PyraatError):
func(vowel_sound_file, 0, 0.33, 5, 5000)
assert sorted(output.keys()) == sorted(header)
for k, v in output.items():
assert isinstance(k, str)
assert isinstance(v, (float, type(None)))
def test_short_cog(praat_path, praat_script_test_dir, vowel_sound_file):
script_path = os.path.join(praat_script_test_dir, 'COG.praat')
func = PraatAnalysisFunction(script_path, praat_path)
assert func.point_measure
assert not func.uses_long
assert func._output_parse_function == parse_point_script_output
output = func(vowel_sound_file)
assert sorted(output.keys()) == ['cog']
assert all(isinstance(x, float) for x in output.values())
| gpl-3.0 | -8,958,473,808,041,287,000 | 37.37931 | 91 | 0.672058 | false |
TinyOS-Camp/DDEA-DEV | Archive/[14_10_11] Dr_Jung_Update/pre_bn_state_processing.py | 1 | 91559 | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 5 15:28:13 2014
@author: deokwooj
"""
from __future__ import division # To forace float point division
import numpy as np
from scipy import stats
from scipy.interpolate import interp1d
from sklearn import mixture
#from sklearn.cluster import Ward
from sklearn.cluster import KMeans
import time
##################################################################
# Custom library
##################################################################
from data_tools import *
from shared_constants import *
import pprint
import lib_bnlearn as rbn
def X_INPUT_to_states(xinput,CORR_VAL_OUT=0,PARALLEL = False):
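    """
    Map each column (sensor) of xinput to a discrete state sequence.
    Each sensor's samples are clustered with state_retrieval (k-means); samples in
    the cluster with the highest center become +1, samples in the cluster with the
    lowest center become -1, and everything else stays 0.
    If CORR_VAL_OUT==1, also computes the Pearson correlation between each state
    sequence and its raw measurements.
    """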
#import pdb;pdb.set_trace()
sinput=np.zeros(xinput.shape)
num_samples=xinput.shape[0]
num_sensors=xinput.shape[1]
if num_samples <num_sensors:
        print 'Warning: number of samples is smaller than number of sensors'
    print 'Mapping', xinput.shape, 'matrix to discrete states'
for k,samples in enumerate(xinput.T):
obs=samples[:,np.newaxis]
label,opt_num_cluster,model,score,score_err_sum= state_retrieval(obs,max_num_cluster=6,est_method='kmean',PARALLEL=PARALLEL,PRINTSHOW=False)
high_peak_label_idx=np.argmax(model.cluster_centers_)
low_peak_label_idx=np.argmin(model.cluster_centers_)
high_peak_idx=np.nonzero(label==high_peak_label_idx)[0]
sinput[high_peak_idx,k]=1
low_peak_idx=np.nonzero(label==low_peak_label_idx)[0]
sinput[low_peak_idx,k]=-1
corr_state_val=[]
if CORR_VAL_OUT==1:
print 'Compute Correlation Score....'
for k,(row1, row2) in enumerate(zip(sinput.T, xinput.T)):
corr_state_val.append(round(stats.pearsonr(row1,row2)[0],3))
corr_state_val=np.array(corr_state_val)
return sinput,corr_state_val
def interpolation_measurement(data_dict,input_names,err_rate=1,sgm_bnd=20):
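    """
    Resample each named measurement stream onto a uniform time grid.
    For every key in input_names, the (time, value) pair stored in
    data_dict[key_name][2] is read, outliers are removed with outlier_detect, and
    the values are re-sampled at the average sampling interval (nearest-neighbor
    interpolation for integer-typed data, linear interpolation otherwise).
    Returns a list of 2xN arrays [time; value] and an array of inferred data types.
    """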
    print 'interpolation starts....'
measurement_point_set=[]
num_of_discrete_val=[]
sampling_interval_set=[]
num_type_set=[]
"""
try:
import pdb;pdb.set_trace()
except ValueError:
import pdb;pdb.set_trace()
"""
for i,key_name in enumerate(input_names):
print key_name,'.....'
t_=np.array(data_dict[key_name][2][0])
if len(t_) == 0:
continue
intpl_intv=np.ceil((t_[-1]-t_[0]) /len(t_))
sampling_interval_set.append(intpl_intv)
val_=np.array(data_dict[key_name][2][1])
num_of_discrete_val_temp=len(set(val_))
num_of_discrete_val.append(num_of_discrete_val_temp)
        # filter outliers, assuming err_rate % of errors and an sgm_bnd x standard deviation rule
outlier_idx=outlier_detect(val_,err_rate,sgm_bnd)
if len(outlier_idx)>0:
print 'outlier samples are detected: ', 'outlier_idx:', outlier_idx
t_=np.delete(t_,outlier_idx)
val_=np.delete(val_,outlier_idx)
t_new=np.r_[t_[0]:t_[-1]:intpl_intv]
"""
if num_of_discrete_val_temp<MIN_NUM_VAL_FOR_FLOAT:
num_type=INT_TYPE
val_new=fast_nearest_interp(t_new, t_,val_)
else:
num_type=FLOAT_TYPE
val_new = np.interp(t_new, t_,val_)
"""
num_type=check_data_type(data_dict[key_name][2][1])
if num_type==INT_TYPE:
val_new=fast_nearest_interp(t_new, t_,val_)
else:
#num_type=FLOAT_TYPE
val_new = np.interp(t_new, t_,val_)
c=np.vstack([t_new,val_new])
measurement_point_set.append(c)
num_type_set.append(num_type)
print '-----------------------------------------------------------------'
#return measurement_point_set,num_type_set,num_of_discrete_val,sampling_interval_set
return measurement_point_set,np.array(num_type_set)
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
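# Illustrative example of the stride trick above (values chosen only for demonstration):
#   rolling_window(np.arange(5), 3)
#   -> array([[0, 1, 2],
#             [1, 2, 3],
#             [2, 3, 4]])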
def rolling_window_label_mode(label,r_window):
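    """
    Smooth a label sequence with a centered rolling-mode filter.
    Even window lengths are bumped to the next odd value, and both ends are padded
    with the first/last filtered value so the output keeps the original length.
    """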
if (r_window/2)==int(r_window/2):
r_window=int(r_window+1)
#raise NameError('length of window size must be odd')
offset=int(r_window/2)
rw_label_temp=stats.mode(rolling_window(label, r_window),1)[0]
head= rw_label_temp[0]*np.ones([offset,1])
body=rw_label_temp
tail= rw_label_temp[-1]*np.ones([offset,1])
rw_label=np.r_[head,body,tail]
return rw_label
def rolling_window_label_binary(label,r_window):
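    """
    Rolling-mean filter for a binary label sequence: each output sample is the
    fraction of ones inside a centered window of length r_window (forced odd),
    padded at both ends to keep the original length.
    """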
if (r_window/2)==int(r_window/2):
r_window=int(r_window+1)
#raise NameError('length of window size must be odd')
offset=int(r_window/2)
rw_label_temp=np.array([ np.sum(temp)/r_window for temp in rolling_window(label, r_window)])
#import pdb;pdb.set_trace()
# rw_label_temp=stats.mode(rolling_window(label, r_window),1)[0]
head= rw_label_temp[0]*np.ones([offset,1])
body=rw_label_temp
tail= rw_label_temp[-1]*np.ones([offset,1])
rw_label=np.r_[head,body[:,np.newaxis],tail]
return rw_label
"""
def state_retrieval(obs,max_num_cluster=6,est_method='kmean'):
#print '========================================================================='
#print 'Retrieving discrete states from data using ',est_method, ' model...'
#print '========================================================================='
score=np.zeros(max_num_cluster)
model_set=[]
#print 'try ',max_num_cluster, ' clusters..... '
for num_cluster in range(max_num_cluster):
#print 'Try ',num_cluster+1, ' clusters '
#print '-----------------------------------'
if est_method=='kmean':
kmean=KMeans(n_clusters=num_cluster+1).fit(obs)
model_set.append(kmean)
#import pdb;pdb.set_trace()
score[num_cluster]=np.sum(kmean.score(obs))
elif est_method=='gmm':
gmm = mixture.GMM(n_components=num_cluster+1).fit(obs)
model_set.append(gmm)
score[num_cluster]=np.sum(gmm.score(obs))
else:
raise NameError('not supported est_method')
score_err_sum=np.zeros(max_num_cluster)
#print 'Finding knee points of log likelihood...'
for i in range(max_num_cluster):
a_0=score[:(i)]
if len(a_0)>1:
slope, intercept, r_value, p_value, std_err = stats.linregress(range(len(a_0)),a_0)
sqr_sum_err0=sum(((slope*np.arange(len(a_0))+ intercept)-a_0)**2)
else:
sqr_sum_err0=0
a_1=score[(i):]
if len(a_1)>1:
slope, intercept, r_value, p_value, std_err = stats.linregress(range(len(a_1)),a_1)
sqr_sum_err1=sum(((slope*np.arange(len(a_1))+ intercept)-a_1)**2)
else:
sqr_sum_err1=0
score_err_sum[i]=sqr_sum_err0+sqr_sum_err1
# Optimum number of clusters.
min_idx=np.argmin(score_err_sum)
opt_num_cluster=min_idx+1
#print 'opt_num_cluster: ' , opt_num_cluster
if est_method=='kmean':
label=model_set[min_idx].labels_
elif est_method=='gmm':
label=model_set[min_idx].predict(obs)
else:
raise NameError('not supported est_method')
return label,opt_num_cluster, model_set[min_idx],score,score_err_sum
"""
def cluster_state_retrieval(tup):
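    """
    Worker for the multiprocessing pool used in state_retrieval.
    tup = (observations, number of clusters, estimation method); returns
    (num_clusters - 1, [fitted model, log-likelihood score]) so the results can be
    collected into a dict keyed by cluster index.
    """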
obs = tup[0]
num_clusters = tup[1]
est_method = tup[2]
#print 'num clusters = ' + str(num_clusters)
if est_method=='kmean':
kmean=KMeans(n_clusters=num_clusters).fit(obs)
model = kmean
score=compute_log_ll(kmean.labels_,obs)
#score=-1*np.log(-1*np.sum(kmean.score(obs)))
elif est_method=='gmm':
gmm = mixture.GMM(n_components=num_clusters).fit(obs)
model = gmm
score=np.sum(gmm.score(obs))
#print 'Done ' + str(num_clusters)
return (num_clusters-1, [model,score])
from multiprocessing import Pool
def compute_log_ll(label_in,obs_in):
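    """
    Gaussian log-likelihood of the observations under a cluster labeling:
    for each label value, a normal distribution is fit to the member samples and
    their log-pdf values are accumulated, dropping any -inf terms.
    """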
log_ll_sum=0
for i in range(label_in.max()+1):
idx=np.nonzero(label_in==i)[0]
val_set=obs_in[idx]
log_val=stats.norm.logpdf(val_set,loc=np.mean(val_set),scale=np.std(val_set))
log_ll_sum=log_ll_sum+sum(log_val[log_val!=-np.inf])
return log_ll_sum
def state_retrieval(obs,max_num_cluster=6,off_set=0,est_method='kmean',PARALLEL = False,PRINTSHOW=False):
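    """
    Retrieve discrete states from a column vector of observations by clustering.
    Models with 1..max_num_cluster clusters are fit (k-means or GMM), a
    log-likelihood score is recorded for each, and the optimal cluster count is
    chosen at the knee of the score curve, i.e. the split that minimizes the summed
    squared error of two piecewise linear fits (the off_set argument is currently
    unused).  Returns (labels, optimal cluster count, best model, scores,
    knee-point errors).
    """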
if PRINTSHOW==True:
print '========================================================================='
print 'Retrieving discrete states from data using ',est_method, ' model...'
print '========================================================================='
print 'try ',max_num_cluster, ' clusters..... '
score=np.zeros(max_num_cluster)
model_set=[]
if not PARALLEL:
for num_cluster in range(max_num_cluster):
#print 'Try ',num_cluster+1, ' clusters '
#print '-----------------------------------'
if est_method=='kmean':
kmean=KMeans(n_clusters=num_cluster+1).fit(obs)
model_set.append(kmean)
#import pdb;pdb.set_trace()
#score[num_cluster]=-1*np.log(-1*np.sum(kmean.score(obs)))
#score[num_cluster]=kmean.score(obs)
#score[num_cluster]=kmean.score(obs)-.5*(num_cluster+1)*1*log10(len(obs))
#log_ll_val=compute_log_ll(kmean.labels_,obs)
score[num_cluster]=compute_log_ll(kmean.labels_,obs)
elif est_method=='gmm':
gmm = mixture.GMM(n_components=num_cluster+1).fit(obs)
model_set.append(gmm)
score[num_cluster]=np.sum(gmm.score(obs))
else:
raise NameError('not supported est_method')
else:
if PRINTSHOW==True:
print 'Parallel enabled...'
model_set = [0] * max_num_cluster
score = [0] * max_num_cluster
p = Pool(max_num_cluster)
params = [(obs,i+1,est_method) for i in range(max_num_cluster)]
model_dict = dict(p.map(cluster_state_retrieval,params))
for k,v in model_dict.iteritems():
model_set[k] = v[0]
score[k] = v[1]
p.close()
p.join()
score_err_sum=np.zeros(max_num_cluster)
if PRINTSHOW==True:
print 'Finding knee points of log likelihood...'
for i in range(max_num_cluster):
a_0=score[:(i)]
if len(a_0)>1:
slope, intercept, r_value, p_value, std_err = stats.linregress(range(len(a_0)),a_0)
sqr_sum_err0=sum(((slope*np.arange(len(a_0))+ intercept)-a_0)**2)
else:
sqr_sum_err0=0
a_1=score[(i):]
if len(a_1)>1:
slope, intercept, r_value, p_value, std_err = stats.linregress(range(len(a_1)),a_1)
sqr_sum_err1=sum(((slope*np.arange(len(a_1))+ intercept)-a_1)**2)
else:
sqr_sum_err1=0
score_err_sum[i]=sqr_sum_err0+sqr_sum_err1
# Optimum number of clusters.
min_idx=np.argmin(score_err_sum)
opt_num_cluster=min_idx+1
if PRINTSHOW==True:
print 'opt_num_cluster: ' , opt_num_cluster
if est_method=='kmean':
label=model_set[min_idx].labels_
elif est_method=='gmm':
label=model_set[min_idx].predict(obs)
else:
raise NameError('not supported est_method')
return label,opt_num_cluster, model_set[min_idx],score,score_err_sum
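# Minimal usage sketch for state_retrieval (illustrative data only; a real call
# would pass an (N, 1) observation array taken from one of the measurement streams):
#   obs = np.random.randn(500, 1)
#   label, opt_k, model, score, score_err = state_retrieval(obs, max_num_cluster=6, est_method='kmean')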
########################################################################
# Irregular event table retrieval
########################################################################
def mesurement_to_states(measurement_point_set,alpha=0.5,max_num_cluster=8,est_method='kmean',PARALLEL=False):
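    """
    Map each interpolated measurement stream to an irregular-event track.
    Streams with fewer than 10 distinct values keep their raw values as states and
    their rarest value is flagged as the irregular event; otherwise the absolute
    first differences are clustered and samples in the cluster with the largest
    center are flagged.  When enough events are observed, an exponential fit to the
    event inter-arrival times sets a rolling-window length (its alpha-quantile
    divided by the sampling interval) that converts the binary flags into a local
    event probability.  Returns a list of 2xN arrays [time; event flag or probability].
    """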
print '==============================================================================='
print 'Mapping measurement to states by ', est_method, ', Parallel Enabled: ',str(PARALLEL)
print '==============================================================================='
model_set=[]
label_set=[]
irr_event_set=[]
start_t=time.time()
for k,data_set in enumerate(measurement_point_set):
print 'working on ',k,'th measurement point... '
val_new=data_set[1]
val_set=list(set(val_new))
num_of_discrete_val=len(val_set)
t_new=data_set[0]
# average sampling interval
sr=(t_new[-1]-t_new[0]) /len(t_new)
# transformed observatoin data for state retrieval
if num_of_discrete_val<10:
            print 'the number of discrete values is less than 10'
            print 'no state retrieval needed'
cnt_num_occurances=[len(np.nonzero(val_new==state)[0]) for state in val_set]
#import pdb;pdb.set_trace()
label=val_new
label_set.append(np.vstack([t_new, label]))
min_label_idx=val_set[np.argmin(cnt_num_occurances)]
irregualr_event=np.zeros(label.shape)
irregualr_event[label==min_label_idx]=1
elif num_of_discrete_val<100:
            print 'the number of discrete values is less than 100'
            print 'using K-MEAN clustering by default'
obs=abs(np.diff(val_new))[:,np.newaxis]
label,opt_num_cluster,model,score,score_err_sum=state_retrieval(obs,max_num_cluster,est_method='kmean',PARALLEL=PARALLEL,PRINTSHOW=False)
max_label_idx=np.argmax(model.cluster_centers_)
max_label=np.zeros(label.shape)
max_label[label==max_label_idx]=1
irregualr_event=np.r_[max_label[0],max_label]
else:
obs=abs(np.diff(val_new))[:,np.newaxis]
label,opt_num_cluster,model,score,score_err_sum=state_retrieval(obs,max_num_cluster,est_method=est_method,PARALLEL=PARALLEL,PRINTSHOW=False)
#import pdb;pdb.set_trace()
if est_method=='kmean':
#label,opt_num_cluster,model,score,score_err_sum=state_retrieval_kmean(obs,max_num_cluster)
max_label_idx=np.argmax(model.cluster_centers_)
elif est_method=='gmm':
#label,opt_num_cluster,model,score,score_err_sum=state_retrieval(obs,max_num_cluster)
max_label_idx=np.argmax(model.means_)
else:
raise NameError('not supported est_method')
model_set.append(model)
label_set.append(np.vstack([t_new[1:], label]))
            # Irregular state mapping
max_label=np.zeros(label.shape)
max_label[label==max_label_idx]=1
irregualr_event=np.r_[max_label[0],max_label]
irregualr_event_inter_arr_times=np.diff(t_new[irregualr_event==1])
if (len(irregualr_event_inter_arr_times)>10) and (num_of_discrete_val>10):
loc_x, scale_x =stats.expon.fit(irregualr_event_inter_arr_times)
inter_arr_times_alpha=stats.expon.ppf(alpha,loc=loc_x,scale=scale_x)
window_size=int(inter_arr_times_alpha/sr)
rw_irregualr_event=rolling_window_label_binary(irregualr_event,window_size)[:,0]
irr_event_set.append(np.vstack([t_new, rw_irregualr_event]))
else:
irr_event_set.append(np.vstack([t_new, irregualr_event]))
end_proc_t=time.time()
print 'the time of processing mesurement_to_states ', end_proc_t-start_t, ' sec'
return irr_event_set
#########################################################################
# Binary Table Extraction
#########################################################################
def get_common_time_reference(ts_list):
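    """
    Build a common time reference for a list of per-sensor timestamp arrays:
    the union of all timestamps restricted to the interval where every sensor has
    data (latest start time to earliest end time), sorted and de-duplicated.
    """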
list_len = len(ts_list)
start_ts_list = np.array([ts_list[i][0] for i in range(list_len)])
end_ts_list = np.array([ts_list[i][-1] for i in range(list_len)])
common_start_ts = np.max(start_ts_list)
common_end_ts = np.min(end_ts_list)
common_ts = []
for i in range(list_len):
#common_ts = common_ts + ts_list[i]
common_ts = np.hstack([common_ts,ts_list[i]])
# remove duplicated ts
common_ts = np.asarray(sorted(list(set(common_ts))))
common_ts = np.delete(common_ts,np.nonzero(common_ts < common_start_ts)[0])
common_ts = np.delete(common_ts,np.nonzero(common_ts > common_end_ts)[0])
return common_ts
def interpolate_state_nearest(available_ts,available_values, intplt_ts):
f = interp1d(available_ts,available_values,kind='nearest')
## Interpolation
print 'len of intplt points: ' + str(len(intplt_ts))
intplt_values = f(intplt_ts)
return intplt_values
def find_consecutive_dup_rows(mat):
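    """
    Return the indices (in descending order) of rows identical to the row
    immediately above them, so they can be deleted as duplicate state transitions.
    """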
nrows = len(mat)
dup_idx_list = []
for r_idx in range(nrows-1,0,-1):
if all(mat[r_idx] == mat[r_idx-1]):
dup_idx_list.append(r_idx)
return dup_idx_list
def binary_table_extract(irr_event_set, binary_state_cut_off=-1, rm_dup=False):
    # Extracts binary states based on state transitions of the composite binary states (default)
    # or on a reference time grid using interpolation.
    # Returns an N-by-P matrix where N is the number of transitions and P is the number of sensors.
num_of_sensors=len(irr_event_set)
#num_of_transition=1 # to be updated
state_table = []
"""
Steps to find state transition
1. Find the common time frame of all sensors
start = max{ts[0]} for all ts: list of time of each sensor
end = min{ts[-1]} for all ts: list of time of each sensor
2. Find all ts that at least one sensor data available [start,end]
TS = Union of all ts, within [start_end]
3. Interpolate sensor state for each sensor during [start,end]
Before interpolation, convert states into binary (optional)
4. Remove duplicated state transitions (optional)
"""
### Step 1+2: Get common time reference
ts_list = []
for i in range(num_of_sensors):
ts_list.append(irr_event_set[i][0])
print ts_list
common_ts = get_common_time_reference(ts_list)
### interpolate state for each sensor, during common_ts
for i in range(num_of_sensors):
# convert state probability to binary state
if (binary_state_cut_off >= 0):
positive_prob_idx=np.nonzero(irr_event_set[i][1] > binary_state_cut_off)[0]
irr_event_set[i][1][:]=0
irr_event_set[i][1][positive_prob_idx]=1
intplt_states = interpolate_state_nearest(irr_event_set[i][0],irr_event_set[i][1],common_ts)
state_table.append(intplt_states)
state_table = np.asarray(state_table).T
# column: sensor, row: state sample
### Remove duplicated state transitions
if rm_dup==True:
dup_idx_list = find_consecutive_dup_rows(state_table)
state_table = np.delete(state_table,dup_idx_list,axis=0)
common_ts = np.delete(common_ts,dup_idx_list)
return common_ts,state_table
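# A minimal usage sketch (hedged example, not part of the original pipeline). It assumes
# `irr_event_set` was produced by the irregular-event extraction above, i.e. each entry is a
# 2 x N array of [timestamps; binary-state values] for one sensor:
#   common_ts, state_table = binary_table_extract(irr_event_set, binary_state_cut_off=0.5, rm_dup=True)
#   # common_ts  : union of timestamps inside the common observation window
#   # state_table: N-by-P matrix, one column per sensor, consecutive duplicate rows removed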
###################################################################################
# Probability Computation Functions
###################################################################################
# Example Codes
###################################################################################
# data_mat_set=np.array([[1,1,0],[1,1,0],[0,1,0],[1,1,1],[0,1,0],[1,0,0],[0,0,0],[0,1,0]])
# data_mat_set2=np.array([[1,11,100],[1,11,100],[0,11,100],[1,11,101],[0,11,100],[1,10,100],[0,10,100],[0,11,100]])
#data_mat_set=np.array([[1,1,0],[1,1,0],[0,1,0],[1,1,1],[0,1,0],[1,0,0],[0,0,0],[0,1,0]])
#compute_joint_prob(data_mat_set,[0,1],[[0,1],[0]])
#compute_cond_prob(data_mat_set,[0],[[1]],[1],[[1]])
#state_tmp,prob_tmp=compute_effect_prob(data_mat_set,[0],[1],[[1]])
#state_tmp,likelihood_tmp=compute_cause_likelihood(data_mat_set,[0],[1],[[1]])
def compute_joint_prob(data_mat,state_idx_set,state_val_set):
num_samples=data_mat.shape[0]
num_states=data_mat.shape[1]
if len(state_idx_set)!=len(state_val_set):
raise NameError('the length of state_set and state_val must be same')
joint_idx=set(range(num_samples))
for k,state_idx in enumerate(state_idx_set):
samples=data_mat[:,state_idx]
sub_joint_idx=set([])
for state_val in state_val_set[k]:
sub_joint_idx=sub_joint_idx| set(np.nonzero(samples==state_val)[0])
joint_idx=joint_idx & sub_joint_idx
    if num_samples==0:
        return 0
    # use float division so the probability is not truncated by Python 2 integer division
    joint_prob=float(len(joint_idx))/num_samples
    return joint_prob
#def compute_cond_prob(data_mat,state_idx_set,state_val_set,cond_idx_set):
def compute_cond_prob(data_mat,state_idx_set,state_val_set,cond_idx_set,cond_val_set):
joint_state_idx_set=state_idx_set+cond_idx_set
joint_state_val_set=state_val_set+cond_val_set
all_prob=compute_joint_prob(data_mat,joint_state_idx_set,joint_state_val_set)
partial_prob=compute_joint_prob(data_mat,cond_idx_set,cond_val_set)
if partial_prob==0:
return 0
else:
return all_prob/partial_prob
def compute_effect_prob(data_mat,effect_idx_set,cause_idx_set,cause_val_set):
# find f_B*(A)=P(A|B=B*)
# generate a set of all possible states
state_set=[]
for k,idx in enumerate(effect_idx_set):
#print idx, ':', list(set(data_mat[:,idx]))
#set(list(data_mat[idx,:]))
if k==0:
state_set=list(set(data_mat[:,idx]))
else:
state_set=pair_in_idx(state_set,list(set(data_mat[:,idx])))
prob_set=[]
for state_val in state_set:
#import pdb;pdb.set_trace()
if isinstance(state_val,list):
input_val_set=[[val] for val in state_val]
else:
input_val_set=[[state_val]]
prob_temp=compute_cond_prob(data_mat,effect_idx_set,input_val_set,cause_idx_set,cause_val_set)
prob_set.append(prob_temp)
return state_set,prob_set
def compute_cause_likelihood(data_mat,cause_idx_set,effect_idx_set,effect_val_set):
# find f_A*(B)=P(A=A*|B)
# generate a set of all possible states
state_set=[]
for k,idx in enumerate(cause_idx_set):
#print idx, ':', list(set(data_mat[:,idx]))
#set(list(data_mat[idx,:]))
#import pdb;pdb.set_trace()
if k==0:
state_set=list(set(data_mat[:,idx]))
else:
state_set=pair_in_idx(state_set,list(set(data_mat[:,idx])))
likelihood_set=[]
for state_val in state_set:
#import pdb;pdb.set_trace()
if isinstance(state_val,list):
input_val_set=[[val] for val in state_val]
else:
input_val_set=[[state_val]]
prob_temp=compute_cond_prob(data_mat,effect_idx_set,effect_val_set,cause_idx_set,input_val_set)
likelihood_set.append(prob_temp)
return state_set,likelihood_set
def irr_state_mapping(state_mat,weight_coeff=10):
peak_prob=np.array([compute_joint_prob(state_mat,[k],[[PEAK]]) for k in range(state_mat.shape[1])])
low_prob=np.array([compute_joint_prob(state_mat,[k],[[LOW_PEAK]]) for k in range(state_mat.shape[1])])
no_prob=np.array([compute_joint_prob(state_mat,[k],[[NO_PEAK]]) for k in range(state_mat.shape[1])])
irr_state_prob=np.zeros(state_mat.shape[1])
irr_state_mat=np.zeros(state_mat.shape)
skewness_metric_sort=np.zeros(peak_prob.shape)
idx_state_map=[PEAK,NO_PEAK,LOW_PEAK]
for k,prob_set in enumerate(np.vstack([peak_prob,no_prob,low_prob]).T):
        # Processing probability data for each sensor
prob_sort_idx=np.argsort(prob_set)
prob_sort=prob_set[prob_sort_idx]
#import pdb;pdb.set_trace()
# if k==16:
# import pdb;pdb.set_trace()
if weight_coeff*(prob_sort[0]+prob_sort[1]) <prob_sort[2]:
irr_prob=prob_sort[0]+prob_sort[1]
reg_prob=prob_sort[2]
irr_state_mat[(state_mat[:,k]==idx_state_map[prob_sort_idx[0]]) | (state_mat[:,k]==idx_state_map[prob_sort_idx[1]]),k]=1
else:
irr_prob=prob_sort[0]
reg_prob=prob_sort[1]+prob_sort[2]
irr_state_mat[state_mat[:,k]==idx_state_map[prob_sort_idx[0]],k]=1
temp=abs(irr_prob-reg_prob)/np.sqrt(reg_prob*irr_prob)
if temp<np.inf:
skewness_metric_sort[k]=temp
irr_state_prob[k]=irr_prob
desc_sort_idx=np.argsort(-1*skewness_metric_sort)
return irr_state_mat
#return irr_state_mat,irr_state_prob,skewness_metric_sort[desc_sort_idx],desc_sort_idx
###################################################################################
# Probability Analysis Functions
###################################################################################
def time_effect_analysis(data_mat,data_name,avgtime_names,s_name,DO_PLOT=False):
s_idx=data_name.index(s_name)
t_idx=[[data_name.index(ntemp)] for ntemp in avgtime_names] #['MTH', 'WD', 'HR']
m_list=list(set(data_mat[:,data_name.index('MTH')]))
state_list=list(set(data_mat[:,s_idx]))
s_prob_log=[[]]*len(yearMonths)
    print 'Monthly analysis...'
for m_idx in yearMonths:
print monthDict[m_idx]
if m_idx not in m_list:
print 'no data for this month'
print '-----------------------------'
continue
prob_map=np.zeros([len(state_list),len(Week),len(DayHours)])
#for h_idx in DayHours:
start_t=time.time()
for dh_pair in pair_in_idx(Week,DayHours):
#state_tmp,prob_tmp=compute_effect_prob(data_mat,[s_idx],t_idx,[[m_idx],Weekday,[h_idx]])
state_tmp,prob_tmp=compute_effect_prob(data_mat,[s_idx],t_idx,[[m_idx],[dh_pair[0]],[dh_pair[1]]])
for state in state_list:
prob_map[state_list.index(state) ,dh_pair[0],dh_pair[1]]=prob_tmp[state_tmp.index(state)]
end_t=time.time()
print 'spend ' ,end_t-start_t,'secs'
s_prob_log[m_idx]=prob_map
#m_prob_log
print '-----------------------------'
#s_m_data_valid=[ False if sum(prob)==0 else True for prob in s_prob_log]
valid_mon_list=[month_val for month_val in yearMonths if len(s_prob_log[month_val])>0]
if DO_PLOT==True:
plot_time_effect(s_name,state_list,valid_mon_list,s_prob_log)
valid_mon_pair=pair_in_idx(valid_mon_list)
time_effect_mat_dist=np.zeros([len(state_list),len(valid_mon_pair)])
for i,state_idx in enumerate(range(len(state_list))):
for j,mon_idx_pair in enumerate(valid_mon_pair):
val_temp=norm(np.array(s_prob_log[mon_idx_pair[0]][state_idx])-np.array(s_prob_log[mon_idx_pair[1]][state_idx]))
time_effect_mat_dist[i,j]=val_temp
score_in_structure=[]
for k,mon_idx in enumerate(valid_mon_list):
score_val=[]
for state_idx,state_val in enumerate(state_list):
mat_input=np.array(s_prob_log[mon_idx][state_idx])
dst_col=find_norm_dist_matrix(mat_input)
dst_row=find_norm_dist_matrix(mat_input.T)
score_val.append(dst_col.mean()+dst_row.mean())
score_in_structure.append(np.sum(score_val))
return state_list,s_prob_log,time_effect_mat_dist,score_in_structure,valid_mon_list,state_list
def plot_time_effect(s_name,state_list,valid_mon_list,s_prob_log):
plt.figure(s_name)
for i,state_val in enumerate(state_list):
for j, mon_val in enumerate(valid_mon_list):
plt_idx=len(valid_mon_list)*i+j+1
plt.subplot(len(state_list),len(valid_mon_list),plt_idx)
im = plt.imshow(s_prob_log[mon_val][state_list.index(state_val)],interpolation='none',vmin=0, vmax=1,aspect='auto')
if set(stateDict.keys())==set(state_list):
plt.title(monthDict[mon_val]+' , state: '+ stateDict[state_val])
else:
plt.title(monthDict[mon_val]+' , state: '+ str(state_val))
plt.yticks(weekDict.keys(),weekDict.values())
plt.colorbar()
#plt.xlabel('Hours of day')
if i == len(state_list) - 1:
plt.xlabel('Hours of day')
#plt.subplots_adjust(right=0.95)
#cbar_ax = plt.add_axes([0.95, 0.15, 0.05, 0.7])
#cax,kw = mpl.colorbar.make_axes([ax for ax in pl t.axes().flat])
#plt.colorbar(im, cax=cax, **kw)
#plt.colorbar(im,cbar_ax)
def time_effect_analysis_all(data_mat,data_name,avgtime_names,avgsensor_names):
monthly_structure_score=[]
monthly_variability=[]
for s_name in avgsensor_names:
print s_name
print '==============================='
state_list,s_prob_log,time_effect_mat_dist,score_in_structure,valid_mon_list,state_list\
=time_effect_analysis(data_mat,data_name,avgtime_names,s_name,DO_PLOT=False)
monthly_variability.append(time_effect_mat_dist.mean())
monthly_structure_score.append(score_in_structure)
return np.array(monthly_variability),np.array(monthly_structure_score)
###############################################################################################
# Analysis- Sensitivity of state distribution for parameters\
# Use Bhattacharyya distance to compute the distance between two probabilities
#D_b(p,q)= - ln (BC(p,q)) where BC(p,q)=\sum_x \sqrt{p(x)q(x)}
# Due to the triangular property we use the Hellinger distance, D_h(p,q)=\sqrt{1-BC(p,q)}
###############################################################################################
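# Hedged numerical sketch of the distances used below (hypothetical distributions p and q
# over the same discrete states; this example is illustrative and not part of the original code):
#   p = np.array([0.7, 0.2, 0.1]); q = np.array([0.5, 0.3, 0.2])
#   BC  = min(1, np.sum(np.sqrt(p*q)))      # Bhattacharyya coefficient
#   D_b = -np.log(BC)                       # Bhattacharyya distance (dst_type='b')
#   D_h = np.sqrt(1-BC)                     # Hellinger distance (dst_type='h')
#   D_v = 0.5*min(1, np.sum(np.abs(p-q)))   # total-variation-style distance (dst_type='v')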
def param_sensitivity(data_mat, data_name,avgsensor_names,wfactor,dst_type):
wfactor_prob_map=[]
wfactor_state_map=[]
wfactor_sensitivity=[]
wfactor_idx=data_name.index(wfactor)
wfactor_list=list(set(data_mat[:,wfactor_idx]))
for k,s_name in enumerate(avgsensor_names):
s_idx=data_name.index(s_name)
state_list=list(set(data_mat[:,s_idx]))
prob_map=np.zeros([len(state_list),len(wfactor_list)])
state_map=np.zeros([len(state_list),len(wfactor_list)])
for i,wfactor_state in enumerate(wfactor_list):
state_tmp,prob_tmp=compute_effect_prob(data_mat,[s_idx],[[wfactor_idx]],[[wfactor_state]])
state_map[:,i]=state_tmp
prob_map[:,i]=prob_tmp
wfactor_prob_map.append(np.round(prob_map,2))
wfactor_state_map.append(state_map)
D_=[]
for probset in pair_in_idx(prob_map.T,prob_map.T):
BC=min(1,sum(np.sqrt(probset[0]*probset[1])))
if dst_type=='b':
D_.append(-1*np.log(BC))
elif dst_type=='h':
D_.append(np.sqrt(1-BC))
elif dst_type=='v':
D_.append(0.5*min(1,sum(abs(probset[0]-probset[1]))))
else:
print 'error'; return
#import pdb;pdb.set_trace()
#BC=np.min(1,sum(np.sqrt(probset[0]*probset[1])))
#if dst_type=='b':
# D_=[-1*np.log(np.min(1,sum(np.sqrt(probset[0]*probset[1])))) for probset in pair_in_idx(prob_map.T,prob_map.T)]
#elif dst_type=='h':
# D_=[np.sqrt(1-np.min(1,sum(np.sqrt(probset[0]*probset[1])))) for probset in pair_in_idx(prob_map.T,prob_map.T)]
#else:
# print 'error'; return
wfactor_sensitivity.append(np.mean(D_))
return wfactor_prob_map,wfactor_state_map, wfactor_sensitivity,wfactor_list
###############################################################################################
def plot_weather_sensitivity(wf_type,wf_prob_map,wf_state_map,wf_sensitivity,wf_list,\
avgsensor_names,Conditions_dict,Events_dict,sort_opt='desc',num_of_picks=9):
# Plotting bar graph
if sort_opt=='desc':
argsort_idx=np.argsort(wf_sensitivity)[::-1]
elif sort_opt=='asc':
argsort_idx=np.argsort(wf_sensitivity)
else:
print 'error in type'
return
wf_sort_idx=np.argsort(wf_list)
width = 0.5 # the width of the bars
color_list=['b','g','r','c','m','y','k','w']
num_col=floor(np.sqrt(num_of_picks))
num_row=ceil(num_of_picks/num_col)
for i in range(num_of_picks):
subplot(num_col,num_row,i+1)
bar_idx=argsort_idx[i]
prob_bar_val=wf_prob_map[bar_idx]
prob_bar_name=avgsensor_names[bar_idx]
prob_bar_wf_state=[str(wf) for wf in np.array(wf_list)[wf_sort_idx]]
prob_bar_sensor_state=wf_state_map[bar_idx]
N =prob_bar_sensor_state.shape[0]
M =prob_bar_sensor_state.shape[1]
ind = np.arange(N) # the x locations for the groups
state_ticks=[]
state_label=[]
for k,(val,state) in enumerate(zip(prob_bar_val[:,wf_sort_idx].T,prob_bar_sensor_state[:,wf_sort_idx].T)):
x=ind+k*5
x_sort_idx=np.argsort(state)
bar(x, val[x_sort_idx], width, color=color_list[k%len(color_list)])
state_ticks=state_ticks+list(x)
state_label=state_label+list(state[x_sort_idx].astype(int))
#category_ticks=category_ticks+[int(mean(x))]
if wf_type=='T':
start_str='TP';end_str='C'
statek=prob_bar_wf_state[k];fontsize_val=10
init_str=start_str+'= '+statek+ end_str
elif wf_type=='D':
start_str='DP';end_str='C'
statek=prob_bar_wf_state[k];fontsize_val=10
init_str=start_str+'= '+statek+ end_str
elif wf_type=='H':
start_str='HD';end_str='%'
statek=prob_bar_wf_state[k];fontsize_val=10
init_str=start_str+'= '+statek+ end_str
elif wf_type=='E':
start_str='EV';end_str=''
statek=\
Events_dict.keys()[Events_dict.values().index(int(prob_bar_wf_state[k]))];fontsize_val=6
if statek=='': statek='none'
#statek=prob_bar_wf_state[k];fontsize_val=10
init_str=start_str+'= '+statek+ end_str
elif wf_type=='C':
start_str='CD';end_str=''
statek=prob_bar_wf_state[k];fontsize_val=10
#statek=\
#Conditions_dict.keys()[Conditions_dict.values().index(int(prob_bar_wf_state[k]))];fontsize_val=6
if statek=='': statek='none'
init_str=''
else:
print 'no such type'
return
if k==0:
category_str= init_str
else:
category_str=statek+ end_str
plt.text(int(mean(x)),1.1,category_str,fontsize=fontsize_val)
plt.xticks(state_ticks,state_label )
plt.xlabel('State',fontsize=10)
plt.ylabel('Probability',fontsize=10)
ylim([0,1.3]); title(prob_bar_name,fontsize=10)
def wt_sensitivity_analysis(data_state_mat,data_time_mat,data_weather_mat,sensor_names,time_names,\
Conditions_dict,Events_dict,bldg_tag,trf_tag,weather_names,dict_dir,dst_t='h'):
import pprint
import radar_chart
data_mat = np.hstack([data_state_mat,data_time_mat])
data_name = sensor_names+time_names
    print 'Parameter sensitivity for Months....'
mth_prob_map,mth_state_map, mth_sensitivity,mth_list\
= param_sensitivity(data_mat,data_name,sensor_names,'MTH',dst_type=dst_t)
    print 'Parameter sensitivity for Days....'
wday_prob_map,wday_state_map,wday_sensitivity,wday_list\
= param_sensitivity(data_mat,data_name,sensor_names,'WD',dst_type=dst_t)
    print 'Parameter sensitivity for Hours....'
dhr_prob_map,dhr_state_map,dhr_sensitivity,dhr_list\
= param_sensitivity(data_mat,data_name,sensor_names,'HR',dst_type=dst_t)
    #Month Sensitivity bar plot.
tf_tuple_mth=('MTH',mth_prob_map,mth_state_map,mth_sensitivity,mth_list)
tf_tuple_wday=('WD',wday_prob_map,wday_state_map,wday_sensitivity,wday_list)
tf_tuple_dhr=('HR',dhr_prob_map,dhr_state_map,dhr_sensitivity,dhr_list)
tf_sstv_tuple=np.array([tf_tuple_mth[3],tf_tuple_wday[3],tf_tuple_dhr[3]])
max_tf_sstv=tf_sstv_tuple[tf_sstv_tuple<np.inf].max()*2
tf_sstv_tuple[tf_sstv_tuple==np.inf]=max_tf_sstv
tf_sstv_total=np.sum(tf_sstv_tuple,0)
arg_idx_s=np.argsort(tf_sstv_total)[::-1]
arg_idx_is=np.argsort(tf_sstv_total)
num_of_picks=9
print 'Most time sensitive sensors'
print '---------------------------------------------'
Time_Sensitive_Sensors=list(np.array(sensor_names)[arg_idx_s[0:num_of_picks]])
pprint.pprint(Time_Sensitive_Sensors)
####################################################################
    ## Radar Plotting for Time_Sensitive_Sensors
####################################################################
sensor_no = len(sensor_names)
# convert 'inf' to 1
sen_mth = [max_tf_sstv if val == float("inf") else val for val in tf_tuple_mth[3]]
sen_wday = [max_tf_sstv if val == float("inf") else val for val in tf_tuple_wday[3]]
sen_dhr = [max_tf_sstv if val == float("inf") else val for val in tf_tuple_dhr[3]]
SEN = [[sen_mth[i], sen_wday[i], sen_dhr[i]] for i in range(sensor_no)]
TOTAL_SEN = np.array([sum(SEN[i]) for i in range(sensor_no)])
idx = np.argsort(TOTAL_SEN)[-num_of_picks:] # Best 9 sensors
spoke_labels = ["Month", "Day", "Hour"]
data = [SEN[i] for i in idx]
sensor_labels = [sensor_names[i] for i in idx]
radar_chart.subplot(data, spoke_labels, sensor_labels, saveto=dict_dir+bldg_tag+trf_tag+'time_radar.png')
######################################################################
    #1. effect prob - weather dependency analysis
######################################################################
data_mat = np.hstack([data_state_mat,data_weather_mat])
# Temporary for correcting month change
#data_mat[:,-3]=data_mat[:,-3]-1
data_name = sensor_names+weather_names
# State classification of weather data
temp_idx=data_name.index('TemperatureC')
dewp_idx=data_name.index('Dew PointC')
humd_idx=data_name.index('Humidity')
evnt_idx=data_name.index('Events')
cond_idx=data_name.index('Conditions')
######################################################################
# Weather state classification
######################################################################
weather_dict={}
for class_idx in [temp_idx,dewp_idx,humd_idx]:
obs=data_mat[:,class_idx][:,np.newaxis]
label,opt_num_cluster,model,score,score_err_sum=\
state_retrieval(obs,max_num_cluster=10,est_method='kmean',PARALLEL=IS_USING_PARALLEL_OPT,PRINTSHOW=True)
if class_idx==temp_idx:
weather_dict.update({'Temp':model.cluster_centers_})
elif class_idx==dewp_idx:
weather_dict.update({'Dewp':model.cluster_centers_})
elif class_idx==humd_idx:
weather_dict.update({'Humd':model.cluster_centers_})
else:
print 'not found'
for label_id in range(label.max()+1):
label_idx=np.nonzero(label==label_id)[0]
data_mat[label_idx,class_idx]=np.round(model.cluster_centers_[label_id][0])
##################################################
# Reclassify the Condition states into clarity of the sky
##################################################
#Conditions_dict=data_dict['Conditions_dict'].copy()
#data_mat = np.hstack([avgdata_state_mat,avgdata_weather_mat])
cond_state=[[]]*6
cond_state[5]=['Clear'] # Clear
cond_state[4]=['Partly Cloudy','Scattered Clouds'] # 'Partly Cloudy'
cond_state[3]=['Mostly Cloudy','Overcast'] # 'Overcast'
cond_state[2]=['Light Drizzle','Mist', 'Shallow Fog', 'Patches of Fog',\
'Light Snow', 'Light Freezing Rain', 'Light Rain Showers','Light Freezing Fog','Light Snow Showers', 'Light Rain'] # Light Rain
cond_state[1]=['Rain','Rain Showers','Thunderstorms and Rain'\
,'Heavy Rain','Heavy Rain Showers','Drizzle', 'Heavy Drizzle', 'Fog'] # Heavy Rain
cond_state[0]=['Unknown']
cond_data_array=data_mat[:,cond_idx].copy()
for k in range(len(cond_state)):
for cond_str in cond_state[k]:
cond_val_old=Conditions_dict[cond_str]
idx_temp=np.nonzero(cond_data_array==cond_val_old)[0]
if len(idx_temp)>0:
data_mat[idx_temp,cond_idx]=k
#plt.plot(data_mat[:,cond_idx],'.')
Conditions_dict_temp={}
Conditions_dict_temp.update({'Clear':5})
Conditions_dict_temp.update({'Partly Cloudy':4})
Conditions_dict_temp.update({'Overcast':3})
Conditions_dict_temp.update({'Light Rain':2})
Conditions_dict_temp.update({'Heavy Rain':1})
Conditions_dict_temp.update({'Unknown':0})
    # Abbreviated weather factor type is used as the dictionary key
weather_dict.update({'Cond':Conditions_dict_temp})
####################################################################
    # Reclassify the Event states into rain/snow/fog weather conditions
####################################################################
event_state=[[]]*4
event_state[0]=[''] # No event
event_state[1]=['Rain-Snow','Snow'] # Snow
event_state[2]=['Rain','Thunderstorm','Rain-Thunderstorm'] # Rain
event_state[3]=['Fog','Fog-Rain'] # Fog
event_data_array=data_mat[:,evnt_idx].copy()
for k in range(len(event_state)):
for event_str in event_state[k]:
event_val_old=Events_dict[event_str]
idx_temp=np.nonzero(event_data_array==event_val_old)[0]
if len(idx_temp)>0:
data_mat[idx_temp,evnt_idx]=k
Events_dict_temp={}
Events_dict_temp.update({'NoEvent':0})
Events_dict_temp.update({'Snow':1})
Events_dict_temp.update({'Rain':2})
Events_dict_temp.update({'Fog':3})
weather_dict.update({'Event':Events_dict_temp})
# T,D,H,E,C
    print 'Parameter sensitivity for TemperatureC....'
tempr_prob_map,tempr_state_map, tempr_sensitivity,tempr_list\
= param_sensitivity(data_mat,data_name,sensor_names,'TemperatureC',dst_type=dst_t)
    print 'Parameter sensitivity for Dew PointC....'
dewp_prob_map,dewp_state_map, dewp_sensitivity, dewp_list\
= param_sensitivity(data_mat,data_name,sensor_names,'Dew PointC',dst_type=dst_t)
    print 'Parameter sensitivity for Humidity....'
humd_prob_map,humd_state_map, humd_sensitivity,humd_list\
= param_sensitivity(data_mat,data_name,sensor_names,'Humidity',dst_type=dst_t)
    print 'Parameter sensitivity for Events....'
event_prob_map,event_state_map,event_sensitivity, event_list\
= param_sensitivity(data_mat,data_name,sensor_names,'Events',dst_type=dst_t)
    print 'Parameter sensitivity for Conditions....'
cond_prob_map,cond_state_map,cond_sensitivity,cond_list\
= param_sensitivity(data_mat,data_name,sensor_names,'Conditions',dst_type=dst_t)
wf_tuple_t=('T',tempr_prob_map,tempr_state_map,tempr_sensitivity,tempr_list)
wf_tuple_d=('D',dewp_prob_map,dewp_state_map,dewp_sensitivity,dewp_list)
wf_tuple_h=('H',humd_prob_map,humd_state_map,humd_sensitivity,humd_list)
wf_tuple_e=('E',event_prob_map,event_state_map,event_sensitivity,event_list)
wf_tuple_c=('C',cond_prob_map,cond_state_map,cond_sensitivity,cond_list)
wf_sstv_tuple=np.array([wf_tuple_t[3],wf_tuple_d[3],wf_tuple_h[3],wf_tuple_e[3],wf_tuple_c[3]])
max_wf_sstv=wf_sstv_tuple[wf_sstv_tuple<np.inf].max()*2
wf_sstv_tuple[wf_sstv_tuple==np.inf]=max_wf_sstv
wf_sstv_total=np.sum(wf_sstv_tuple,0)
arg_idx_s=np.argsort(wf_sstv_total)[::-1]
print 'Most weather sensitive sensors'
print '---------------------------------------------'
Weather_Sensitive_Sensors=list(np.array(sensor_names)[arg_idx_s[0:num_of_picks]])
pprint.pprint(Weather_Sensitive_Sensors)
####################################################################
## Radar Plotting for Weather_Sensitive_Sensors
####################################################################
sensor_no = len(sensor_names)
# convert 'inf' to 1
sen_t = [max_wf_sstv if val == float("inf") else val for val in wf_tuple_t[3]]
sen_d = [max_wf_sstv if val == float("inf") else val for val in wf_tuple_d[3]]
sen_h = [max_wf_sstv if val == float("inf") else val for val in wf_tuple_h[3]]
sen_e = [max_wf_sstv if val == float("inf") else val for val in wf_tuple_e[3]]
sen_c = [max_wf_sstv if val == float("inf") else val for val in wf_tuple_c[3]]
SEN = [[sen_t[i], sen_d[i], sen_h[i], sen_e[i], sen_c[i]] for i in range(sensor_no)]
TOTAL_SEN = np.array([sum(SEN[i]) for i in range(sensor_no)])
    idx = np.argsort(TOTAL_SEN)[-num_of_picks:] # Best 9 sensors
spoke_labels = ["Temperature", "Dew Point", "Humidity", "Events", "Conditions"]
data = [SEN[i] for i in idx]
sensor_labels = [sensor_names[i] for i in idx]
import radar_chart
radar_chart.subplot(data, spoke_labels, sensor_labels, saveto=dict_dir+bldg_tag+trf_tag+'weather_radar.png')
#radar_chart.plot(data, spoke_labels, sensor_labels, saveto="weather_radar.png")
####################################################################
## Bar Plotting for Weather and time sensitive_Sensors
####################################################################
import bar_chart
# Load from binaries
#sen_mth sen_wday sen_dhr sen_t sen_d sen_h sen_e sen_c
SEN = [[sen_mth[i],sen_wday[i],sen_dhr[i],sen_t[i], sen_d[i], sen_h[i], sen_e[i], sen_c[i]] for i in range(sensor_no)]
TOTAL_SEN = np.array([sum(SEN[i]) for i in range(sensor_no)])
idx = np.argsort(TOTAL_SEN)[-15:] # Best 15 sensors
#data = [[TOTAL_SEN[i] for i in idx]] * 8
data = [[np.array(SEN)[i,k] for i in idx] for k in range(8)]
labels = [[sensor_names[i] for i in idx]] * 8
titles = ["Month", "Day", "Hour", "Temperature", "Dew Point", "Humidity", "Events", "Conditions"]
colors = ["b" if i < 3 else "g" for i in range(8)]
bar_chart.plot(data, labels, titles, colors, grid=True, savefig=dict_dir+bldg_tag+trf_tag+'bar.png', savereport=dict_dir+bldg_tag+trf_tag+'all_bar.csv')
####################################################################
    ## Radar Plotting for Time-Weather_Sensitive_Sensors
####################################################################
wtf_sstv_total=wf_sstv_total+tf_sstv_total
arg_idx_s=np.argsort(wtf_sstv_total)[::-1]
#arg_idx_is=np.argsort(wtf_sstv_total)
num_of_picks=9
print 'Most time-weather sensitive sensors'
print '---------------------------------------------'
WT_Sensitive_Sensors=list(np.array(sensor_names)[arg_idx_s[0:num_of_picks]])
pprint.pprint(WT_Sensitive_Sensors)
sensor_no = len(sensor_names)
# convert 'inf' to 1
SEN = [[sen_mth[i], sen_wday[i], sen_dhr[i],sen_t[i], sen_d[i], sen_h[i], sen_e[i], sen_c[i]] for i in range(sensor_no)]
TOTAL_SEN = np.array([sum(SEN[i]) for i in range(sensor_no)])
idx = np.argsort(TOTAL_SEN)[-num_of_picks:] # Best 9 sensors
spoke_labels = ["Month", "Day", "Hour","Temperature", "Dew Point", "Humidity", "Events", "Conditions"]
data = [SEN[i] for i in idx]
sensor_labels = [sensor_names[i] for i in idx]
radar_chart.subplot(data, spoke_labels, sensor_labels, saveto=dict_dir+bldg_tag+trf_tag+'time_weather_radar.png')
fig=plt.figure()
idx = np.argsort(TOTAL_SEN)[-(min(len(TOTAL_SEN),50)):] # Best 50 sensors
twf_sstv_tuple = np.array([SEN[i] for i in idx]).T
sensor_labels = [sensor_names[i] for i in idx]
#twf_sstv_tuple=np.vstack([tf_sstv_tuple,wf_sstv_tuple])
vmax_=twf_sstv_tuple.max()
vmin_=twf_sstv_tuple.min()
im=plt.imshow(twf_sstv_tuple,interpolation='none',vmin=vmin_, vmax=vmax_,aspect='equal')
y_label=['MTH', 'WD', 'HR','TemperatureC','Dew PointC','Humidity','Events', 'Conditions']
y_ticks=range(len(y_label))
plt.yticks(y_ticks,y_label)
x_label=sensor_labels
x_ticks=range(len(sensor_labels))
plt.xticks(x_ticks,x_label,rotation=270, fontsize="small")
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("top", "15%", pad="30%")
plt.colorbar(im, cax=cax,orientation='horizontal')
plt.savefig(dict_dir+bldg_tag+trf_tag+'time_weather_hmap.png')
wtf_tuples={}
wtf_tuples.update({'month':tf_tuple_mth})
wtf_tuples.update({'day':tf_tuple_wday})
wtf_tuples.update({'hour':tf_tuple_dhr})
wtf_tuples.update({'month':tf_tuple_mth})
wtf_tuples.update({'t':wf_tuple_t})
wtf_tuples.update({'d':wf_tuple_d})
wtf_tuples.update({'h':wf_tuple_h})
wtf_tuples.update({'e':wf_tuple_e})
wtf_tuples.update({'c':wf_tuple_c})
return wtf_tuples,weather_dict
def check_cond_state(all_cond_name,cond_state):
no_assn_key=[]
for key in all_cond_name:
print '------------------'
print key
num_cnt=0
for k in range(len(cond_state)):
if key in cond_state[k]:
num_cnt=num_cnt+1
print num_cnt
if num_cnt==0:
no_assn_key.append(key)
print '------------------'
print 'unassigned cond key ' ,no_assn_key
return no_assn_key
#all_cond_name=list(set(GW1_.Conditions_dict.keys()+GW2_.Conditions_dict.keys()\
#+VAK2_.Conditions_dict.keys()+VAK1_.Conditions_dict.keys()))
def check_event_state(all_event_name,event_state):
no_assn_key=[]
for key in all_event_name:
print '------------------'
print key
num_cnt=0
for k in range(len(event_state)):
if key in event_state[k]:
num_cnt=num_cnt+1
print num_cnt
if num_cnt==0:
no_assn_key.append(key)
print '------------------'
print 'unassigned event key ' ,no_assn_key
return no_assn_key
#all_event_name=list(set(GW1_.Events_dict.keys()+GW2_.Events_dict.keys()\
#+VAK2_.Events_dict.keys()+VAK1_.Events_dict.keys()))
def weather_convert(wdata_mat,wdata_name, Conditions_dict,Events_dict):
##########################################
# New dictionary by state classification of weather data
##########################################
weather_dict={}
##########################################
# index of weather data point in previous data
##########################################
try:
temp_idx=wdata_name.index('TemperatureC')
except:
temp_idx=[]
try:
dewp_idx=wdata_name.index('Dew_PointC')
except:
dewp_idx=[]
try:
humd_idx=wdata_name.index('Humidity')
except:
humd_idx=[]
try:
evnt_idx=wdata_name.index('Events')
except:
evnt_idx=[]
try:
cond_idx=wdata_name.index('Conditions')
except:
cond_idx=[]
######################################################################
# Weather state classification
######################################################################
for class_idx in [temp_idx,dewp_idx,humd_idx]:
obs=wdata_mat[:,class_idx][:,np.newaxis]
label,opt_num_cluster,model,score,score_err_sum=\
state_retrieval(obs,max_num_cluster=30,off_set=1,est_method='kmean',PARALLEL=IS_USING_PARALLEL_OPT,PRINTSHOW=False)
if class_idx==temp_idx:
print 'Temp state classification...'
weather_dict.update({'Temp':model.cluster_centers_})
elif class_idx==dewp_idx:
print 'Dewp state classification...'
weather_dict.update({'Dewp':model.cluster_centers_})
elif class_idx==humd_idx:
print 'Humd state classification...'
weather_dict.update({'Humd':model.cluster_centers_})
else:
print 'not found'
for label_id in range(label.max()+1):
label_idx=np.nonzero(label==label_id)[0]
wdata_mat[label_idx,class_idx]=np.round(model.cluster_centers_[label_id][0])
##################################################
# Reclassify the Condition states into clarity of the sky
##################################################
cond_state=[[]]*9
cond_state[8]=['Clear'] # Clear
cond_state[7]=['Partly Cloudy','Scattered Clouds'] # 'Partly Cloudy'
cond_state[6]=['Mostly Cloudy','Overcast'] # 'Overcast'
cond_state[5]=['Fog','Mist', 'Shallow Fog','Patches of Fog','Light Freezing Fog'] # Light Rain
cond_state[4]=['Drizzle', 'Heavy Drizzle','Light Drizzle','Light Freezing Drizzle']
cond_state[3]=['Rain','Rain Showers','Thunderstorms and Rain'\
,'Heavy Rain','Heavy Rain Showers', 'Freezing Rain','Light Freezing Rain', \
'Light Rain Showers','Light Rain','Light Thunderstorms and Rain'] # Heavy Rain
cond_state[2]=['Ice Pellets', 'Ice Crystals','Light Ice Crystals','Light Ice Pellets']
cond_state[1]=['Snow','Snow Showers','Light Snow','Light Snow Grains','Light Snow Showers'] # 'Snow'
cond_state[0]=['Unknown']
cond_data_array=wdata_mat[:,cond_idx].copy()
print 'Condition state classification...'
for k in range(len(cond_state)):
for cond_str in cond_state[k]:
if cond_str in Conditions_dict.keys():
cond_val_old=Conditions_dict[cond_str]
idx_temp=np.nonzero(cond_data_array==cond_val_old)[0]
if len(idx_temp)>0:
wdata_mat[idx_temp,cond_idx]=k
Conditions_dict_temp={}
Conditions_dict_temp.update({'Clear':8})
Conditions_dict_temp.update({'Cloudy':7})
Conditions_dict_temp.update({'Overcast':6})
Conditions_dict_temp.update({'Fog':5})
Conditions_dict_temp.update({'Drizzle':4})
Conditions_dict_temp.update({'Rain':3})
Conditions_dict_temp.update({'Ice':2})
Conditions_dict_temp.update({'Snow':1})
Conditions_dict_temp.update({'Unknown':0})
    # Abbreviated weather factor type is used as the dictionary key
weather_dict.update({'Cond':Conditions_dict_temp})
####################################################################
    # Reclassify the Event states into rain/snow/fog weather conditions
####################################################################
event_state=[[]]*4
event_state[0]=[''] # No event
event_state[1]=['Rain-Snow','Snow','Fog-Snow'] # Snow
event_state[2]=['Rain','Thunderstorm','Rain-Thunderstorm'] # Rain
event_state[3]=['Fog','Fog-Rain'] # Fog
print 'Event state classification...'
event_data_array=wdata_mat[:,evnt_idx].copy()
for k in range(len(event_state)):
for event_str in event_state[k]:
if event_str in Events_dict.keys():
event_val_old=Events_dict[event_str]
idx_temp=np.nonzero(event_data_array==event_val_old)[0]
if len(idx_temp)>0:
wdata_mat[idx_temp,evnt_idx]=k
Events_dict_temp={}
Events_dict_temp.update({'NoEvent':0})
Events_dict_temp.update({'Snow':1})
Events_dict_temp.update({'Rain':2})
Events_dict_temp.update({'Fog':3})
weather_dict.update({'Event':Events_dict_temp})
return wdata_mat,weather_dict
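# Hedged usage sketch (assumes a weather matrix and the raw Conditions/Events dictionaries were
# loaded elsewhere; the variable names below are illustrative only):
#   wdata_mat_, weather_dict_ = weather_convert(wdata_mat, weather_names, Conditions_dict, Events_dict)
#   # wdata_mat_   : weather columns re-quantized into discrete states
#   # weather_dict_: new state dictionaries ('Temp', 'Dewp', 'Humd', 'Cond', 'Event')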
def bldg_obj_weather_convert(bldg_obj):
#import pdb;pdb.set_trace()
# For avg
if 'data_weather_mat' in bldg_obj.avg.__dict__.keys():
wdata_mat = bldg_obj.avg.data_weather_mat.copy()
wdata_name =bldg_obj.avg.weather_names
Conditions_dict= bldg_obj.Conditions_dict.copy()
Events_dict= bldg_obj.Events_dict.copy()
wdata_mat,weather_dict=weather_convert(wdata_mat,wdata_name, Conditions_dict,Events_dict)
bldg_obj.avg.weather_dict=weather_dict
bldg_obj.avg.data_weather_mat_=wdata_mat
# For diff
if 'data_weather_mat' in bldg_obj.diff.__dict__.keys():
wdata_mat = bldg_obj.diff.data_weather_mat.copy()
wdata_name =bldg_obj.diff.weather_names
Conditions_dict= bldg_obj.Conditions_dict.copy()
Events_dict= bldg_obj.Events_dict.copy()
wdata_mat,weather_dict=weather_convert(wdata_mat,wdata_name, Conditions_dict,Events_dict)
bldg_obj.diff.weather_dict=weather_dict
bldg_obj.diff.data_weather_mat_=wdata_mat
def find_cond_lh_set(data_state_mat,cause_idx_set,effect_idx,obs_state):
optprob_set=np.zeros(len(cause_idx_set))
optstate_set=np.zeros(len(cause_idx_set))
for i,cause_idx in enumerate(cause_idx_set):
# Compute liklihoood of GW2 avg data state map among sensors
avg_state_temp, avg_prob_temp\
=compute_cause_likelihood(data_state_mat,[cause_idx],[[effect_idx]],[[obs_state]])
# masking its own effect
if cause_idx==effect_idx:
# and its state
max_opt_state=np.nan
# and its probability
max_opt_prob=-np.inf
else:
# find sensor index giving the maximum likelihood
max_idx=np.argmax(avg_prob_temp)
# and its state
max_opt_state=avg_state_temp[max_idx]
# and its probability
max_opt_prob=avg_prob_temp[max_idx]
optprob_set[i]=max_opt_prob
optstate_set[i]=max_opt_state
return optstate_set, optprob_set
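# Hedged usage sketch (hypothetical inputs): given a combined state matrix and the column index
# of a power sensor, find the most likely state of every candidate cause column at its peak state.
#   optstate_set, optprob_set = find_cond_lh_set(all_data_state_mat, range(num_cols), p_idx, peak_state)
#   # optprob_set[i] is the best conditional likelihood obtainable from cause column i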
def create_bldg_obj(dict_dir,bldg_tag,pname_key):
print '==================================='
print 'create object for ', bldg_tag+'BLDG'
print '==================================='
cmd_=bldg_tag+'data_dict = mt.loadObjectBinaryFast(dict_dir+'+'\'data_dict.bin\')'
exec(cmd_)
sig_tag_set=[]
try:
cmd_=bldg_tag+'diffdata_dict = mt.loadObjectBinaryFast(dict_dir+'+'\'diffdata_dict.bin\')'
exec(cmd_)
sig_tag_set.append('diff')
except:
pass
try:
cmd_=bldg_tag+'avgdata_dict = mt.loadObjectBinaryFast(dict_dir+'+'\'avgdata_dict.bin\')'
exec(cmd_)
sig_tag_set.append('avg')
except:
pass
###########################################################################################
for sig_tag in sig_tag_set:
cmd_str=[[]]*9
cmd_str[0]=bldg_tag+sig_tag+'data_state_mat='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_state_mat\']'
cmd_str[1]=bldg_tag+sig_tag+'data_weather_mat='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_weather_mat\']'
cmd_str[2]=bldg_tag+sig_tag+'data_time_mat='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_time_mat\']'
cmd_str[3]=bldg_tag+sig_tag+'_time_slot='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'_time_slot\']'
cmd_str[4]=bldg_tag+sig_tag+'data_exemplar='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_exemplar\']'
cmd_str[5]=bldg_tag+sig_tag+'data_zvar=remove_dot('+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_zvar\'])'
cmd_str[6]=bldg_tag+sig_tag+'sensor_names=remove_dot('+bldg_tag+sig_tag+'data_dict[\'sensor_names\'])'
cmd_str[7]=bldg_tag+sig_tag+'weather_names=remove_dot('+bldg_tag+sig_tag+'data_dict[\'weather_names\'])'
cmd_str[8]=bldg_tag+sig_tag+'time_names=remove_dot('+bldg_tag+sig_tag+'data_dict[\'time_names\'])'
for cmd_ in cmd_str:
exec(cmd_)
if 'avg' in sig_tag:
print "--*--*--*--*--*--*--*--*-- create_bldg_obj::(" + sig_tag + ") data_weather_mat --*--*--*--*--*-"
exec("print " + bldg_tag+sig_tag+'data_weather_mat[:,4]')
print "--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*"
#TODO: Name correction for exemplar
if isinstance(pname_key,list)==True:
cmd_str_tmp=sig_tag+'p_names=pname_key'
exec(cmd_str_tmp)
cmd_str_tmp=bldg_tag+sig_tag+'p_idx=['+bldg_tag+sig_tag+'sensor_names.index(name_) for name_ in pname_key]'
exec(cmd_str_tmp)
else:
cmd_str_tmp=bldg_tag+sig_tag+'p_idx=grep('+'\''+pname_key+'\''+','+bldg_tag+sig_tag+'sensor_names)'
exec(cmd_str_tmp)
cmd_str_tmp=bldg_tag+sig_tag+'p_names=list(np.array('+bldg_tag+sig_tag+'sensor_names)['+bldg_tag+sig_tag+'p_idx])'
exec(cmd_str_tmp)
cmd_str_tmp=sig_tag+'p_names=list(np.array('+bldg_tag+sig_tag+'sensor_names)['+bldg_tag+sig_tag+'p_idx])'
exec(cmd_str_tmp)
print '--------------------------------------------------------'
print ' Power sensor selected -'+sig_tag
print '--------------------------------------------------------'
cmd_str_tmp='pprint.pprint('+sig_tag+'p_names)'
exec(cmd_str_tmp)
print '----------------------------------------'
print 'creating '+ bldg_tag+' obj....'
print '----------------------------------------'
cmd_str_=bldg_tag+'=obj({'+'\'avg\''+':obj({}),'+'\'diff\''+':obj({})})'
exec(cmd_str_)
for sig_tag in sig_tag_set:
print 'generating '+ sig_tag+' members....'
cmd_str=[[]]*12
#cmd_str[0]=bldg_tag+'.'+sig_tag+'=[]'
cmd_str[0]='[]'
cmd_str[1]=bldg_tag+'.'+sig_tag+'.data_state_mat='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_state_mat\']'
cmd_str[2]=bldg_tag+'.'+sig_tag+'.data_weather_mat='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_weather_mat\']'
cmd_str[3]=bldg_tag+'.'+sig_tag+'.data_time_mat='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_time_mat\']'
cmd_str[4]=bldg_tag+'.'+sig_tag+'.time_slot='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'_time_slot\']'
cmd_str[5]=bldg_tag+'.'+sig_tag+'.data_exemplar='+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_exemplar\']'
cmd_str[6]=bldg_tag+'.'+sig_tag+'.data_zvar=remove_dot('+bldg_tag+sig_tag+'data_dict[\''+sig_tag+'data_zvar\'])'
cmd_str[7]=bldg_tag+'.'+sig_tag+'.sensor_names=remove_dot('+bldg_tag+sig_tag+'data_dict[\'sensor_names\'])'
cmd_str[8]=bldg_tag+'.'+sig_tag+'.weather_names=remove_dot('+bldg_tag+sig_tag+'data_dict[\'weather_names\'])'
cmd_str[9]=bldg_tag+'.'+sig_tag+'.time_names=remove_dot('+bldg_tag+sig_tag+'data_dict[\'time_names\'])'
cmd_str[10]=bldg_tag+'.'+sig_tag+'.p_idx='+bldg_tag+sig_tag+'p_idx'
cmd_str[11]=bldg_tag+'.'+sig_tag+'.p_names=remove_dot('+bldg_tag+sig_tag+'p_names)'
for cmd_ in cmd_str: exec(cmd_)
#TODO: Name correction for exemplar
cmd_=bldg_tag+'.'+'Conditions_dict='+bldg_tag+'data_dict[\'Conditions_dict\']'
exec(cmd_)
cmd_=bldg_tag+'.'+'Events_dict='+bldg_tag+'data_dict[\'Events_dict\']'
exec(cmd_)
cmd_='bldg_obj_weather_convert('+bldg_tag+')'
exec(cmd_)
    # Create class structure for data analysis
analysis={}
for sig_tag in sig_tag_set:
cmd_str_='p_names='+bldg_tag+'.'+sig_tag+'.p_names'
exec(cmd_str_)
temp1={}
for name_ in p_names:
temp_s=obj({'optprob_set':[],'optstate_set':[]})
temp_t=obj({'optprob_set':[],'optstate_set':[]})
temp_w=obj({'optprob_set':[],'optstate_set':[]})
temp2=obj({'peak_eff_state':[],'sensor':temp_s,'time':temp_t,'weather':temp_w})
temp1.update({remove_dot(name_):temp2})
analysis.update({sig_tag:obj(temp1)})
analysis=obj(analysis)
cmd_str_=bldg_tag+'.analysis=analysis'
exec(cmd_str_)
print '-------------------------'
print 'Compute LH values'
print '-------------------------'
for sig_tag in sig_tag_set:
print sig_tag+'.....'
cmd_str_='all_data_state_mat=np.vstack(('+bldg_tag+'.'+sig_tag+'.data_state_mat.T, '\
+bldg_tag+'.'+sig_tag+'.data_time_mat.T,'+bldg_tag+'.'+sig_tag+'.data_weather_mat_.T)).T'
exec(cmd_str_)
cmd_str_='p_idx='+bldg_tag+'.'+sig_tag+'.p_idx'
exec(cmd_str_)
cmd_str_='p_names='+bldg_tag+'.'+sig_tag+'.p_names'
exec(cmd_str_)
cmd_str_='len_sensor='+bldg_tag+'.'+sig_tag+'.data_state_mat.shape[1]'
exec(cmd_str_)
cmd_str_='len_time='+bldg_tag+'.'+sig_tag+'.data_time_mat.shape[1]'
exec(cmd_str_)
cmd_str_='len_weather='+bldg_tag+'.'+sig_tag+'.data_weather_mat.shape[1]'
exec(cmd_str_)
cmd_str_='sensor_cause_idx_set=range(len_sensor)'
exec(cmd_str_)
cmd_str_='time_cause_idx_set=range(len_sensor,len_sensor+len_time)'
exec(cmd_str_)
cmd_str_='weather_cause_idx_set=range(len_sensor+len_time,len_sensor+len_time+len_weather)'
exec(cmd_str_)
for k,effect_idx in enumerate(p_idx):
print 'compute cond. prob of ' + p_names[k]
cmd_str_='p_name_='+bldg_tag+'.'+sig_tag+'.p_names[k]'
exec(cmd_str_)
            # check whether it is in the set
effect_state_set=np.array(list(set(all_data_state_mat[:, effect_idx])))
eff_state=effect_state_set.max()
cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.peak_eff_state=eff_state'
exec(cmd_str_)
s_optstate_set_temp,s_optprob_set_temp=\
find_cond_lh_set(all_data_state_mat,sensor_cause_idx_set,effect_idx,eff_state)
cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.sensor.optprob_set=s_optprob_set_temp'
exec(cmd_str_)
cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.sensor.optstate_set=s_optstate_set_temp'
exec(cmd_str_)
w_optstate_set_temp,w_optprob_set_temp=\
find_cond_lh_set(all_data_state_mat,weather_cause_idx_set,effect_idx,eff_state)
cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.weather.optprob_set=w_optprob_set_temp'
exec(cmd_str_)
cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.weather.optstate_set=w_optstate_set_temp'
exec(cmd_str_)
w_optstate_set_temp,w_optprob_set_temp=\
find_cond_lh_set(all_data_state_mat,time_cause_idx_set,effect_idx,eff_state)
cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.time.optprob_set=w_optprob_set_temp'
exec(cmd_str_)
cmd_str_=bldg_tag+'.analysis.'+sig_tag+'.'+remove_dot(p_name_)+'.time.optstate_set=w_optstate_set_temp'
exec(cmd_str_)
cmd_str_='mt.saveObjectBinaryFast('+bldg_tag+','+'bldg_tag+\'.bin\')'
exec(cmd_str_)
cmd_str_='obj_out='+bldg_tag
exec(cmd_str_)
return obj_out
def plotting_bldg_lh(bldg_,bldg_key=[],attr_class='sensor',num_picks=30):
print 'plotting lh for '+attr_class
print '============================================'
sig_tag_set=['avg','diff']
plt.ioff()
if len(bldg_key)==0:
bldg_set=bldg_.__dict__.keys()
else :
bldg_set=[bldg_key]
for bldg_tag in bldg_set:
print bldg_tag
cmd_str_= bldg_tag+'=bldg_.__dict__[bldg_tag]'
exec(cmd_str_)
print '-------------------------'
print bldg_tag
print '-------------------------'
for sig_tag in sig_tag_set:
try:
print sig_tag+'.....'
cmd_str_='p_names='+bldg_tag+'.'+sig_tag+'.p_names'
exec(cmd_str_)
for pname_ in p_names:
try:
blank_idx=pname_.index('.')
pname_=pname_.replace('.','_')
except:
pass
cmd_str_='optprob_set='+bldg_tag+'.analysis.'+sig_tag+'.'+pname_+'.'+attr_class+'.optprob_set'
exec(cmd_str_)
cmd_str_= 's_names='+bldg_tag+'.'+sig_tag+'.'+attr_class+'_names'
exec(cmd_str_)
cmd_str_= 'optstate_set='+bldg_tag+'.analysis.'+sig_tag+'.'+pname_+'.'+attr_class+'.optstate_set'
exec(cmd_str_)
num_picks=30
sort_idx=np.argsort(optprob_set)[::-1]
sort_lh=optprob_set[sort_idx[:num_picks]].T
sort_state=optstate_set[sort_idx[:num_picks]].T
plt.figure(figsize=(20.0,15.0))
plt.subplot(2,1,1)
plt.plot(sort_lh,'-*')
x_label= list(np.array(s_names)[sort_idx[:num_picks]])
cmd_str_='key_set=bldg_.__dict__.keys()'
exec(cmd_str_)
if 'convert_name' in key_set:
cmd_str_='x_label=bldg_.convert_name(x_label)'
exec(cmd_str_)
cmd_str_='pname_=bldg_.convert_name(pname_)[0]'
exec(cmd_str_)
x_ticks=range(len(x_label))
plt.xticks(x_ticks,x_label,rotation=270, fontsize="small")
if sig_tag=='avg':
                        plt.title('Most relevant '+ attr_class+ ' attributes to the peak (demand) of '+pname_,fontsize=20)
                    else:
                        plt.title('Most relevant '+ attr_class+ ' attributes to the peak variations of '+pname_,fontsize=20)
plt.tick_params(labelsize='large')
plt.ylim([-0.05, 1.05])
plt.ylabel('Likelihood (From 0 to 1)',fontsize=18)
plt.savefig(fig_dir+bldg_tag+'_'+pname_+'_'+attr_class+'_'+sig_tag+'_lh_sensors.png', bbox_inches='tight')
plt.close()
except:
pass
plt.close()
plt.ion()
##############################################################################
def bldg_lh_sensitivity(bldg_,bldg_key=[],attr_class='sensor',sig_tag='avg'):
print 'compute std of lh for '+attr_class+'...'
if len(bldg_key)==0:
bldg_set=bldg_.__dict__.keys()
else :
bldg_set=[bldg_key]
bldg_lh_std_log={}
for bldg_tag in bldg_set:
try:
print bldg_tag
cmd_str_= bldg_tag+'=bldg_.__dict__[bldg_tag]'
exec(cmd_str_)
print '-------------------------'
print bldg_tag
print '-------------------------'
cmd_str_='p_names='+bldg_tag+'.'+sig_tag+'.p_names'
exec(cmd_str_)
lh_std_log={}
for pname_ in p_names:
try:
blank_idx=pname_.index('.')
pname_=pname_.replace('.','_')
except:
pass
cmd_str_='optprob_set='+bldg_tag+'.analysis.'+sig_tag+'.'+pname_+'.'+attr_class+'.optprob_set'
exec(cmd_str_)
cmd_str_= 's_names='+bldg_tag+'.'+sig_tag+'.'+attr_class+'_names'
exec(cmd_str_)
cmd_str_= 'optstate_set='+bldg_tag+'.analysis.'+sig_tag+'.'+pname_+'.'+attr_class+'.optstate_set'
exec(cmd_str_)
lh_std=np.std(np.sort(optprob_set)[1:])
lh_std_log.update({bldg_.convert_name(pname_)[0]:lh_std})
bldg_lh_std_log.update({bldg_tag:lh_std_log})
except:
pass
return obj(bldg_lh_std_log)
##############################################################################
def bn_anaylsis(bldg_obj,p_name,attr='sensor',sig_tag='avg',num_picks_bn=15,learning_alg='hc'):
cmd_str_='s_names=bldg_obj.'+sig_tag+'.sensor_names'
exec(cmd_str_)
p_idx=s_names.index(p_name)
cmd_str_='data_state_mat=bldg_obj.'+sig_tag+'.data_state_mat'
exec(cmd_str_)
if not (attr=='all') :
cmd_str_='optprob_set=bldg_obj.analysis.'+sig_tag+'.__dict__[p_name].'+attr+'.optprob_set'
exec(cmd_str_)
        cmd_str_='optstate_set=bldg_obj.analysis.'+sig_tag+'.__dict__[p_name].'+attr+'.optstate_set'
        exec(cmd_str_)
        sort_idx=np.argsort(optprob_set)[::-1]
if (attr=='sensor') :
print 'power - sensors...'
cmd_str_='s_names=bldg_obj.'+sig_tag+'.sensor_names'
exec(cmd_str_)
idx_select=[p_idx]+ list(sort_idx[:num_picks_bn])
cmd_str_='bndata_mat=bldg_obj.'+sig_tag+'.data_state_mat[:,idx_select]'
exec(cmd_str_)
cols=[s_names[k] for k in idx_select]
elif (attr=='weather'):
print 'power - weather...'
cmd_str_='w_names=bldg_obj.'+sig_tag+'.weather_names'
exec(cmd_str_)
cmd_str_='bndata_mat=np.vstack((bldg_obj.'+sig_tag+'.data_state_mat[:,p_idx].T,bldg_obj.'+sig_tag+'.data_weather_mat_.T)).T'
exec(cmd_str_)
cols=[p_name]+[w_name for w_name in w_names]
if 'avg' in sig_tag:
print "--*--*--*--*- bn_anaylsis::sig_tag [" + sig_tag + "] data_weather_mat_ --*--*--*--*--*--*--"
exec('print bldg_obj.'+sig_tag+'.data_weather_mat_[:,4]')
print "--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--*--"
elif (attr=='time'):
print 'power - time...'
cmd_str_='t_names=bldg_obj.'+sig_tag+'.time_names'
exec(cmd_str_)
cmd_str_='bndata_mat=np.vstack((bldg_obj.'+sig_tag+\
'.data_state_mat[:,p_idx].T,bldg_obj.'+sig_tag+'.data_time_mat.T)).T'
exec(cmd_str_)
cols=[p_name]+[t_name for t_name in t_names]
elif (attr=='all'):
print 'power - sensors + weather + time ...'
s_cause_label,s_labels,s_hc,s_cp_mat,s_bndata_mat=\
bn_anaylsis(bldg_obj,p_name,attr='sensor',sig_tag=sig_tag,num_picks_bn=num_picks_bn,learning_alg=learning_alg)
t_cause_label,t_labels,t_hc,t_cp_mat,t_bndata_mat=\
bn_anaylsis(bldg_obj,p_name,attr='time',sig_tag=sig_tag,num_picks_bn=num_picks_bn,learning_alg=learning_alg)
w_cause_label,w_labels,w_hc,w_cp_mat,w_bndata_mat=\
bn_anaylsis(bldg_obj,p_name,attr='weather',sig_tag=sig_tag,num_picks_bn=num_picks_bn,learning_alg=learning_alg)
#s_cause_label=s_labels; w_cause_label=w_labels;t_cause_label=t_labels
cmd_str_='s_cause_idx=[bldg_obj.'+sig_tag+'.sensor_names.index(name_) for name_ in s_cause_label]'
exec(cmd_str_)
cmd_str_='t_cause_idx=[bldg_obj.'+sig_tag+'.time_names.index(name_) for name_ in t_cause_label]'
exec(cmd_str_)
cmd_str_='w_cause_idx=[bldg_obj.'+sig_tag+'.weather_names.index(name_) for name_ in w_cause_label]'
exec(cmd_str_)
cmd_str_='bndata_mat=np.vstack((bldg_obj.'+sig_tag+'.data_state_mat[:,p_idx].T,\
bldg_obj.'+sig_tag+'.data_state_mat[:,s_cause_idx].T, \
bldg_obj.'+sig_tag+'.data_weather_mat_[:,w_cause_idx].T, \
bldg_obj.'+sig_tag+'.data_time_mat[:,t_cause_idx].T)).T'
exec(cmd_str_)
cmd_str_='cols=[name_ for name_ in [p_name]+s_cause_label+w_cause_label+t_cause_label]'
exec(cmd_str_)
else:
print 'error'
return 0
if (attr=='all'):
b_arc_list = pair_in_idx([p_name],s_cause_label+ w_cause_label+t_cause_label)+\
pair_in_idx(s_cause_label,w_cause_label+t_cause_label)+\
pair_in_idx(w_cause_label,t_cause_label)+\
pair_in_idx(t_cause_label,t_cause_label)
#import pdb;pdb.set_trace()
elif(attr=='time'):
b_arc_list = pair_in_idx([cols[0]],cols[1:])+pair_in_idx(cols[1:],cols[1:])
else:
b_arc_list = pair_in_idx([cols[0]],cols[1:])
black_arc_frame = rbn.construct_arcs_frame(b_arc_list)
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
if learning_alg=='tabu':
hc_b = rbn.bnlearn.tabu(data_frame,blacklist=black_arc_frame,score='bic')
elif learning_alg=='mmhc':
hc_b = rbn.bnlearn.mmhc(data_frame,blacklist=black_arc_frame,score='bic')
else:
hc_b = rbn.bnlearn.hc(data_frame,blacklist=black_arc_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
cause_label=list(np.array(cols)[np.nonzero(amat[:,0]==1)[0]])
cause_idx=[cols.index(label_) for label_ in cause_label]
return cause_label,cols, hc_b, amat,bndata_mat
def peak_analysis(cause_label,effect_label,col_labels,bndata_mat):
if isinstance(cause_label,list)==True:
cause_idx=[col_labels.index(label_) for label_ in cause_label]
else:
cause_idx=[col_labels.index(label_) for label_ in [cause_label]]
if isinstance(effect_label,list)==True:
effect_idx=[col_labels.index(label_) for label_ in effect_label]
else:
effect_idx=[col_labels.index(label_) for label_ in [effect_label]]
effect_state_set=list(set(bndata_mat[:,effect_idx].T[0]))
LOW_PEAK_STATE_EFFECT=np.min(effect_state_set)
HIGH_PEAK_STATE_EFFECT=np.max(effect_state_set)
high_peak_state_temp, high_peak_prob_temp=\
compute_cause_likelihood(bndata_mat,cause_idx,[effect_idx],[[HIGH_PEAK_STATE_EFFECT]])
low_peak_state_temp, low_peak_prob_temp=\
compute_cause_likelihood(bndata_mat,cause_idx,[effect_idx],[[LOW_PEAK_STATE_EFFECT]])
high_peak_state=np.array(high_peak_state_temp)
high_peak_prob=np.array(high_peak_prob_temp)
low_peak_state=np.array(low_peak_state_temp)
low_peak_prob=np.array(low_peak_prob_temp)
return low_peak_state,low_peak_prob,high_peak_state,high_peak_prob
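# Hedged usage sketch (the labels below are placeholders produced by bn_anaylsis above):
#   low_s, low_p, high_s, high_p = peak_analysis(all_cause_label, p_name, all_labels, all_bndata_mat)
#   # high_p / low_p are the likelihoods of the high / low peak state for each cause-state combination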
def get_tick_symbol(tick_state_val,cause_labels_,Event,Cond):
if len(cause_labels_)==1:
iter_zip=zip(cause_labels_,tick_state_val.T[np.newaxis,:])
else:
iter_zip=zip(cause_labels_,tick_state_val.T)
symbol_tuple=[]
for cause_label_,state_val_ in iter_zip:
symbol_out=[]
if (isinstance(state_val_,np.ndarray)==False) and (isinstance(state_val_,list)==False):
state_val_=[state_val_]
temp=list(set(state_val_))
if list(np.sort(temp))==[-1,0,1]:
cause_label_='PEAK'
for sval_ in state_val_:
if cause_label_=='MTH':
symbol_out.append(monthDict[sval_])
elif cause_label_=='WD':
symbol_out.append(weekDict[sval_])
elif cause_label_=='HR':
symbol_out.append(hourDict[sval_])
elif cause_label_=='Dew_PointC':
                symbol_out.append(str(sval_)+'C')
elif cause_label_=='Humidity':
symbol_out.append(str(sval_)+'%')
elif cause_label_=='Events':
symbol_out.append([key_ for key_,val_ in Event.items() if val_==sval_])
elif cause_label_=='Conditions':
symbol_out.append([key_ for key_,val_ in Cond.items() if val_==sval_])
elif cause_label_=='TemperatureC':
symbol_out.append(str(sval_)+'C')
elif cause_label_=='PEAK':
symbol_out.append(stateDict[sval_])
else:
symbol_out.append(str(sval_))
symbol_tuple.append(symbol_out)
temp_=np.array(symbol_tuple)
temp2=temp_.reshape(len(cause_labels_),np.prod(temp_.shape)/len(cause_labels_)).T
return [tuple(symbol_) for symbol_ in temp2]
def bn_prob_analysis(bldg_obj,sig_tag_='avg'):
cmd_str='Event=bldg_obj.'+sig_tag_+'.weather_dict[\'Event\']'
exec(cmd_str)
cmd_str='Cond=bldg_obj.'+sig_tag_+'.weather_dict[\'Cond\']'
exec(cmd_str)
bn_out_set={}
cmd_str='p_name_set=bldg_obj.analysis.'+sig_tag_+'.__dict__.keys()'
exec(cmd_str)
for p_name in p_name_set:
try:
# bn analysis - Power-Sensor
s_cause_label,s_labels,s_hc,s_cp_mat,s_bndata_mat=\
bn_anaylsis(bldg_obj,p_name,attr='sensor',sig_tag=sig_tag_,num_picks_bn=5)
# bn analysis -Power-Time
t_cause_label,t_labels,t_hc,t_cp_mat,t_bndata_mat=\
bn_anaylsis(bldg_obj,p_name,attr='time',sig_tag=sig_tag_,num_picks_bn=10)
# bn analysis -Power-Weather
w_cause_label,w_labels,w_hc,w_cp_mat,w_bndata_mat=\
bn_anaylsis(bldg_obj,p_name,attr='weather',sig_tag=sig_tag_,num_picks_bn=10)
# bn analysis -Power-Sensor+Time+Weather
all_cause_label,all_labels,all_hc,all_cp_mat,all_bndata_mat=\
bn_anaylsis(bldg_obj,p_name,attr='all',sig_tag=sig_tag_,num_picks_bn=20)
# prob analysis -Power-Sensor+Time+Weather
cause_label=all_cause_label;col_labels=all_labels;
effect_label=p_name; bndata_mat=all_bndata_mat
low_peak_state,low_peak_prob,high_peak_state,high_peak_prob=\
peak_analysis(cause_label,effect_label,col_labels,bndata_mat)
x_set=low_peak_state
all_cause_symbol_xlabel=get_tick_symbol(x_set,all_cause_label,Event,Cond)
all_cause_symbol_xtick=range(len(low_peak_state))
# BN-PROB STORE
bn_out={'s_cause_label':s_cause_label,'s_labels':s_labels,'s_hc':s_hc,\
's_cp_mat':s_cp_mat, 's_bndata_mat':s_bndata_mat,'t_cause_label':t_cause_label,\
't_labels':t_labels,'t_hc':t_hc,'t_cp_mat':t_cp_mat,'t_bndata_mat':t_bndata_mat, \
'w_cause_label':w_cause_label,'w_labels':w_labels,'w_hc':w_hc,'w_cp_mat':w_cp_mat,\
'w_bndata_mat':w_bndata_mat,'all_cause_label':all_cause_label,'all_labels':all_labels,\
'all_hc':all_hc,'all_cp_mat':all_cp_mat,'all_bndata_mat':all_bndata_mat,
'low_peak_state':low_peak_state,'low_peak_prob':low_peak_prob,\
'high_peak_state':high_peak_state,'high_peak_prob':high_peak_prob,\
'all_cause_symbol_xlabel':all_cause_symbol_xlabel,'all_cause_symbol_xtick':all_cause_symbol_xtick}
bn_out_set.update({p_name:bn_out})
except:
print '*** Error in processing bn_prob for ', p_name, '! ****'
pass
return obj(bn_out_set)
def compute_bn_sensors(bldg_obj,sig_tag='avg',learning_alg='hill'):
cmd_str_='s_names=bldg_obj.'+sig_tag+'.sensor_names'
exec(cmd_str_)
cmd_str_='bndata_mat=bldg_obj.'+sig_tag+'.data_state_mat'
exec(cmd_str_)
cols=s_names
const_idx=np.nonzero(np.array([ len(set(col)) for col in bndata_mat.T])<2)[0]
bndata_mat=np.delete(bndata_mat,const_idx,1)
cols=list(np.delete(cols,const_idx,0))
factor_data_mat = rbn.convert_pymat_to_rfactor(bndata_mat)
data_frame = rbn.construct_data_frame(factor_data_mat,cols)
if learning_alg=='tabu':
hc_b = rbn.bnlearn.tabu(data_frame,score='bic')
elif learning_alg=='mmhc':
hc_b = rbn.bnlearn.mmhc(data_frame,score='bic')
else:
hc_b = rbn.bnlearn.hc(data_frame,score='bic')
amat = rbn.py_get_amat(hc_b)
return hc_b,cols,amat
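# Hedged usage sketch (assumes a building object created by create_bldg_obj above):
#   hc_b, cols, amat = compute_bn_sensors(bldg_obj, sig_tag='avg', learning_alg='tabu')
#   # amat is the adjacency matrix of the learned sensor-to-sensor Bayesian network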
def plotting_bldg_bn(bldg_):
plt.ioff()
#if 'convert_name' not in bldg_.__dict__.keys():
# bldg_.convert_name = lambda name_: [name_]
for bldg_tag in bldg_.__dict__.keys():
print 'Getting anal_out from '+ bldg_tag
anal_out_found=True
try:
cmd_str='anal_out=bldg_.__dict__[\''+bldg_tag+'\'].anal_out'
exec(cmd_str)
except:
anal_out_found=False
if anal_out_found==True:
for sig_tag in ['avg','diff']:
if sig_tag in anal_out.__dict__.keys():
anal_out_sig=anal_out.__dict__[sig_tag]
p_name_sets=anal_out_sig.__dict__.keys()
for p_name in p_name_sets:
bn_out=anal_out_sig.__dict__[p_name]
cmd_str='pname_=bldg_.convert_name(p_name)[0]'
exec(cmd_str)
try:
fig_name='BN for Sensors '+pname_
plt.figure(fig_name,figsize=(30.0,30.0))
col_name=bldg_.convert_name(bn_out.s_labels)
rbn.nx_plot(bn_out.s_hc,col_name,graph_layout='spring',node_text_size=30)
plt.savefig(fig_dir+bldg_tag+'_'+pname_+'_'+sig_tag+'_bn_sensors'+get_pngid()+'.png', bbox_inches='tight')
plt.close()
except:
print 'error in '+fig_name
pass
try:
fig_name='BN for Time '+pname_
plt.figure(fig_name,figsize=(30.0,30.0))
rbn.nx_plot(bn_out.t_hc,bldg_.convert_name(bn_out.t_labels),graph_layout='spring',node_text_size=30)
plt.savefig(fig_dir+bldg_tag+'_'+pname_+'_'+sig_tag+'_bn_time'+get_pngid()+'.png', bbox_inches='tight')
plt.close()
except:
print 'error in '+fig_name
pass
try:
fig_name='BN for Weather '+pname_
plt.figure(fig_name,figsize=(30.0,30.0))
rbn.nx_plot(bn_out.w_hc,bldg_.convert_name(bn_out.w_labels),graph_layout='spring',node_text_size=30)
plt.savefig(fig_dir+bldg_tag+'_'+pname_+'_'+sig_tag+'_bn_weather'+get_pngid()+'.png', bbox_inches='tight')
plt.close()
except:
print 'error in '+fig_name
pass
try:
fig_name='BN for Sensor-Time-Weather '+pname_
plt.figure(fig_name,figsize=(30.0,30.0))
rbn.nx_plot(bn_out.all_hc,bldg_.convert_name(bn_out.all_labels),graph_layout='spring',node_text_size=30)
plt.savefig(fig_dir+bldg_tag+'_'+pname_+'_'+sig_tag+'_bn_sensor_time_weather'+get_pngid()+'.png', bbox_inches='tight')
plt.close()
except:
print 'error in '+fig_name
pass
try:
fig_name='BN PEAK LH Analysis for Sensor-Time-Weather '+pname_
plt.figure(fig_name, figsize=(30.0,30.0))
plt.subplot(2,1,1)
plt.plot(bn_out.all_cause_symbol_xtick,bn_out.high_peak_prob,'-^')
plt.plot(bn_out.all_cause_symbol_xtick,bn_out.low_peak_prob,'-.v')
plt.ylabel('Likelihood',fontsize=20)
plt.xticks(bn_out.all_cause_symbol_xtick,bn_out.all_cause_symbol_xlabel,rotation=270, fontsize=20)
plt.tick_params(labelsize=20)
plt.legend(('High Peak', 'Low Peak'),loc='center right', prop={'size':25})
plt.tick_params(labelsize=20)
plt.grid();plt.ylim([-0.05,1.05])
plt.title('Likelihood of '+ str(remove_dot(pname_))+\
' given '+'\n'+str(remove_dot(bldg_.convert_name(bn_out.all_cause_label))), fontsize=20)
plt.savefig(fig_dir+bldg_tag+'_'+pname_+'_'+sig_tag+'_LH_sensor_time_weather'+get_pngid()+'.png', bbox_inches='tight')
plt.close()
except:
print 'error in '+fig_name
pass
plt.ion()
##############################################################################
# Obsolete library files
##############################################################################
"""
plt.ioff()
for bldg_tag in bldg_tag_set:
print '-------------------------'
print bldg_tag
print '-------------------------'
for sig_tag in sig_tag_set:
print sig_tag+'.....'
cmd_str_='p_names='+bldg_tag+'.'+sig_tag+'p_names'
exec(cmd_str_)
for pname_ in p_names:
try:
blank_idx=pname_.index('.')
pname_=pname_.replace('.','_')
except:
pass
cmd_str_='optprob_set='+bldg_tag+'.analysis.'+sig_tag+'.'+pname_+'.optprob_set'
exec(cmd_str_)
cmd_str_= 's_names='+bldg_tag+'.'+sig_tag+'sensor_names'
exec(cmd_str_)
cmd_str_= 'optstate_set='+bldg_tag+'.analysis.'+sig_tag+'.'+pname_+'.optstate_set'
exec(cmd_str_)
num_picks=30
sort_idx=argsort(optprob_set)[::-1]
sort_lh=optprob_set[sort_idx[:num_picks]].T
sort_state=optstate_set[sort_idx[:num_picks]].T
fig=figure(figsize=(20.0,15.0))
subplot(2,1,1)
plt.plot(sort_lh,'-*')
x_label= list(np.array(s_names)[sort_idx[:num_picks]])
x_ticks=range(len(x_label))
plt.xticks(x_ticks,x_label,rotation=270, fontsize="small")
if sig_tag=='avg':
                plt.title('Most relevant '+bldg_tag +' sensors to the peak (demand) of '+pname_,fontsize=20)
            else:
                plt.title('Most relevant '+bldg_tag +' sensors to the peak variations of '+pname_,fontsize=20)
plt.tick_params(labelsize='large')
plt.ylabel('Likelihood (From 0 to 1)',fontsize=18)
#plt.get_current_fig_manager().window.showMaximized()
plt.savefig(fig_dir+pname_+'_'+sig_tag+'_lh_sensors.png', bbox_inches='tight')
plt.close()
plt.ion()
def interpolation_measurement_2(data_dict,input_names,err_rate=1,sgm_bnd=20):
    print 'interpolation starts....'
measurement_point_set=[]
num_of_discrete_val=[]
sampling_interval_set=[]
num_type_set=[]
err_rate=1;sgm_bnd=20
for i,key_name in enumerate(input_names):
print key_name,'.....'
start_time = time.time()
v = mt.loadObjectBinaryFast(str(key_name) + FL_EXT)
t_=np.array(v[2][0])
if len(t_) == 0:
continue
intpl_intv=np.ceil((t_[-1]-t_[0]) /len(t_))
sampling_interval_set.append(intpl_intv)
val_=np.array(v[2][1])
num_of_discrete_val_temp=len(set(val_))
num_of_discrete_val.append(num_of_discrete_val_temp)
# filtering outlier
# assuming 1% of errors and 30 x standard deviation rules
outlier_idx=outlier_detect(val_,err_rate,sgm_bnd)
if len(outlier_idx)>0:
print 'outlier samples are detected: ', 'outlier_idx:', outlier_idx
t_=np.delete(t_,outlier_idx)
val_=np.delete(val_,outlier_idx)
t_new=np.r_[t_[0]:t_[-1]:intpl_intv]
num_type=check_data_type(v[2][1])
if num_type==INT_TYPE:
val_new=fast_nearest_interp(t_new, t_,val_)
else:
#num_type=FLOAT_TYPE
val_new = np.interp(t_new, t_,val_)
c=np.vstack([t_new,val_new])
measurement_point_set.append(c)
num_type_set.append(num_type)
print 'interpolation_measurement one iteration done...'
mt.print_report(start_time)
print '-----------------------------------------------------------------'
#return measurement_point_set,num_type_set,num_of_discrete_val,sampling_interval_set
return measurement_point_set,np.array(num_type_set)
"""
| gpl-2.0 | -6,386,651,215,079,214,000 | 46.074036 | 156 | 0.550858 | false |
jualjiman/knowledge-base | src/knowledge_base/users/api.py | 1 | 6130 | # -*- coding: utf-8 -*-
import os
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.decorators import detail_route
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from knowledge_base.api.v1.routers import router
from knowledge_base.core.api.mixins import base as base_mixins
from knowledge_base.core.api.routers.single import SingleObjectRouter
from knowledge_base.core.api.viewsets import GenericViewSet
from knowledge_base.users.serializers import (
ProfileSerializer, ProfileUpdateImageSerializer,
ProfileUpdateSerializer, SearchUserSerializer
)
from knowledge_base.utils.urlresolvers import get_query_params
class ProfileViewSet(
base_mixins.RetrieveModelMixin,
base_mixins.PartialUpdateModelMixin,
GenericViewSet
):
serializer_class = ProfileSerializer
retrieve_serializer_class = ProfileSerializer
update_serializer_class = ProfileUpdateSerializer
change_image_serializer_class = ProfileUpdateImageSerializer
permission_classes = (IsAuthenticated, )
def retrieve(self, request, pk=None):
"""
Gets the user profile information.
---
response_serializer: ProfileSerializer
omit_serializer: false
responseMessages:
- code: 200
message: OK
- code: 403
message: FORBIDDEN
- code: 404
message: NOT FOUND
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json
"""
return super(ProfileViewSet, self).retrieve(request)
def partial_update(self, request):
"""
Updates the user profile information.
---
request_serializer: ProfileSerializer
response_serializer: ProfileSerializer
omit_serializer: false
responseMessages:
- code: 200
message: OK
- code: 400
message: BAD REQUEST
- code: 403
message: FORBIDDEN
- code: 404
message: NOT FOUND
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json
"""
return super(ProfileViewSet, self).partial_update(request)
@detail_route(methods=['PUT'])
def change_image(self, request, *args, **kwars):
"""
Allows the session's user to update his profile image.
---
request_serializer: ProfileUpdateImageSerializer
response_serializer: ProfileSerializer
responseMessages:
- code: 200
message: OK
- code: 400
message: BAD REQUEST
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json
"""
user = request.user
# Serializer that will be used to validate the information.
update_serializer = self.get_serializer(
user,
data=request.data,
partial=True,
action='change_image'
)
update_serializer.is_valid(raise_exception=True)
self.perform_delete_image()
updated_user = update_serializer.save()
retrieve_serializer = self.get_serializer(
updated_user,
action='retrieve'
)
return Response(retrieve_serializer.data, status=status.HTTP_200_OK)
@detail_route(methods=['DELETE'])
def delete_image(self, request, *args, **kwars):
"""
        Allows deleting the image for the current user.
        ---
        omit_serializer: true
responseMessages:
- code: 204
message: NO CONTENT
- code: 400
message: BAD REQUEST
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json
"""
self.perform_delete_image()
return Response(status=status.HTTP_204_NO_CONTENT)
def get_object(self):
return self.request.user
def perform_delete_image(self):
user = self.request.user
if user.photo and os.path.isfile(user.photo.path):
os.remove(user.photo.path)
user.photo = None
if user.thumbnail and os.path.isfile(user.thumbnail.path):
os.remove(user.thumbnail.path)
user.thumbnail = None
user.save()
class SearchUserViewSet(base_mixins.ListModelMixin, GenericViewSet):
serializer_class = SearchUserSerializer
list_serializer_class = SearchUserSerializer
permission_classes = (IsAuthenticated, )
def get_queryset(self, *args, **kwargs):
queryset = get_user_model().objects.filter(is_active=True)
query_params = get_query_params(self.request)
q = query_params.get('q')
if q:
queryset = queryset.filter(email__icontains=q)
return queryset
def list(self, request, *args, **kwargs):
"""
        Return a list of users that match the given search word.
---
response_serializer: SearchUserSerializer
parameters:
- name: q
description: Search word.
paramType: query
type: string
responseMessages:
- code: 200
message: OK
- code: 403
message: FORBIDDEN
- code: 500
message: INTERNAL SERVER ERROR
consumes:
- application/json
produces:
- application/json
"""
return super(SearchUserViewSet, self).list(request, *args, **kwargs)
router.register(
'me',
ProfileViewSet,
base_name='me',
router_class=SingleObjectRouter
)
router.register(
r'users/search',
SearchUserViewSet,
base_name='users-search'
)
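# --- Illustrative sketch (not part of the original module) ------------------
# A minimal way the search endpoint above could be exercised in a test, using
# DRF's APIRequestFactory. The user instance, the query value and the URL
# prefix are made up, and this assumes the project's GenericViewSet behaves
# like a regular DRF ViewSet (i.e. as_view() accepts an action map).
def _example_search_request(authenticated_user):
    from rest_framework.test import APIRequestFactory, force_authenticate

    factory = APIRequestFactory()
    view = SearchUserViewSet.as_view({'get': 'list'})
    request = factory.get('/api/v1/users/search/', {'q': 'jane'})
    force_authenticate(request, user=authenticated_user)
    # Each result in response.data is rendered with SearchUserSerializer.
    return view(request)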
| apache-2.0 | -7,804,503,449,449,819,000 | 27.915094 | 76 | 0.596574 | false |
google-research/episodic-curiosity | episodic_curiosity/constants.py | 1 | 6595 | # coding=utf-8
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants for episodic curiosity."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from enum import Enum
class Level(object):
"""Represents a DMLab level, possibly with additional non-standard settings.
Attributes:
dmlab_level_name: Name of the DMLab level
fully_qualified_name: Unique name used to distinguish between multiple DMLab
levels with the same name but different settings.
extra_env_settings: dict, additional DMLab environment settings for this
level.
random_maze: Whether the geometry of the maze is supposed to change when we
change the seed.
use_r_net_from_level: If provided, don't train a R-net for this level, but
instead, use the trained R-net from another level
(identified by its fully qualified name).
include_in_paper: Whether this level is included in the paper.
scenarios: Optional list of scenarios this level is used for.
"""
def __init__(self,
dmlab_level_name,
fully_qualified_name = None,
extra_env_settings = None,
random_maze = False,
use_r_net_from_level = None,
include_in_paper = False,
scenarios = None):
self.dmlab_level_name = dmlab_level_name
self.fully_qualified_name = fully_qualified_name or dmlab_level_name
self.extra_env_settings = extra_env_settings or {}
self.random_maze = random_maze
self.use_r_net_from_level = use_r_net_from_level
self.include_in_paper = include_in_paper
self.scenarios = scenarios
def asdict(self):
return vars(self)
class SplitType(Enum):
R_TRAINING = 0
POLICY_TRAINING = 3
VALIDATION = 1
TEST = 2
class Const(object):
"""Constants"""
MAX_ACTION_DISTANCE = 5
NEGATIVE_SAMPLE_MULTIPLIER = 5
# env
OBSERVATION_HEIGHT = 120
OBSERVATION_WIDTH = 160
OBSERVATION_CHANNELS = 3
OBSERVATION_SHAPE = (OBSERVATION_HEIGHT, OBSERVATION_WIDTH,
OBSERVATION_CHANNELS)
# model and training
BATCH_SIZE = 64
EDGE_CLASSES = 2
DUMP_AFTER_BATCHES = 100
EDGE_MAX_EPOCHS = 2000
ADAM_PARAMS = {
'lr': 1e-04,
'beta_1': 0.9,
'beta_2': 0.999,
'epsilon': 1e-08,
'decay': 0.0
}
ACTION_REPEAT = 4
STORE_CHECKPOINT_EVERY_N_EPOCHS = 30
LEVELS = [
# Levels on which we evaluate episodic curiosity.
# Corresponds to 'Sparse' setting in the paper
# (arxiv.org/pdf/1810.02274.pdf).
Level('contributed/dmlab30/explore_goal_locations_large',
fully_qualified_name='explore_goal_locations_large',
random_maze=True,
include_in_paper=True,
scenarios=['sparse', 'noreward', 'norewardnofire']),
# WARNING!! For explore_goal_locations_large_sparse and
# explore_goal_locations_large_verysparse to work properly (i.e. taking
# into account minGoalDistance), you need to use the dmlab MPM:
# learning/brain/research/dune/rl/dmlab_env_package.
# Corresponds to 'Very Sparse' setting in the paper.
Level(
'contributed/dmlab30/explore_goal_locations_large',
fully_qualified_name='explore_goal_locations_large_verysparse',
extra_env_settings={
# Forces the spawn and goals to be further apart.
# Unfortunately, we cannot go much higher, because we need to
# guarantee that for any goal location, we can at least find one
# spawn location that is further than this number (the goal
# location might be in the middle of the map...).
'minGoalDistance': 10,
},
use_r_net_from_level='explore_goal_locations_large',
random_maze=True, include_in_paper=True,
scenarios=['verysparse']),
# Corresponds to 'Sparse+Doors' setting in the paper.
Level('contributed/dmlab30/explore_obstructed_goals_large',
fully_qualified_name='explore_obstructed_goals_large',
random_maze=True,
include_in_paper=True,
scenarios=['sparseplusdoors']),
# Two levels where we expect to show episodic curiosity does not hurt.
# Corresponds to 'Dense 1' setting in the paper.
Level('contributed/dmlab30/rooms_keys_doors_puzzle',
fully_qualified_name='rooms_keys_doors_puzzle',
include_in_paper=True,
scenarios=['dense1']),
# Corresponds to 'Dense 2' setting in the paper.
Level('contributed/dmlab30/rooms_collect_good_objects_train',
fully_qualified_name='rooms_collect_good_objects_train',
include_in_paper=True,
scenarios=['dense2']),
]
MIXER_SEEDS = {
# Equivalent to not setting a mixer seed. Mixer seed to train the
# R-network.
SplitType.R_TRAINING: 0,
# Mixer seed for training the policy.
SplitType.POLICY_TRAINING: 0x3D23BE66,
SplitType.VALIDATION: 0x2B79ED94, # Invented.
SplitType.TEST: 0x600D5EED, # Same as DM's.
}
@staticmethod
def find_level(fully_qualified_name):
"""Finds a DMLab level by fully qualified name."""
for level in Const.LEVELS:
if level.fully_qualified_name == fully_qualified_name:
return level
# Fallback to the DMLab level with the corresponding name.
return Level(fully_qualified_name,
extra_env_settings = {
# Make 'rooms_exploit_deferred_effects_test',
# 'rooms_collect_good_objects_test' work.
'allowHoldOutLevels': True
})
@staticmethod
def find_level_by_scenario(scenario):
"""Finds a DMLab level by scenario name."""
for level in Const.LEVELS:
if level.scenarios and scenario in level.scenarios:
return level
raise ValueError('Scenario "{}" not found.'.format(scenario))
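# --- Illustrative usage sketch (not part of the original module) -----------
# Shows how the lookup helpers above are typically combined: resolve a
# scenario name to a Level, then read the settings it carries. 'verysparse'
# is one of the scenario names registered in Const.LEVELS; everything else is
# plain attribute access on the returned Level.
def _example_resolve_scenario():
  level = Const.find_level_by_scenario('verysparse')
  settings = dict(level.extra_env_settings)
  # The R-network may be shared with another level instead of retrained.
  r_net_source = level.use_r_net_from_level or level.fully_qualified_name
  return level.dmlab_level_name, settings, r_net_source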
| apache-2.0 | 3,986,055,241,371,008,500 | 36.471591 | 80 | 0.643821 | false |
pterk/django-bop | bop/api.py | 1 | 5312 | import operator
from django.contrib.auth.models import User, Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from bop.models import ObjectPermission
def get_model_perms(model):
return [p[0] for p in model._meta.permissions] + \
[model._meta.get_add_permission(),
model._meta.get_change_permission(),
model._meta.get_delete_permission()]
def has_model_perms(user, model):
for perm in user.get_all_permissions():
app_label, codename = perm.split('.')
if model._meta.app_label == app_label and \
codename in get_model_perms(model):
return True
return False
# I terify: BOO!
def iterify(obj, exceptions=(basestring,)):
""" iteryfy makes sure `obj` is iterable
(by turning any value that isn't iterable into a list)
>>> from bop.api import iterify
>>> things = [1, "string", ('a', 'tuple'), {'name': 'dict', 'another': 'value'}, set(['test', 1, 3]), [1,3,4], None, [None]]
>>> for thing in things:
... for x in iterify(thing):
... print x
...
1
string
a
tuple
name
another
test
1
3
1
3
4
None
None
>>>
>>> for thing in things:
... for x in iterify(thing, (basestring, dict)):
... if isinstance(x, dict):
... d = x.items()
... d.sort()
... print d
... else:
... print x
...
1
string
a
tuple
[('another', 'value'), ('name', 'dict')]
test
1
3
1
3
4
None
None
>>>
"""
if hasattr(obj, '__iter__'):
# To future self: string has __iter__ in python3
if not isinstance(obj, exceptions):
return obj
return [obj]
def resolve(iterable, model, key=None):
resolved = []
for i in iterify(iterable):
if isinstance(i, model):
resolved.append(i)
if isinstance(i, (basestring, int)):
if key is None or isinstance(key ,int):
key = 'pk'
if hasattr(key, '__call__'):
i = key(i)
else:
i = {key: i}
if isinstance(i, dict):
try:
resolved.append(model.objects.get(**i))
except model.DoesNotExist:
pass
return resolved
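# --- Illustrative sketch (not part of the original module) -----------------
# resolve() normalizes the mixed input types the public API accepts. The
# calls below show the intended equivalences; the usernames, group name and
# permission label are made up and would have to exist in your database.
def _example_resolve_inputs():
    users = resolve(['alice', 'bob'], User, key='username')
    groups = resolve('editors', Group, key='name')
    perms = resolve('blog.change_entry', Permission, key=perm2dict)
    return users, groups, perms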
def is_object_permission(obj, permission, ct):
return permission.content_type == ct and \
obj._meta.app_label == permission.content_type.app_label and \
permission.codename in get_model_perms(obj)
#(permission.codename in [x[0] for x in obj._meta.permissions] \
# or permission.codename in (obj._meta.get_add_permission(),
# obj._meta.get_change_permission(),
# obj._meta.get_delete_permission()))
def perm2dict(perm):
app_label, codename = perm.split(".")
return {"content_type__app_label": app_label, "codename": codename}
def _make_lists_of_objects(users, groups, permissions, objects):
# Make sure all 'objects' are model-instances
users = resolve(users, User, key='username')
groups = resolve(groups, Group, key='name')
permissions = resolve(permissions, Permission, key=perm2dict)
# objects *must* be model-instances already
return (users, groups, permissions, iterify(objects))
def grant(users, groups, permissions, objects):
users, groups, permissions, objects = \
_make_lists_of_objects(users, groups, permissions, objects)
for o in objects:
if not hasattr(o, '_meta'):
continue
ct = ContentType.objects.get_for_model(o)
for p in permissions:
if is_object_permission(o, p, ct):
for u in users:
ObjectPermission.objects.get_or_create(user=u,
permission=p,
object_id=o.id,
content_type=ct)
for g in groups:
ObjectPermission.objects.get_or_create(group=g,
permission=p,
object_id=o.id,
content_type=ct)
def revoke(users, groups, permissions, objects):
users, groups, permissions, objects = \
_make_lists_of_objects(users, groups, permissions, objects)
userlist = []
grouplist = []
for o in objects:
ct = ContentType.objects.get_for_model(o)
for p in permissions:
if is_object_permission(o, p, ct):
for u in users:
userlist.append(Q(user=u))
for g in groups:
grouplist.append(Q(group=g))
Qs = userlist+grouplist
if not Qs:
continue
ObjectPermission.objects.filter(
reduce(operator.or_, Qs),
content_type=ct, object_id=o.id,permission=p
).delete()
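# --- Illustrative usage sketch (not part of the original module) -----------
# Typical object-level flow: grant a user a permission on a single instance,
# then revoke it again later. 'blog.change_entry' and the entry instance are
# hypothetical; the codename must be one of the model's permissions for
# is_object_permission() to accept it.
def _example_grant_and_revoke(user, entry):
    grant(user, None, 'blog.change_entry', entry)
    # ... later ...
    revoke(user, None, 'blog.change_entry', entry)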
| mit | -8,878,782,757,124,568,000 | 30.431953 | 128 | 0.512236 | false |
ColorTyWorld/GISRS | src/python/landsat/ndvi.py | 1 | 8766 | import sys, os, math, time
import arcpy
from arcpy import env
from arcpy.sa import *
arcpy.CheckOutExtension("spatial")
#Metadata exists in one of two standard formats (finds the correct name for each field)
def acquireMetadata(metadata, band):
band = str(band)
metadatalist = []
if ("RADIANCE_MAXIMUM_BAND_" + band) in metadata.keys():
BANDFILE = "FILE_NAME_BAND_" + band
LMAX = "RADIANCE_MAXIMUM_BAND_" + band
LMIN = "RADIANCE_MINIMUM_BAND_" + band
QCALMAX = "QUANTIZE_CAL_MAX_BAND_" + band
QCALMIN = "QUANTIZE_CAL_MIN_BAND_" + band
DATE = "DATE_ACQUIRED"
metadatalist = [BANDFILE, LMAX, LMIN, QCALMAX, QCALMIN, DATE]
elif ("LMAX_BAND" + band) in metadata.keys():
BANDFILE = "BAND" + band + "_FILE_NAME"
LMAX = "LMAX_BAND" + band
LMIN = "LMIN_BAND" + band
QCALMAX = "QCALMAX_BAND" + band
QCALMIN = "QCALMIN_BAND" + band
DATE ="ACQUISITION_DATE"
metadatalist = [BANDFILE, LMAX, LMIN, QCALMAX, QCALMIN, DATE]
else:
arcpy.AddError('There was a problem reading the metadata for this file. Please make sure the _MTL.txt is in Level 1 data format')
return metadatalist
#Calculate the radiance from metadata on band.
def calcRadiance (LMAX, LMIN, QCALMAX, QCALMIN, QCAL, band):
LMAX = float(LMAX)
LMIN = float(LMIN)
QCALMAX = float(QCALMAX)
QCALMIN = float(QCALMIN)
gain = (LMAX - LMIN)/(QCALMAX-QCALMIN)
inraster = Raster(QCAL)
outname = 'RadianceB'+str(band)+'.tif'
arcpy.AddMessage('Band'+str(band))
arcpy.AddMessage('LMAX ='+str(LMAX))
arcpy.AddMessage('LMIN ='+str(LMIN))
arcpy.AddMessage('QCALMAX ='+str(QCALMAX))
arcpy.AddMessage('QCALMIN ='+str(QCALMIN))
arcpy.AddMessage('gain ='+str(gain))
outraster = (gain * (inraster-QCALMIN)) + LMIN
#outraster.save(outname)
return outraster
def calcReflectance(solarDist, ESUN, solarElevation, radiance, scaleFactor):
#Value for solar zenith is 90 degrees minus solar elevation (angle from horizon to the center of the sun)
# See Landsat7_Handbook 11.3.2 Radiance to Reflectance
solarZenith = ((90.0 - (float(solarElevation)))*math.pi)/180 #Converted from degrees to radians
solarDist = float(solarDist)
ESUN = float(ESUN)
outname = 'ReflectanceB'+str(band)+'.tif'
arcpy.AddMessage('Band'+str(band))
arcpy.AddMessage('solarDist ='+str(solarDist))
arcpy.AddMessage('solarDistSquared ='+str(math.pow(solarDist, 2)))
arcpy.AddMessage('ESUN ='+str(ESUN))
arcpy.AddMessage('solarZenith ='+str(solarZenith))
outraster = (math.pi * radiance * math.pow(solarDist, 2)) / (ESUN * math.cos(solarZenith)) * scaleFactor
return outraster
#Calculate the solar distance based on julian day
def calcSolarDist (jday):
#Values taken from d.csv file which is a formatted version of the d.xls file
#associated with the Landsat7 handbook, representing the distance of the sun
#for each julian day (1-366).
    #this line keeps the relative path where this script is executing
filepath = os.path.join(os.path.dirname(sys.argv[0]), 'd.csv')
f = open(filepath, "r")
lines = f.readlines()[2:]
distances = []
for x in range(len(lines)):
distances.append(float(lines[x].strip().split(',')[1]))
f.close()
jday = int(jday)
dist = distances[jday - 1]
return dist
def calcJDay (date):
    #Separate date aspects into list (check for consistency in formatting of all
    #Landsat7 metadata) YYYY-MM-DD
dt = date.rsplit("-")
    #Cast each part of the date as an integer in the 9 int tuple mktime
t = time.mktime((int(dt[0]), int(dt[1]), int(dt[2]), 0, 0, 0, 0, 0, 0))
    #As part of the time package the 7th int in mktime is calculated as Julian Day
#from the completion of other essential parts of the tuple
jday = time.gmtime(t)[7]
return jday
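# --- Illustrative sketch (not part of the original script) -----------------
# calcJDay() turns the metadata acquisition date into a day-of-year, which
# calcSolarDist() then uses to look up the Earth-Sun distance in d.csv. For
# the scene processed below (LT5...2011318...) the acquisition date is
# 2011-11-14, i.e. day 318 of 2011. Note that the mktime()/gmtime() round
# trip above goes through local time, so in time zones east of UTC it can
# come out one day short; a timezone-independent sketch would be:
def _example_day_of_year(date_str='2011-11-14'):
    # day of year straight from the parsed date (318 for this example date)
    return time.strptime(date_str, '%Y-%m-%d').tm_yday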
def getESUN(bandNum, SIType):
SIType = SIType
ESUN = {}
#from NASA's Landsat7_Handbook Table 11.3
#ETM+ Solar Spectral Irradiances (generated using the combined Chance-Kurucz Solar Spectrum within MODTRAN 5)
if SIType == 'ETM+ ChKur':
ESUN = {'b1':1970,'b2':1842,'b3':1547,'b4':1044,'b5':225.7,'b7':82.06,'b8':1369}
#from NASA's Landsat7_Handbook Table 9.1
    #from the LPS ACCA algorithm to correct for cloud cover
if SIType == 'LPS ACAA Algorithm':
ESUN = {'b1':1969,'b2':1840,'b3':1551,'b4':1044,'b5':225.7,'b7':82.06,'b8':1368}
#from Revised Landsat-5 TM Radiometric Calibration Procedures and Postcalibration, Table-2
#Gyanesh Chander and Brian Markham. Nov 2003.
#Landsat 5 ChKur
if SIType == 'Landsat 5 ChKur':
ESUN = {'b1':1957,'b2':1826,'b3':1554,'b4':1036,'b5':215,'b7':80.67}
#from Revised Landsat-5 TM Radiometric Calibration Procedures and Postcalibration, Table-2
#Gyanesh Chander and Brian Markham. Nov 2003.
#Landsat 4 ChKur
if SIType == 'Landsat 4 ChKur':
ESUN = {'b1':1957,'b2':1825,'b3':1557,'b4':1033,'b5':214.9,'b7':80.72}
bandNum = str(bandNum)
return ESUN[bandNum]
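# --- Illustrative sketch (not part of the original script) -----------------
# calcRadiance() and calcReflectance() above implement the standard Landsat
# TOA chain:
#   L   = (LMAX - LMIN) / (QCALMAX - QCALMIN) * (QCAL - QCALMIN) + LMIN
#   rho = pi * L * d^2 / (ESUN * cos(solar_zenith)) * scaleFactor
# The numbers below are made-up calibration values for a single pixel, just
# to show the arithmetic without needing a raster or arcpy.
def _example_toa_reflectance_single_pixel():
    LMIN, LMAX = -1.17, 193.0       # hypothetical band calibration values
    QCALMIN, QCALMAX = 1.0, 255.0
    qcal = 120.0                    # hypothetical digital number of one pixel
    esun = 1554.0                   # ESUN for TM band 3, 'Landsat 5 ChKur'
    sun_elev = 35.0                 # degrees, as read from the MTL file
    d = 0.9892                      # hypothetical Earth-Sun distance in AU
    gain = (LMAX - LMIN) / (QCALMAX - QCALMIN)
    radiance = gain * (qcal - QCALMIN) + LMIN
    zenith = (90.0 - sun_elev) * math.pi / 180.0
    reflectance = (math.pi * radiance * d ** 2) / (esun * math.cos(zenith))
    return radiance, reflectance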
def readMetadata(metadataFile):
f = metadataFile
#Create an empty dictionary with which to populate all the metadata fields.
metadata = {}
    #Each item in the txt document is separated by a space and each key is
    #equated with '='. This loop strips and separates then fills the dictionary.
for line in f:
if not line.strip() == "END":
val = line.strip().split('=')
metadata [val[0].strip()] = val[1].strip().strip('"')
else:
break
return metadata
#Takes the unicode parameter input from Arc and turns it into a nice python list
def cleanList(bandList):
bandList = list(bandList)
for x in range(len(bandList)):
bandList[x] = str(bandList[x])
while ';' in bandList:
bandList.remove(';')
return bandList
#////////////////////////////////////MAIN LOOP///////////////////////////////////////
# TM5
work_dic = 'F:\\Data\\HRB\\RS\\Landsat\\Landsat5\\TM\\132_32\\LT51320322011318IKR01\\'
metadataPath = work_dic + 'LT51320322011318IKR01_MTL.txt'
out_dic = 'F:\\Data\\HRB\\RS\\Landsat\\Landsat5\\TM\\132_32\\LT51320322011318IKR01\\'
SIType = 'Landsat 5 ChKur'
keepRad = 'false'
keepRef = 'true'
scaleFactor = 1.0
min_ndvi = 0.15
env.workspace = work_dic
arcpy.env.overwriteOutput = True
ref_file_exit = 'false'
arcpy.AddMessage(scaleFactor)
if SIType =='Landsat 4 ChKur' :
bandList = cleanList(['5','7'])
else:
bandList = cleanList(['3','4'])
metadataFile = open(metadataPath)
metadata = readMetadata(metadataFile)
metadataFile.close()
successful = []
failed = []
if SIType =='Landsat 4 ChKur' :
# from http://landsat.gsfc.nasa.gov/the-multispectral-scanner-system/
# band 5 and 7 of MSS are equivalent to 3 and 4 of TM
    # both MSS reflectance bands must already exist
    ref_file_exit = (os.path.exists(work_dic + "ReflectanceB5.tif")
                     and os.path.exists(work_dic + "ReflectanceB7.tif"))
else:
    # both TM reflectance bands must already exist
    ref_file_exit = (os.path.exists(work_dic + "ReflectanceB3.tif")
                     and os.path.exists(work_dic + "ReflectanceB4.tif"))
if ref_file_exit:
metlist = acquireMetadata(metadata, '5')
    print 'Reflectance files already exist'
else:
print 'Calculating reflectances'
for band in bandList:
bandstr = str(band)
print bandstr
metlist = acquireMetadata(metadata, band)
BANDFILE = metlist[0]
LMAX = metlist[1]
LMIN = metlist[2]
QCALMAX = metlist[3]
QCALMIN = metlist[4]
DATE = metlist[5]
ESUNVAL = "b" + band
#try:
radianceRaster = calcRadiance(metadata[LMAX], metadata[LMIN], metadata[QCALMAX], metadata[QCALMIN], metadata[BANDFILE], band)
reflectanceRaster = calcReflectance(calcSolarDist(calcJDay(metadata[DATE])), getESUN(ESUNVAL, SIType), metadata['SUN_ELEVATION'], radianceRaster, scaleFactor)
outname = 'ReflectanceB'+ bandstr
reflectanceRaster.save(outname)
successful.append(BANDFILE)
DATE = metlist[5]
day = metadata[DATE]
if SIType =='Landsat 4 ChKur' :
nir = Raster('ReflectanceB7.tif')
red = Raster('ReflectanceB5.tif')
else:
nir = Raster('ReflectanceB4.tif')
red = Raster('ReflectanceB3.tif')
ndvi_out_ras = out_dic + "ndvi_" + day + ".tif"
print 'Calculating NDVI'
raw_ndvi = (nir-red)/(nir+red)
ndvi = Con((raw_ndvi < min_ndvi) | (raw_ndvi > 1.0), 0, raw_ndvi)
arcpy.gp.SetNull_sa(ndvi, ndvi, ndvi_out_ras, "value = 0")
print 'NDVI file saved'
if keepRef != 'true':
arcpy.Delete_management(nir)
arcpy.Delete_management(red)
print 'Reflectance files deleted'
| gpl-3.0 | -8,988,101,215,451,264,000 | 32.458015 | 166 | 0.646019 | false |
lechat/jenkinsflow | doc/source/conf.py | 1 | 11257 | # -*- coding: utf-8 -*-
#
# jenkinsflow documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 16 09:04:01 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
major_version = sys.version_info.major
if major_version < 3:
import subprocess32 as subprocess
else:
import subprocess
from os.path import join as jp
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
_here = os.path.dirname(os.path.abspath(__file__))
_top_dir = os.path.dirname(os.path.dirname(_here))
sys.path.insert(0, os.path.dirname(_top_dir))
import jenkinsflow.setup
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinxcontrib.programoutput',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = jenkinsflow.setup.PROJECT_NAME
copyright = jenkinsflow.setup.COPYRIGHT
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = subprocess.check_output(
    [sys.executable, jp(_top_dir, 'setup.py'), '--version']).decode('utf-8').strip()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'jenkinsflowdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'jenkinsflow.tex', u'jenkinsflow Documentation',
u'Lars Hupfeldt Nielsen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'jenkinsflow', u'jenkinsflow Documentation',
[u'Lars Hupfeldt Nielsen'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'jenkinsflow', u'jenkinsflow Documentation',
u'Lars Hupfeldt Nielsen', 'jenkinsflow', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'jenkinsflow'
epub_author = u'Lars Hupfeldt Nielsen'
epub_publisher = u'Lars Hupfeldt Nielsen'
epub_copyright = jenkinsflow.setup.COPYRIGHT
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'jenkinsflow'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
def remove_module_docstring(app, what, name, obj, options, lines):
if what == "module" and name in ("jenkinsflow.set_build_result"):
# Distinguish between the script and the module documentstion
if 'members' in options:
# Module: Delete the module __doc__string which is for the script invocation
# del lines[:]
pass
def setup(app):
app.connect("autodoc-process-docstring", remove_module_docstring)
| bsd-3-clause | 9,181,886,077,884,321,000 | 30.620787 | 95 | 0.709603 | false |
hpcugent/easybuild-framework | easybuild/tools/multidiff.py | 1 | 10576 | # #
# Copyright 2014-2019 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Module which allows the diffing of multiple files
:author: Toon Willems (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
import difflib
import math
import os
from vsc.utils import fancylogger
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import read_file
from easybuild.tools.systemtools import det_terminal_size
SEP_WIDTH = 5
# text colors
PURPLE = "\033[0;35m"
# background colors
GREEN_BACK = "\033[0;42m"
RED_BACK = "\033[0;41m"
# end character for colorized text
END_COLOR = "\033[0m"
# meaning characters in diff context
HAT = '^'
MINUS = '-'
PLUS = '+'
SPACE = ' '
QUESTIONMARK = '?'
END_LONG_LINE = '...'
# restrict displaying of differences to limited number of groups
MAX_DIFF_GROUPS = 3
_log = fancylogger.getLogger('multidiff', fname=False)
class MultiDiff(object):
"""
Class representing a multi-diff.
"""
def __init__(self, base_fn, base_lines, files, colored=True):
"""
MultiDiff constructor
:param base: base to compare with
:param files: list of files to compare with base
:param colored: boolean indicating whether a colored multi-diff should be generated
"""
self.base_fn = base_fn
self.base_lines = base_lines
self.files = files
self.colored = colored
self.diff_info = {}
def parse_line(self, line_no, diff_line, meta, squigly_line):
"""
Register a diff line
:param line_no: line number
:param diff_line: diff line generated by difflib
:param meta: meta information (e.g., filename)
:param squigly_line: squigly line indicating which characters changed
"""
# register (diff_line, meta, squigly_line) tuple for specified line number and determined key
key = diff_line[0]
        if key not in [MINUS, PLUS]:
raise EasyBuildError("diff line starts with unexpected character: %s", diff_line)
line_key_tuples = self.diff_info.setdefault(line_no, {}).setdefault(key, [])
line_key_tuples.append((diff_line, meta, squigly_line))
def color_line(self, line, color):
"""Create colored version of given line, with given color, if color mode is enabled."""
if self.colored:
line = ''.join([color, line, END_COLOR])
return line
def merge_squigly(self, squigly1, squigly2):
"""Combine two squigly lines into a single squigly line."""
sq1 = list(squigly1)
sq2 = list(squigly2)
# longest line is base
base, other = (sq1, sq2) if len(sq1) > len(sq2) else (sq2, sq1)
for i, char in enumerate(other):
if base[i] in [HAT, SPACE] and base[i] != char:
base[i] = char
return ''.join(base)
def colorize(self, line, squigly):
"""Add colors to the diff line based on the squigly line."""
if not self.colored:
return line
# must be a list so we can insert stuff
chars = list(line)
flag = ' '
offset = 0
color_map = {
HAT: GREEN_BACK if line.startswith(PLUS) else RED_BACK,
MINUS: RED_BACK,
PLUS: GREEN_BACK,
}
if squigly:
for i, squigly_char in enumerate(squigly):
if squigly_char != flag:
chars.insert(i + offset, END_COLOR)
offset += 1
if squigly_char in [HAT, MINUS, PLUS]:
chars.insert(i + offset, color_map[squigly_char])
offset += 1
flag = squigly_char
chars.insert(len(squigly) + offset, END_COLOR)
else:
chars.insert(0, color_map.get(line[0], ''))
chars.append(END_COLOR)
return ''.join(chars)
def get_line(self, line_no):
"""
Return the line information for a specific line
:param line_no: line number to obtain information for
:return: list with text lines providing line information
"""
output = []
diff_dict = self.diff_info.get(line_no, {})
for key in [MINUS, PLUS]:
lines, changes_dict, squigly_dict = set(), {}, {}
# obtain relevant diff lines
if key in diff_dict:
for (diff_line, meta, squigly_line) in diff_dict[key]:
if squigly_line:
# merge squigly lines
if diff_line in squigly_dict:
squigly_line = self.merge_squigly(squigly_line, squigly_dict[diff_line])
squigly_dict[diff_line] = squigly_line
lines.add(diff_line)
# track meta info (which filenames are relevant)
changes_dict.setdefault(diff_line, set()).add(meta)
# sort: lines with most changes last, limit number to MAX_DIFF_GROUPS
lines = sorted(lines, key=lambda line: len(changes_dict[line]))[:MAX_DIFF_GROUPS]
for diff_line in lines:
squigly_line = squigly_dict.get(diff_line, '')
line = ['%s %s' % (line_no, self.colorize(diff_line, squigly_line))]
                # mention to how many files this diff applies
files = changes_dict[diff_line]
num_files = len(self.files)
line.append("(%d/%d)" % (len(files), num_files))
# list files to which this diff applies (don't list all files)
if len(files) != num_files:
line.append(', '.join(files))
output.append(' '.join(line))
# prepend spaces to match line number length in non-color mode
if not self.colored and squigly_line:
prepend = ' ' * (2 + int(math.log10(line_no)))
output.append(''.join([prepend, squigly_line]))
        # print separator only if needed
if diff_dict and not self.diff_info.get(line_no + 1, {}):
output.extend([' ', '-' * SEP_WIDTH, ' '])
return output
def __str__(self):
"""
Create a string representation of this multi-diff
"""
def limit(text, length):
"""Limit text to specified length, terminate color mode and add END_LONG_LINE if trimmed."""
if len(text) > length:
maxlen = length - len(END_LONG_LINE)
res = text[:maxlen]
if self.colored:
res += END_COLOR
return res + END_LONG_LINE
else:
return text
_, term_width = det_terminal_size()
base = self.color_line(self.base_fn, PURPLE)
filenames = ', '.join(map(os.path.basename, self.files))
output = [
"Comparing %s with %s" % (base, filenames),
'=' * SEP_WIDTH,
]
diff = False
for i in range(len(self.base_lines)):
lines = filter(None, self.get_line(i))
if lines:
output.append('\n'.join([limit(line, term_width) for line in lines]))
diff = True
if not diff:
output.append("(no diff)")
output.append('=' * SEP_WIDTH)
return '\n'.join(output)
def multidiff(base, files, colored=True):
"""
Generate a diff for multiple files, all compared to base.
:param base: base to compare with
:param files: list of files to compare with base
:param colored: boolean indicating whether a colored multi-diff should be generated
:return: text with multidiff overview
"""
differ = difflib.Differ()
base_lines = read_file(base).split('\n')
mdiff = MultiDiff(os.path.basename(base), base_lines, files, colored=colored)
# use the MultiDiff class to store the information
for filepath in files:
lines = read_file(filepath).split('\n')
diff = differ.compare(lines, base_lines)
filename = os.path.basename(filepath)
        # construct map of line number to diff lines and mapping between diff lines
# example partial diff:
#
# - toolchain = {'name': 'goolfc', 'version': '2.6.10'}
# ? - ^ ^
#
# + toolchain = {'name': 'goolf', 'version': '1.6.20'}
# ? ^ ^
#
local_diff = {}
squigly_dict = {}
last_added = None
offset = 1
for (i, line) in enumerate(diff):
# diff line indicating changed characters on line above, a.k.a. a 'squigly' line
if line.startswith(QUESTIONMARK):
squigly_dict[last_added] = line
offset -= 1
# diff line indicating addition change
elif line.startswith(PLUS):
local_diff.setdefault(i + offset, []).append((line, filename))
last_added = line
            # diff line indicating removal change
elif line.startswith(MINUS):
local_diff.setdefault(i + offset, []).append((line, filename))
last_added = line
offset -= 1
# construct the multi-diff based on the constructed dict
for line_no in local_diff:
for (line, filename) in local_diff[line_no]:
mdiff.parse_line(line_no, line.rstrip(), filename, squigly_dict.get(line, '').rstrip())
return str(mdiff)
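# --- Illustrative usage sketch (not part of the original module) -----------
# multidiff() works on file paths, so a quick way to try it is to write a
# couple of small files and compare them against a base. The file contents
# below are made up; colored=False keeps the output free of ANSI codes.
def _example_multidiff(tmpdir):
    base = os.path.join(tmpdir, 'base.eb')
    other = os.path.join(tmpdir, 'other.eb')
    with open(base, 'w') as handle:
        handle.write("name = 'foo'\nversion = '1.0'\n")
    with open(other, 'w') as handle:
        handle.write("name = 'foo'\nversion = '2.0'\n")
    return multidiff(base, [other], colored=False)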
| gpl-2.0 | -2,696,512,201,220,367,000 | 35.343643 | 104 | 0.574414 | false |
grlurton/orbf_data_validation | src/data_preparation/excel_consolidation/get_excel_metadata.py | 1 | 2188 | #%%
import xlrd
import itertools
import os
import pandas as pd
def combine_paths(directory, files):
return (os.path.join(directory, filename) for filename in files)
def get_excel_for_district(district_path):
files = os.walk(district_path)
files_per_directory = [combine_paths(walk[0],walk[2]) for walk in files]
all_files = list(itertools.chain(*files_per_directory))
return (f for f in all_files if f.endswith(('xlsx',"xls")))
def get_districts(root_path):
"""
Start from the directory containing all the districts. A district is assumed to be any
directory in root_path.
"""
return (os.path.join(root_path,directory) for directory in os.listdir(root_path) if os.path.isdir(os.path.join(root_path,directory)))
def get_districts_with_files(root_path):
return ((district, get_excel_for_district(district)) for district in get_districts(root_path))
def get_excel_metadata(filename):
try :
book = xlrd.open_workbook(filename , on_demand = True )
except :
        return pd.DataFrame({"filename": [filename.replace("\\", "/")],
                             "error": ["error opening file"]})
print(filename)
try :
if filename.endswith("xlsx"):
metadata = {"filename":[filename.replace("\\", "/")],
"user_name":[book.props["creator"]] ,
"last_modif_by":[book.props["last_modified_by"]] ,
"created":[book.props["created"]] ,
"modified":[book.props["modified"]]}
elif filename.endswith("xls"):
metadata = {"filename":[filename.replace("\\", "/")],
"user_name":[book.user_name]}
except :
        metadata = {"filename": [filename.replace("\\", "/")],
                    "error": ["file has no props"]}
return pd.DataFrame.from_dict(metadata)
def full_function(root_path) :
for district, files in get_districts_with_files(root_path) :
for filename in files :
yield get_excel_metadata(filename)
#%%
data_path = 'data/raw/rbv_credes/'
out = pd.DataFrame()
for results in full_function(data_path) :
out = out.append(results)
out.to_csv(data_path + "excel_metadata.csv")
| mit | 5,083,607,058,927,558,000 | 36.385965 | 137 | 0.603291 | false |
CiscoSystems/nova | nova/tests/objects/test_objects.py | 1 | 29101 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import mock
import six
from testtools import matchers
from nova.conductor import rpcapi as conductor_rpcapi
from nova import context
from nova import exception
from nova.objects import base
from nova.objects import fields
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import rpc
from nova import test
class MyObj(base.NovaPersistentObject, base.NovaObject):
VERSION = '1.5'
fields = {'foo': fields.Field(fields.Integer()),
'bar': fields.Field(fields.String()),
'missing': fields.Field(fields.String()),
}
@staticmethod
def _from_db_object(context, obj, db_obj):
self = MyObj()
self.foo = db_obj['foo']
self.bar = db_obj['bar']
self.missing = db_obj['missing']
return self
def obj_load_attr(self, attrname):
setattr(self, attrname, 'loaded!')
@base.remotable_classmethod
def query(cls, context):
obj = cls(foo=1, bar='bar')
obj.obj_reset_changes()
return obj
@base.remotable
def marco(self, context):
return 'polo'
@base.remotable
def _update_test(self, context):
if context.project_id == 'alternate':
self.bar = 'alternate-context'
else:
self.bar = 'updated'
@base.remotable
def save(self, context):
self.obj_reset_changes()
@base.remotable
def refresh(self, context):
self.foo = 321
self.bar = 'refreshed'
self.obj_reset_changes()
@base.remotable
def modify_save_modify(self, context):
self.bar = 'meow'
self.save()
self.foo = 42
def obj_make_compatible(self, primitive, target_version):
# NOTE(danms): Simulate an older version that had a different
# format for the 'bar' attribute
if target_version == '1.1':
primitive['bar'] = 'old%s' % primitive['bar']
class MyObj2(object):
@classmethod
def obj_name(cls):
return 'MyObj'
@base.remotable_classmethod
def query(cls, *args, **kwargs):
pass
class RandomMixInWithNoFields(object):
"""Used to test object inheritance using a mixin that has no fields."""
pass
class TestSubclassedObject(RandomMixInWithNoFields, MyObj):
fields = {'new_field': fields.Field(fields.String())}
class TestMetaclass(test.TestCase):
def test_obj_tracking(self):
@six.add_metaclass(base.NovaObjectMetaclass)
class NewBaseClass(object):
fields = {}
@classmethod
def obj_name(cls):
return cls.__name__
class Test1(NewBaseClass):
@staticmethod
def obj_name():
return 'fake1'
class Test2(NewBaseClass):
pass
class Test2v2(NewBaseClass):
@staticmethod
def obj_name():
return 'Test2'
expected = {'fake1': [Test1], 'Test2': [Test2, Test2v2]}
self.assertEqual(expected, NewBaseClass._obj_classes)
# The following should work, also.
self.assertEqual(expected, Test1._obj_classes)
self.assertEqual(expected, Test2._obj_classes)
class TestObjToPrimitive(test.TestCase):
def test_obj_to_primitive_list(self):
class MyObjElement(base.NovaObject):
fields = {'foo': fields.IntegerField()}
def __init__(self, foo):
super(MyObjElement, self).__init__()
self.foo = foo
class MyList(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyObjElement')}
mylist = MyList()
mylist.objects = [MyObjElement(1), MyObjElement(2), MyObjElement(3)]
self.assertEqual([1, 2, 3],
[x['foo'] for x in base.obj_to_primitive(mylist)])
def test_obj_to_primitive_dict(self):
myobj = MyObj(foo=1, bar='foo')
self.assertEqual({'foo': 1, 'bar': 'foo'},
base.obj_to_primitive(myobj))
def test_obj_to_primitive_recursive(self):
class MyList(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyObj')}
mylist = MyList(objects=[MyObj(), MyObj()])
for i, value in enumerate(mylist):
value.foo = i
self.assertEqual([{'foo': 0}, {'foo': 1}],
base.obj_to_primitive(mylist))
def test_obj_to_primitive_with_ip_addr(self):
class TestObject(base.NovaObject):
fields = {'addr': fields.IPAddressField(),
'cidr': fields.IPNetworkField()}
obj = TestObject(addr='1.2.3.4', cidr='1.1.1.1/16')
self.assertEqual({'addr': '1.2.3.4', 'cidr': '1.1.1.1/16'},
base.obj_to_primitive(obj))
class TestObjMakeList(test.TestCase):
def test_obj_make_list(self):
class MyList(base.ObjectListBase, base.NovaObject):
pass
db_objs = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'},
{'foo': 2, 'bar': 'bat', 'missing': 'apple'},
]
mylist = base.obj_make_list('ctxt', MyList(), MyObj, db_objs)
self.assertEqual(2, len(mylist))
self.assertEqual('ctxt', mylist._context)
for index, item in enumerate(mylist):
self.assertEqual(db_objs[index]['foo'], item.foo)
self.assertEqual(db_objs[index]['bar'], item.bar)
self.assertEqual(db_objs[index]['missing'], item.missing)
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
"""Compare a NovaObject and a dict-like database object.
This automatically converts TZ-aware datetimes and iterates over
the fields of the object.
:param:test: The TestCase doing the comparison
:param:obj: The NovaObject to examine
:param:db_obj: The dict-like database object to use as reference
:param:subs: A dict of objkey=dbkey field substitutions
:param:allow_missing: A list of fields that may not be in db_obj
:param:comparators: Map of comparator functions to use for certain fields
"""
if subs is None:
subs = {}
if allow_missing is None:
allow_missing = []
if comparators is None:
comparators = {}
for key in obj.fields:
if key in allow_missing and not obj.obj_attr_is_set(key):
continue
obj_val = obj[key]
db_key = subs.get(key, key)
db_val = db_obj[db_key]
if isinstance(obj_val, datetime.datetime):
obj_val = obj_val.replace(tzinfo=None)
if key in comparators:
comparator = comparators[key]
comparator(db_val, obj_val)
else:
test.assertEqual(db_val, obj_val)
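# NOTE: illustrative sketch, not an actual test case in this module.
# compare_obj() walks the object's fields and checks each against the
# dict-like DB row, honouring key substitutions, allow_missing and custom
# comparators. The call below shows the intended shape using MyObj; the db
# row is made up and 'test' stands for any TestCase instance.
def _example_compare_obj(test):
    obj = MyObj(foo=1, bar='bar', missing='banana')
    db_row = {'foo': 1, 'bar': 'bar', 'missing': 'banana'}
    compare_obj(test, obj, db_row,
                allow_missing=['created_at', 'updated_at',
                               'deleted_at', 'deleted'])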
class _BaseTestCase(test.TestCase):
def setUp(self):
super(_BaseTestCase, self).setUp()
self.remote_object_calls = list()
self.context = context.RequestContext('fake-user', 'fake-project')
def compare_obj(self, obj, db_obj, subs=None, allow_missing=None,
comparators=None):
compare_obj(self, obj, db_obj, subs=subs, allow_missing=allow_missing,
comparators=comparators)
def json_comparator(self, expected, obj_val):
# json-ify an object field for comparison with its db str
#equivalent
self.assertEqual(expected, jsonutils.dumps(obj_val))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Python < v2.7 compatibility. Assert 'not isinstance(obj, cls)."""
try:
f = super(_BaseTestCase, self).assertNotIsInstance
except AttributeError:
self.assertThat(obj,
matchers.Not(matchers.IsInstance(cls)),
message=msg or '')
else:
f(obj, cls, msg=msg)
class _LocalTest(_BaseTestCase):
def setUp(self):
super(_LocalTest, self).setUp()
# Just in case
base.NovaObject.indirection_api = None
def assertRemotes(self):
self.assertEqual(self.remote_object_calls, [])
@contextlib.contextmanager
def things_temporarily_local():
# Temporarily go non-remote so the conductor handles
# this request directly
_api = base.NovaObject.indirection_api
base.NovaObject.indirection_api = None
yield
base.NovaObject.indirection_api = _api
class _RemoteTest(_BaseTestCase):
def _testable_conductor(self):
self.conductor_service = self.start_service(
'conductor', manager='nova.conductor.manager.ConductorManager')
self.remote_object_calls = list()
orig_object_class_action = \
self.conductor_service.manager.object_class_action
orig_object_action = \
self.conductor_service.manager.object_action
def fake_object_class_action(*args, **kwargs):
self.remote_object_calls.append((kwargs.get('objname'),
kwargs.get('objmethod')))
with things_temporarily_local():
result = orig_object_class_action(*args, **kwargs)
return (base.NovaObject.obj_from_primitive(result, context=args[0])
if isinstance(result, base.NovaObject) else result)
self.stubs.Set(self.conductor_service.manager, 'object_class_action',
fake_object_class_action)
def fake_object_action(*args, **kwargs):
self.remote_object_calls.append((kwargs.get('objinst'),
kwargs.get('objmethod')))
with things_temporarily_local():
result = orig_object_action(*args, **kwargs)
return result
self.stubs.Set(self.conductor_service.manager, 'object_action',
fake_object_action)
# Things are remoted by default in this session
base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI()
# To make sure local and remote contexts match
self.stubs.Set(rpc.RequestContextSerializer,
'serialize_context',
lambda s, c: c)
self.stubs.Set(rpc.RequestContextSerializer,
'deserialize_context',
lambda s, c: c)
def setUp(self):
super(_RemoteTest, self).setUp()
self._testable_conductor()
def assertRemotes(self):
self.assertNotEqual(self.remote_object_calls, [])
class _TestObject(object):
def test_hydration_type_error(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 'a'}}
self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)
def test_hydration(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
real_method = MyObj._obj_from_primitive
def _obj_from_primitive(*args):
return real_method(*args)
with mock.patch.object(MyObj, '_obj_from_primitive') as ofp:
ofp.side_effect = _obj_from_primitive
obj = MyObj.obj_from_primitive(primitive)
ofp.assert_called_once_with(None, '1.5', primitive)
self.assertEqual(obj.foo, 1)
def test_hydration_version_different(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.2',
'nova_object.data': {'foo': 1}}
obj = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj.foo, 1)
self.assertEqual('1.2', obj.VERSION)
def test_hydration_bad_ns(self):
primitive = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'foo',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
self.assertRaises(exception.UnsupportedObjectError,
MyObj.obj_from_primitive, primitive)
def test_dehydration(self):
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.data': {'foo': 1}}
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.obj_to_primitive(), expected)
def test_object_property(self):
obj = MyObj(foo=1)
self.assertEqual(obj.foo, 1)
def test_object_property_type_error(self):
obj = MyObj()
def fail():
obj.foo = 'a'
self.assertRaises(ValueError, fail)
def test_object_dict_syntax(self):
obj = MyObj(foo=123, bar='bar')
self.assertEqual(obj['foo'], 123)
self.assertEqual(sorted(obj.items(), key=lambda x: x[0]),
[('bar', 'bar'), ('foo', 123)])
self.assertEqual(sorted(list(obj.iteritems()), key=lambda x: x[0]),
[('bar', 'bar'), ('foo', 123)])
def test_load(self):
obj = MyObj()
self.assertEqual(obj.bar, 'loaded!')
def test_load_in_base(self):
class Foo(base.NovaObject):
fields = {'foobar': fields.Field(fields.Integer())}
obj = Foo()
# NOTE(danms): Can't use assertRaisesRegexp() because of py26
raised = False
try:
obj.foobar
except NotImplementedError as ex:
raised = True
self.assertTrue(raised)
self.assertIn('foobar', str(ex))
def test_loaded_in_primitive(self):
obj = MyObj(foo=1)
obj.obj_reset_changes()
self.assertEqual(obj.bar, 'loaded!')
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.changes': ['bar'],
'nova_object.data': {'foo': 1,
'bar': 'loaded!'}}
self.assertEqual(obj.obj_to_primitive(), expected)
def test_changes_in_primitive(self):
obj = MyObj(foo=123)
self.assertEqual(obj.obj_what_changed(), set(['foo']))
primitive = obj.obj_to_primitive()
self.assertIn('nova_object.changes', primitive)
obj2 = MyObj.obj_from_primitive(primitive)
self.assertEqual(obj2.obj_what_changed(), set(['foo']))
obj2.obj_reset_changes()
self.assertEqual(obj2.obj_what_changed(), set())
def test_unknown_objtype(self):
self.assertRaises(exception.UnsupportedObjectError,
base.NovaObject.obj_class_from_name, 'foo', '1.0')
def test_obj_class_from_name_supported_version(self):
error = None
try:
base.NovaObject.obj_class_from_name('MyObj', '1.25')
except exception.IncompatibleObjectVersion as error:
pass
self.assertIsNotNone(error)
self.assertEqual('1.5', error.kwargs['supported'])
def test_with_alternate_context(self):
ctxt1 = context.RequestContext('foo', 'foo')
ctxt2 = context.RequestContext('bar', 'alternate')
obj = MyObj.query(ctxt1)
obj._update_test(ctxt2)
self.assertEqual(obj.bar, 'alternate-context')
self.assertRemotes()
def test_orphaned_object(self):
obj = MyObj.query(self.context)
obj._context = None
self.assertRaises(exception.OrphanedObjectError,
obj._update_test)
self.assertRemotes()
def test_changed_1(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj._update_test(self.context)
self.assertEqual(obj.obj_what_changed(), set(['foo', 'bar']))
self.assertEqual(obj.foo, 123)
self.assertRemotes()
def test_changed_2(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.save(self.context)
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 123)
self.assertRemotes()
def test_changed_3(self):
obj = MyObj.query(self.context)
obj.foo = 123
self.assertEqual(obj.obj_what_changed(), set(['foo']))
obj.refresh(self.context)
self.assertEqual(obj.obj_what_changed(), set([]))
self.assertEqual(obj.foo, 321)
self.assertEqual(obj.bar, 'refreshed')
self.assertRemotes()
def test_changed_4(self):
obj = MyObj.query(self.context)
obj.bar = 'something'
self.assertEqual(obj.obj_what_changed(), set(['bar']))
obj.modify_save_modify(self.context)
self.assertEqual(obj.obj_what_changed(), set(['foo']))
self.assertEqual(obj.foo, 42)
self.assertEqual(obj.bar, 'meow')
self.assertRemotes()
def test_changed_with_sub_object(self):
class ParentObject(base.NovaObject):
fields = {'foo': fields.IntegerField(),
'bar': fields.ObjectField('MyObj'),
}
obj = ParentObject()
self.assertEqual(set(), obj.obj_what_changed())
obj.foo = 1
self.assertEqual(set(['foo']), obj.obj_what_changed())
bar = MyObj()
obj.bar = bar
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
obj.obj_reset_changes()
self.assertEqual(set(), obj.obj_what_changed())
bar.foo = 1
self.assertEqual(set(['bar']), obj.obj_what_changed())
def test_static_result(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.bar, 'bar')
result = obj.marco()
self.assertEqual(result, 'polo')
self.assertRemotes()
def test_updates(self):
obj = MyObj.query(self.context)
self.assertEqual(obj.foo, 1)
obj._update_test()
self.assertEqual(obj.bar, 'updated')
self.assertRemotes()
def test_base_attributes(self):
dt = datetime.datetime(1955, 11, 5)
obj = MyObj(created_at=dt, updated_at=dt, deleted_at=None,
deleted=False)
expected = {'nova_object.name': 'MyObj',
'nova_object.namespace': 'nova',
'nova_object.version': '1.5',
'nova_object.changes':
['deleted', 'created_at', 'deleted_at', 'updated_at'],
'nova_object.data':
{'created_at': timeutils.isotime(dt),
'updated_at': timeutils.isotime(dt),
'deleted_at': None,
'deleted': False,
}
}
self.assertEqual(obj.obj_to_primitive(), expected)
def test_contains(self):
obj = MyObj()
self.assertNotIn('foo', obj)
obj.foo = 1
self.assertIn('foo', obj)
self.assertNotIn('does_not_exist', obj)
def test_obj_attr_is_set(self):
obj = MyObj(foo=1)
self.assertTrue(obj.obj_attr_is_set('foo'))
self.assertFalse(obj.obj_attr_is_set('bar'))
self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')
def test_get(self):
obj = MyObj(foo=1)
# Foo has value, should not get the default
self.assertEqual(obj.get('foo', 2), 1)
# Foo has value, should return the value without error
self.assertEqual(obj.get('foo'), 1)
# Bar is not loaded, so we should get the default
self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded')
# Bar without a default should lazy-load
self.assertEqual(obj.get('bar'), 'loaded!')
# Bar now has a default, but loaded value should be returned
self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!')
# Invalid attribute should raise AttributeError
self.assertRaises(AttributeError, obj.get, 'nothing')
# ...even with a default
self.assertRaises(AttributeError, obj.get, 'nothing', 3)
def test_object_inheritance(self):
base_fields = base.NovaPersistentObject.fields.keys()
myobj_fields = ['foo', 'bar', 'missing'] + base_fields
myobj3_fields = ['new_field']
self.assertTrue(issubclass(TestSubclassedObject, MyObj))
self.assertEqual(len(myobj_fields), len(MyObj.fields))
self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
self.assertEqual(len(myobj_fields) + len(myobj3_fields),
len(TestSubclassedObject.fields))
self.assertEqual(set(myobj_fields) | set(myobj3_fields),
set(TestSubclassedObject.fields.keys()))
def test_get_changes(self):
obj = MyObj()
self.assertEqual({}, obj.obj_get_changes())
obj.foo = 123
self.assertEqual({'foo': 123}, obj.obj_get_changes())
obj.bar = 'test'
self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
obj.obj_reset_changes()
self.assertEqual({}, obj.obj_get_changes())
def test_obj_fields(self):
class TestObj(base.NovaObject):
fields = {'foo': fields.Field(fields.Integer())}
obj_extra_fields = ['bar']
@property
def bar(self):
return 'this is bar'
obj = TestObj()
self.assertEqual(['foo', 'bar'], obj.obj_fields)
def test_obj_constructor(self):
obj = MyObj(context=self.context, foo=123, bar='abc')
self.assertEqual(123, obj.foo)
self.assertEqual('abc', obj.bar)
self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
class TestObject(_LocalTest, _TestObject):
pass
class TestRemoteObject(_RemoteTest, _TestObject):
def test_major_version_mismatch(self):
MyObj2.VERSION = '2.0'
self.assertRaises(exception.IncompatibleObjectVersion,
MyObj2.query, self.context)
def test_minor_version_greater(self):
MyObj2.VERSION = '1.6'
self.assertRaises(exception.IncompatibleObjectVersion,
MyObj2.query, self.context)
def test_minor_version_less(self):
MyObj2.VERSION = '1.2'
obj = MyObj2.query(self.context)
self.assertEqual(obj.bar, 'bar')
self.assertRemotes()
def test_compat(self):
MyObj2.VERSION = '1.1'
obj = MyObj2.query(self.context)
self.assertEqual('oldbar', obj.bar)
class TestObjectListBase(test.TestCase):
def test_list_like_operations(self):
class MyElement(base.NovaObject):
fields = {'foo': fields.IntegerField()}
def __init__(self, foo):
super(MyElement, self).__init__()
self.foo = foo
class Foo(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('MyElement')}
objlist = Foo(context='foo',
objects=[MyElement(1), MyElement(2), MyElement(3)])
self.assertEqual(list(objlist), objlist.objects)
self.assertEqual(len(objlist), 3)
self.assertIn(objlist.objects[0], objlist)
self.assertEqual(list(objlist[:1]), [objlist.objects[0]])
self.assertEqual(objlist[:1]._context, 'foo')
self.assertEqual(objlist[2], objlist.objects[2])
self.assertEqual(objlist.count(objlist.objects[0]), 1)
self.assertEqual(objlist.index(objlist.objects[1]), 1)
objlist.sort(key=lambda x: x.foo, reverse=True)
self.assertEqual([3, 2, 1],
[x.foo for x in objlist])
def test_serialization(self):
class Foo(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('Bar')}
class Bar(base.NovaObject):
fields = {'foo': fields.Field(fields.String())}
obj = Foo(objects=[])
for i in 'abc':
bar = Bar(foo=i)
obj.objects.append(bar)
obj2 = base.NovaObject.obj_from_primitive(obj.obj_to_primitive())
self.assertFalse(obj is obj2)
self.assertEqual([x.foo for x in obj],
[y.foo for y in obj2])
def _test_object_list_version_mappings(self, list_obj_class):
# Figure out what sort of object this list is for
list_field = list_obj_class.fields['objects']
item_obj_field = list_field._type._element_type
item_obj_name = item_obj_field._type._obj_name
# Look through all object classes of this type and make sure that
# the versions we find are covered by the parent list class
for item_class in base.NovaObject._obj_classes[item_obj_name]:
self.assertIn(
item_class.VERSION,
list_obj_class.child_versions.values())
def test_object_version_mappings(self):
# Find all object list classes and make sure that they at least handle
# all the current object versions
for obj_classes in base.NovaObject._obj_classes.values():
for obj_class in obj_classes:
if issubclass(obj_class, base.ObjectListBase):
self._test_object_list_version_mappings(obj_class)
def test_list_changes(self):
class Foo(base.ObjectListBase, base.NovaObject):
fields = {'objects': fields.ListOfObjectsField('Bar')}
class Bar(base.NovaObject):
fields = {'foo': fields.StringField()}
obj = Foo(objects=[])
self.assertEqual(set(['objects']), obj.obj_what_changed())
obj.objects.append(Bar(foo='test'))
self.assertEqual(set(['objects']), obj.obj_what_changed())
obj.obj_reset_changes()
# This should still look dirty because the child is dirty
self.assertEqual(set(['objects']), obj.obj_what_changed())
obj.objects[0].obj_reset_changes()
# This should now look clean because the child is clean
self.assertEqual(set(), obj.obj_what_changed())
class TestObjectSerializer(_BaseTestCase):
def test_serialize_entity_primitive(self):
ser = base.NovaObjectSerializer()
for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
self.assertEqual(thing, ser.serialize_entity(None, thing))
def test_deserialize_entity_primitive(self):
ser = base.NovaObjectSerializer()
for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
self.assertEqual(thing, ser.deserialize_entity(None, thing))
def test_deserialize_entity_newer_version(self):
ser = base.NovaObjectSerializer()
ser._conductor = mock.Mock()
ser._conductor.object_backport.return_value = 'backported'
obj = MyObj()
obj.VERSION = '1.25'
primitive = obj.obj_to_primitive()
result = ser.deserialize_entity(self.context, primitive)
self.assertEqual('backported', result)
ser._conductor.object_backport.assert_called_with(self.context,
primitive,
'1.5')
def test_object_serialization(self):
ser = base.NovaObjectSerializer()
obj = MyObj()
primitive = ser.serialize_entity(self.context, obj)
self.assertIn('nova_object.name', primitive)
obj2 = ser.deserialize_entity(self.context, primitive)
self.assertIsInstance(obj2, MyObj)
self.assertEqual(self.context, obj2._context)
def test_object_serialization_iterables(self):
ser = base.NovaObjectSerializer()
obj = MyObj()
for iterable in (list, tuple, set):
thing = iterable([obj])
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(1, len(primitive))
for item in primitive:
self.assertNotIsInstance(item, base.NovaObject)
thing2 = ser.deserialize_entity(self.context, primitive)
self.assertEqual(1, len(thing2))
for item in thing2:
self.assertIsInstance(item, MyObj)
| apache-2.0 | -917,078,803,880,022,400 | 35.930203 | 79 | 0.586543 | false |
ranjaykrishna/simple-amt | reject_assignments.py | 1 | 1247 | import argparse, json
import simpleamt
if __name__ == '__main__':
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--prod', action='store_false', dest='sandbox',
default=True,
help="Whether to run on the production AMT site.")
parser.add_argument('--assignment_ids_file')
parser.add_argument('--config', default='config.json', type=simpleamt.json_file)
args = parser.parse_args()
mtc = simpleamt.get_mturk_connection_from_args(args)
if args.assignment_ids_file is None:
parser.error('Must specify --assignment_ids_file.')
with open(args.assignment_ids_file, 'r') as f:
assignment_ids = [line.strip() for line in f]
print ('This will reject %d assignments with '
'sandbox=%s' % (len(assignment_ids), str(args.sandbox)))
print 'Continue?'
s = raw_input('(Y/N): ')
if s == 'Y' or s == 'y':
print 'Rejecting assignments'
for idx, assignment_id in enumerate(assignment_ids):
print 'Rejecting assignment %d / %d' % (idx + 1, len(assignment_ids))
try:
mtc.reject_assignment(assignment_id, feedback='Invalid results')
except:
print "Could not reject: %s" % (assignment_id)
else:
print 'Aborting'
| mit | 336,828,048,293,545,300 | 35.676471 | 82 | 0.639936 | false |
tgcmteam/tgcmlinux | src/tgcm/ui/MSD/MSDActionsManager.py | 1 | 2000 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Authors : Roberto Majadas <[email protected]>
# Cesar Garcia Tapia <[email protected]>
# Oier Blasco <[email protected]>
# Alvaro Peña <[email protected]>
#
# Copyright (c) 2003-2012, Telefonica Móviles España S.A.U.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this library; if not, write to the Free
# Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import tgcm
import tgcm.core.Actions
class MSDActionsManager:
def __init__(self, conn_manager):
tgcm.info("Init tgcmActionsManager")
self._actions_manager = tgcm.core.Actions.ActionManager()
self.conn_manager = conn_manager
self.systray = None
self.action_list = self._actions_manager.get_actions()
def set_systray(self, systray):
self.systray = systray
def set_connections_model(self, model):
for x in self.action_list.keys():
obj = self.action_list[x]
obj.set_connections_model(model)
def connect_signals(self):
for x in self.action_list.keys():
obj = self.action_list[x]
obj.connect_signals()
def launch_action(self, action_codename):
obj = self.action_list[action_codename]
obj.launch(self.conn_manager)
def launch_help(self, action_codename):
obj = self.action_list[action_codename]
obj.launch_help()
| gpl-2.0 | 3,031,636,920,097,830,000 | 33.431034 | 68 | 0.68002 | false |
pp-mo/pylogy | newideas.py | 1 | 3123 | #
# mechanism
#
def Pred__possibles(args):
for rule in self.rules:
for possible in rule.possibles(args):
yield possible
def Rule__possibles(args):
locals = self.match_args(self.args, args)
if locals is None:
return
# E.G. "pred(X, 1)" matches ("Q", "V") producing {'X':Var("Q"), 'V':Lit(1)}
# (and returns [X, 1])
for term in self.terms:
local, term_args = term.fill_args(locals)
# E.G. "call(X, V, 3)" matches from {'X':Var("Q"),'V':Lit(1)}
# producing {X:Q, V:1} and returning [X, 1, 3]
# E.G. "call(X, Z)" with {X:Q, V:1} matches [Q, Z] yielding {X:Q, V:1, Z:V(xxx)}
# and then P(Z) would match [Z].
for possible in term.pred.possibles(term_args): # NB *not* locals
yield possible
def Rule__match_args(args):
vars = {}
for arg in self.args:
vars = arg.match_vars(arg, vars) # None if fail
if vars is None:
break
return vars
_uid = 0
def new_temp_var(basename):
    global _uid
    _uid += 1
    name = basename + '_' + str(_uid)
    return Var(name)
def Term__fill_args(vars):
args = []
for arg in self.args:
# E.G. I(4) returns (I(4))
# E.G. V('X') pulls from {X:3} returning (I(3))
# E.G. V('Z') pulls from {V:Z} returning (V(Z))
# E.G. V('Q') adds {Q:V()}
if isinstance(arg, VarArg):
if arg.name not in vars:
vars[arg.name] = new_temp_var(arg.name)
arg = vars[arg.name]
        elif isinstance(arg, LiteralArg):
            pass
else:
raise ValueError()
args.append(arg)
return args
def LiteralArg__match_term(term, vars):
if isinstance(term, LiteralArg):
# E.G. f(X, 2) ?match (_Y, Lit(?))
if self.value == term.value:
# E.G. f(X, 2) ?match (_Y, 2)
pass # ok
else:
# E.G. f(X, 2) ?match (_Y, 3)
vars = None # fail
elif isinstance(term, VarArg):
# E.G. f(X, 2) ?match f(_Y, Q)
existing = vars.get(term.name)
if not existing:
# E.G. f(X, 2) ?match f(_Y, _Q)
vars[term.name] = term
        elif existing.value == self.value:
            # E.G. f(X, 2) ?match f(_Y, Z)
            pass
        else:
            vars = None  # fail
    return vars
def VarArg__match_term(term, vars):
name = self.name
if isinstance(term, LiteralArg):
# E.G. f(X) ?match (3)
if name in vars:
vars[name] = new_temp_var(name)
vars[name] = term
elif isinstance(term, VarArg):
existing = vars.get(self.name)
if not existing:
vars[self.name] = term
else:
raise ValueError
return vars
def ConsArg__match_term(term, vars):
if (isinstance(term, LiteralTerm) and
isinstance(term.value, list) and len(term.value) > 0):
vars = self.head.match_vars(make_term(term.value[0]), vars)
if vars is not None:
vars = self.tail.match_vars(make_term(term.value[1:]), vars)
else:
raise ValueError
return vars
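# Illustrative sketch of how the pieces above are meant to compose; the
# Pred/Rule/Var/Lit constructors and the .possibles()/.rules attributes are
# assumptions about the eventual classes, not definitions made in this file:
#
#   parent = Pred('parent')
#   parent.rules.append(Rule(args=[Lit('tom'), Lit('bob')], terms=[]))
#   # Query parent(X, bob): expect one solution binding X to 'tom'.
#   for solution in parent.possibles([VarArg('X'), LiteralArg('bob')]):
#       print(solution)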
| gpl-3.0 | -2,520,361,608,389,034,000 | 29.617647 | 88 | 0.51521 | false |
SauloAislan/ironic | ironic/conf/opts.py | 1 | 3708 | # Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from oslo_log import log
import ironic.conf
_default_opt_lists = [
ironic.conf.default.api_opts,
ironic.conf.default.driver_opts,
ironic.conf.default.exc_log_opts,
ironic.conf.default.hash_opts,
ironic.conf.default.image_opts,
ironic.conf.default.img_cache_opts,
ironic.conf.default.netconf_opts,
ironic.conf.default.notification_opts,
ironic.conf.default.path_opts,
ironic.conf.default.portgroup_opts,
ironic.conf.default.service_opts,
ironic.conf.default.utils_opts,
]
_opts = [
('DEFAULT', itertools.chain(*_default_opt_lists)),
('agent', ironic.conf.agent.opts),
('api', ironic.conf.api.opts),
('audit', ironic.conf.audit.opts),
('cimc', ironic.conf.cisco.cimc_opts),
('cinder', ironic.conf.cinder.list_opts()),
('cisco_ucs', ironic.conf.cisco.ucsm_opts),
('conductor', ironic.conf.conductor.opts),
('console', ironic.conf.console.opts),
('database', ironic.conf.database.opts),
('deploy', ironic.conf.deploy.opts),
('dhcp', ironic.conf.dhcp.opts),
('drac', ironic.conf.drac.opts),
('glance', ironic.conf.glance.list_opts()),
('ilo', ironic.conf.ilo.opts),
('inspector', ironic.conf.inspector.list_opts()),
('ipmi', ironic.conf.ipmi.opts),
('irmc', ironic.conf.irmc.opts),
('iscsi', ironic.conf.iscsi.opts),
('keystone', ironic.conf.keystone.opts),
('metrics', ironic.conf.metrics.opts),
('metrics_statsd', ironic.conf.metrics_statsd.opts),
('neutron', ironic.conf.neutron.list_opts()),
('oneview', ironic.conf.oneview.opts),
('pxe', ironic.conf.pxe.opts),
('service_catalog', ironic.conf.service_catalog.list_opts()),
('snmp', ironic.conf.snmp.opts),
('ssh', ironic.conf.ssh.opts),
('swift', ironic.conf.swift.list_opts()),
]
def list_opts():
"""Return a list of oslo.config options available in Ironic code.
The returned list includes all oslo.config options. Each element of
the list is a tuple. The first element is the name of the group, the
second element is the options.
The function is discoverable via the 'ironic' entry point under the
'oslo.config.opts' namespace.
The function is used by Oslo sample config file generator to discover the
options.
:returns: a list of (group, options) tuples
"""
return _opts
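# Illustrative wiring for the docstring above (the entry-point name mirrors the
# docstring; the exact setup.cfg layout is an assumption of this sketch):
#
#   [entry_points]
#   oslo.config.opts =
#       ironic = ironic.conf.opts:list_opts
#
# A consumer such as oslo-config-generator then iterates the result roughly as:
#
#   for group, opts in list_opts():
#       for opt in opts:
#           ...  # render each option under its group in the sample config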
def update_opt_defaults():
log.set_defaults(
default_log_levels=[
'amqp=WARNING',
'amqplib=WARNING',
'qpid.messaging=INFO',
# TODO(therve): when bug #1685148 is fixed in oslo.messaging, we
# should be able to remove one of those 2 lines.
'oslo_messaging=INFO',
'oslo.messaging=INFO',
'sqlalchemy=WARNING',
'stevedore=INFO',
'eventlet.wsgi.server=INFO',
'iso8601=WARNING',
'paramiko=WARNING',
'requests=WARNING',
'neutronclient=WARNING',
'glanceclient=WARNING',
'urllib3.connectionpool=WARNING',
'keystonemiddleware.auth_token=INFO',
'keystoneauth.session=INFO',
]
)
| apache-2.0 | 3,209,014,342,729,036,300 | 33.654206 | 77 | 0.653722 | false |
mjakob/notin | tests/__init__.py | 1 | 5817 | """Unit and regression tests for Notin."""
import hashlib
import io
import os
import sys
import textwrap
import unittest
try:
from unittest import mock
except ImportError:
import mock
import pkg_resources
import notin
class CapturingTestCase(unittest.TestCase):
"""A test case that captures stderr."""
def setUp(self):
super(CapturingTestCase, self).setUp()
self.stdout = sys.stdout = io.StringIO()
self.stderr = sys.stderr = io.StringIO()
def tearDown(self):
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
class LogTests(CapturingTestCase):
"""Unit and regression tests for notin's log() function."""
def test_simple_message(self):
notin.log("some message")
target_output = "{}: some message\n".format(sys.argv[0])
self.assertEqual(self.stderr.getvalue(), target_output)
def test_formated_message(self):
notin.log("format: {} {keyword}", 'positional', keyword='keyword')
target_output = "{}: format: positional keyword\n".format(sys.argv[0])
self.assertEqual(self.stderr.getvalue(), target_output)
class DataTestCase(unittest.TestCase):
"""A test case with paths to data directories."""
def setUp(self):
super(DataTestCase, self).setUp()
self.a = pkg_resources.resource_filename(__name__, 'data/a')
self.b = pkg_resources.resource_filename(__name__, 'data/b')
class ListfilesTests(DataTestCase):
"""Unit and regression tests for notin's listfiles() function."""
def test_list_a(self):
self.assertEqual(list(sorted(notin.listfiles(self.a))), [
os.path.join(self.a, 'c/same.txt'),
os.path.join(self.a, 'different.txt'),
os.path.join(self.a, 'same.txt'),
])
def test_list_a_with_symlink_to_b(self):
link = os.path.join(self.a, 'b')
try:
os.symlink(self.b, link)
except (NotImplementedError, OSError):
self.skipTest("unable to create symlink")
try:
self.assertEqual(list(sorted(notin.listfiles(self.a))), [
os.path.join(self.a, 'c/same.txt'),
os.path.join(self.a, 'different.txt'),
os.path.join(self.a, 'same.txt'),
])
finally:
os.remove(link)
def test_list_a_with_log(self):
log = mock.Mock()
list(notin.listfiles(self.a, log))
log.assert_has_calls([
mock.call(numfiles=2, dirpath=self.a),
mock.call(numfiles=1, dirpath=os.path.join(self.a, 'c')),
])
class Md5sumTests(DataTestCase):
"""Unit and regression tests for notin's md5sum() function."""
def test_on_same(self):
md5sum = notin.md5sum(os.path.join(self.a, 'same.txt'))
self.assertEqual(md5sum, hashlib.md5(b'abc\n').digest())
other_md5sum = notin.md5sum(os.path.join(self.b, 'same.txt'))
self.assertEqual(other_md5sum, md5sum)
def test_on_different(self):
md5sum = notin.md5sum(os.path.join(self.a, 'different.txt'))
self.assertEqual(md5sum, hashlib.md5(b'acb\n').digest())
other_md5sum = notin.md5sum(os.path.join(self.b, 'different.txt'))
self.assertNotEqual(other_md5sum, md5sum)
class MainTests(DataTestCase, CapturingTestCase):
"""Unit and regression tests for notin's main() function."""
def test_scan_a(self):
sys.argv[1:] = [self.b, self.a]
notin.main()
sorted_stdout = list(sorted(self.stdout.getvalue().splitlines()))
self.assertEqual(sorted_stdout, [
os.path.join(self.a, 'different.txt'),
])
self.assertEqual(self.stderr.getvalue(), "")
def test_scan_a_with_verbose_flag(self):
sys.argv[1:] = ['--verbose', self.b, self.a]
notin.main()
sorted_stdout = list(sorted(self.stdout.getvalue().splitlines()))
self.assertEqual(sorted_stdout, [
os.path.join(self.a, 'different.txt'),
])
self.assertEqual(self.stderr.getvalue(), textwrap.dedent("""\
{executable}: Indexing files in {b}
{executable}: Hashing 3 files in {b}
{executable}: Hashing 1 files in {bd}
{executable}: Scaning {a} for unique files
{executable}: Checking 2 files in {a}
{executable}: Checking 1 files in {ac}
""").format(
executable=sys.argv[0],
a=self.a, ac=os.path.join(self.a, 'c'),
b=self.b, bd=os.path.join(self.b, 'd'),
))
def test_scan_b(self):
sys.argv[1:] = [self.a, self.b]
notin.main()
sorted_stdout = list(sorted(self.stdout.getvalue().splitlines()))
self.assertEqual(sorted_stdout, [
os.path.join(self.b, 'different.txt'),
os.path.join(self.b, 'unique.txt'),
])
self.assertEqual(self.stderr.getvalue(), "")
def test_scan_b_with_verbose_flag(self):
sys.argv[1:] = ['--verbose', self.a, self.b]
notin.main()
sorted_stdout = list(sorted(self.stdout.getvalue().splitlines()))
self.assertEqual(sorted_stdout, [
os.path.join(self.b, 'different.txt'),
os.path.join(self.b, 'unique.txt'),
])
self.assertEqual(self.stderr.getvalue(), textwrap.dedent("""\
{executable}: Indexing files in {a}
{executable}: Hashing 2 files in {a}
{executable}: Hashing 1 files in {ac}
{executable}: Scaning {b} for unique files
{executable}: Checking 3 files in {b}
{executable}: Checking 1 files in {bd}
""").format(
executable=sys.argv[0],
a=self.a, ac=os.path.join(self.a, 'c'),
b=self.b, bd=os.path.join(self.b, 'd'),
))
| gpl-3.0 | 5,096,358,491,854,065,000 | 32.624277 | 78 | 0.587932 | false |
WladimirSidorenko/SentiLex | scripts/visualize_graph.py | 1 | 9464 | #!/usr/bin/env python2.7
##################################################################
# Imports
from __future__ import print_function, unicode_literals
from germanet import Germanet
from wordnet import Wordnet
from collections import Counter, defaultdict
from itertools import chain
from matplotlib import collections as mc
import argparse
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import numpy as np
import sys
##################################################################
# Constants
WORDNET = "wordnet"
GERMANET = "germanet"
REL2COLOR = {
# GermaNet
"causes": "#00F5FF",
"entails": "#00F5FF",
"has_antonym": "#8B1A1A",
"has_component_meronym": "#00008B",
"has_member_meronym": "#00008B",
"has_portion_meronym": "#00008B",
"has_substance_meronym": "#00008B",
"has_participle": "#FFA54f",
"has_pertainym": "#FFFF00",
"has_hypernym": "#8b4789",
"has_hyponym": "#8b4789",
"is_related_to": "#006400",
# WordNet
"Hyponym": "#8b4789",
"Instance Hyponym": "#8b4789",
"Antonym": "#8B1A1A",
"Member holonym": "#00008B",
"Part holonym": "#00008B",
"Substance holonym": "#00008B",
"Verb Group": "#00CD00",
"Member meronym": "#00008B",
"Part meronym": "#00008B",
"Substance meronym": "#00008B",
"Similar to": "#FF7256",
"Entailment": "#00F5FF",
"Derivationally related form": "#006400",
"Member of this domain - TOPIC": "#EE82EE",
"Member of this domain - REGION": "#EE82EE",
"Member of this domain - USAGE": "#EE82EE",
"Domain of synset - TOPIC": "#EE82EE",
"Domain of synset - REGION": "#EE82EE",
"Domain of synset - USAGE": "#EE82EE",
"Participle of verb": "#FFA54F",
"Attribute": "#FFA500",
"Cause": "#00F5FF",
"Hypernym": "#8b4789",
"Instance Hypernym": "#8b4789",
"Derived from adjective": "#FFFF00",
"Also see": "#006400"
}
REL2LABEL = {
# GermaNet
"has_antonym": "antonym",
"has_component_meronym": "meronym",
"has_member_meronym": "meronym",
"has_portion_meronym": "meronym",
"has_substance_meronym": "meronym",
"has_participle": "participle",
"has_pertainym": "pertainym",
"has_hypernym": "hypernym",
"has_hyponym": "hyponym",
"is_related_to": "related_to",
# WordNet
"Hyponym": "hyponym",
"Instance Hyponym": "hyponym",
"Antonym": "antonym",
"Member holonym": "holonym",
"Part holonym": "holonym",
"Substance holonym": "holonym",
"Verb Group": "verb group",
"Member meronym": "meronym",
"Part meronym": "meronym",
"Substance meronym": "meronym",
"Similar to": "similar to",
"Entailment": "entailment",
"Derivationally related form": "related_to",
"Member of this domain - TOPIC": "domain member",
"Member of this domain - REGION": "domain member",
"Member of this domain - USAGE": "domain member",
"Domain of synset - TOPIC": "domain",
"Domain of synset - REGION": "domain",
"Domain of synset - USAGE": "domain",
"Participle of verb": "participle",
"Attribute": "attribute",
"Cause": "cause",
"Hypernym": "hypernym",
"Instance Hypernym": "hypernym",
"Derived from adjective": "derived_from",
"Also see": "also see"
}
AX = plt.axes()
POS2COLOR = {"verben": "#00EE76", "v": "#00EE76",
"nomen": "#36648B", "n": "#36648B",
"adj": "#FFA54F", "a": "#FFA54F",
"r": "#97FFFF", "s": "#FF4500"}
POS2LABEL = {"nomen": "noun", "n": "noun",
"verben": "verb", "v": "verb",
"adj": "adjective", "a": "adjective",
"r": "adverb", "s": "adjective satellite"}
_POS2X = {"adj": 0, "a": 0,
"nomen": 1, "n": 1,
"verben": 2, "v": 2,
"r": 0, "s": 1}
_POS2Y = {"adj": 0, "a": 0,
"nomen": 1, "n": 1.5,
"verben": 0, "v": 0,
"r": 2.5, "s": 0.35}
DE_REL_RELS = ["has_hyponym", "has_antonym",
"has_pertainym", "is_related_to",
"has_participle"]
EN_REL_RELS = ["Hyponym", "Instance Hyponym", "Antonym",
"Derived from adjective", "Derivationally related form",
"Participle of verb"]
##################################################################
# Methods
def main(a_argv):
"""Main method for visualizing WordNet databases.
@param a_argv - command-line arguments
@return \c 0 on success, non-\c 0 otherwise
"""
argparser = argparse.ArgumentParser(
description="Script for visualizing WordNet-like databases.")
argparser.add_argument("wntype",
help="type of lexical database to visualize",
choices=(WORDNET, GERMANET))
argparser.add_argument("path", help="path to the lexical database")
args = argparser.parse_args(a_argv)
# nodes' X position, Y position, and color
_X, _Y = [], []
POS2X = defaultdict(list)
POS2Y = defaultdict(list)
# pos color mapping
POS2CNT = Counter()
# mapping from pos to X range
POS2XRANGE = {}
# mapping from pos to Y range
POS2YRANGE = {}
# mapping from synset id to node's index
SYNID2NODEID = {}
SIGMA = 10
# line collection to be initialized later
lc = None
# populate nodes
if args.wntype == GERMANET:
print("Reading GermaNet synsets... ", end="", file=sys.stderr)
inet = Germanet(args.path)
print("done", file=sys.stderr)
rel_rels = DE_REL_RELS
else:
print("Reading WordNet synsets... ", end="", file=sys.stderr)
inet = Wordnet(args.path)
print("done", file=sys.stderr)
rel_rels = EN_REL_RELS
# obtain available parts of speech
POS2CNT.update(inet.synid2pos.itervalues())
poses = set(inet.synid2pos.itervalues())
nposes = float(len(poses))
rpart = 500000. / min(3, nposes)
# populate colors and ranges for parts of speech
x = y = 0
for ipos in poses:
x = _POS2X[ipos]
y = _POS2Y[ipos]
POS2XRANGE[ipos] = x * rpart
POS2YRANGE[ipos] = y * rpart
# add nodes to the graph
x = y = 0.
invsigma = 2.
if args.wntype == WORDNET:
assert ("00704270", "s") in inet.synid2pos, \
"('00704270', 's') is missing"
for i, (isynid, ipos) in enumerate(inet.synid2pos.iteritems()):
# print("isynid =", repr(isynid), file=sys.stderr)
# sys.exit(66)
SYNID2NODEID[isynid] = i
x = np.random.normal(POS2XRANGE[ipos],
POS2CNT[ipos] / invsigma)
y = np.random.normal(POS2YRANGE[ipos],
POS2CNT[ipos] / invsigma)
_X.append(x)
POS2X[ipos].append(x)
_Y.append(y)
POS2Y[ipos].append(y)
# add edges to the graph
lines = []
lcolors = []
lex_rels = None
from_idx = to_idx = x_from = x_to = y_from = y_to = 0
if args.wntype == GERMANET:
iterrels = inet.con_relations.iteritems()
else:
iterrels = inet.relations.iteritems()
for ifrom, irels in iterrels:
# print("ifrom =", repr(ifrom), file=sys.stderr)
# sys.exit(66)
from_idx = SYNID2NODEID[ifrom]
if args.wntype == GERMANET:
lex_rels = [(to_synid, to_rel)
for from_lex in inet.synid2lexids[ifrom]
for to_lex, to_rel in inet.lex_relations[from_lex]
for to_synid in inet.lexid2synids[to_lex]]
else:
lex_rels = []
x_from, y_from = _X[from_idx], _Y[from_idx]
for (ito, irel) in chain(irels, lex_rels):
# print("irel: irel = {:s} {:d}".format(repr(irel),
# irel in rel_rels),
# file=sys.stderr)
            if irel not in rel_rels:
continue
# print("rel: ifrom = {:s}, irels = {:s}".format(repr(ifrom),
# repr(irels)),
# file=sys.stderr)
if ito not in SYNID2NODEID and ito[-1] == 'a':
to_idx = SYNID2NODEID[(ito[0], 's')]
else:
to_idx = SYNID2NODEID[ito]
x_to, y_to = _X[to_idx], _Y[to_idx]
lines.append(((x_from, y_from), (x_to, y_to)))
lcolors.append(REL2COLOR.get(irel, "#FFFFFF"))
# draw edges
lc = mc.LineCollection(lines, colors=lcolors,
alpha=0.15, linestyle='-'
)
# draw the graph
AX.add_collection(lc)
for ipos, x in POS2X.iteritems():
plt.scatter(x, POS2Y[ipos], label=POS2LABEL.get(ipos, ipos),
c=[POS2COLOR[ipos]] * len(x))
# add legend for edges
handles, labels = AX.get_legend_handles_labels()
iline = ilabel = None
known_labels = set()
for irel in rel_rels:
iline = mlines.Line2D([], [], color=REL2COLOR[irel], linewidth=3.)
ilabel = REL2LABEL[irel]
if ilabel in known_labels:
continue
handles.append(iline)
labels.append(ilabel)
known_labels.add(ilabel)
plt.legend(handles, labels,
loc="upper right", scatterpoints=1)
plt.axis("off")
plt.savefig(args.wntype + ".png", dpi=200)
plt.show() # display
##################################################################
# Main
if __name__ == "__main__":
main(sys.argv[1:])
| mit | -6,633,096,336,561,518,000 | 33.414545 | 74 | 0.539941 | false |
keighrim/bananaNER | scripts/entity_extr.py | 1 | 2984 | # /usr/bin/python
# -*- coding: utf-8 -*-
"""
This program is to
extract named entities from an annotated data file
CS137B, programming assignment #1, Spring 2015
"""
import re
import sys
reload(sys)
sys.setdefaultencoding('utf8')
__author__ = 'krim'
__date__ = '2/6/2015'
__email__ = '[email protected]'
def read(input_filename):
"""load sentences from data file"""
sentences = []
    sentence = []
    prev_empty = False
with open(input_filename) as in_file:
for line in in_file:
if re.search(r"^\s+$", line):
if not prev_empty:
sentences.append(sentence)
sentence = []
prev_empty = True
else:
try:
sentence.append((line.split("\t")[1].strip(),
line.split("\t")[2].strip(),
line.split("\t")[3].strip()))
except IndexError:
sentence.append((line.split("\t")[1].strip(),
line.split("\t")[2].strip(), ""))
prev_empty = False
return sentences
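# Illustrative shape of the value returned by read(), assuming a tab-separated
# input whose columns 2-4 hold the token, a POS-like tag and the BIO chunk tag:
#   [[("John", "NNP", "B-PER"), ("lives", "VBZ", "O"), ...],     # sentence 1
#    [("Acme", "NNP", "B-ORG"), ("Corp", "NNP", "I-ORG"), ...]]  # sentence 2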
def find_entities(sents):
    # we'll use 4 lists: ORG, GEO, PERSON, OTHER
org = []
geo = []
other = []
person = []
entity = ""
for sent in sents:
for w, _, b in sent:
try:
bio = b.split("-")[0]
typ = b.split("-")[1]
except IndexError:
bio = "O"
typ = ""
# for person names, do not concatenate
if typ == "PER":
if len(entity) > 0:
cur.append(entity)
entity = ""
person.append(w)
# else, keep track of "I" tagged words and concatenate
else:
if bio == "B":
if len(entity) > 0:
cur.append(entity)
entity = w
if typ == "ORG":
cur = org
elif typ == "LOC" or typ == "GPE":
cur = geo
else:
cur = other
elif bio == "I":
entity += " " + w
else:
if len(entity) > 0:
cur.append(entity)
entity = ""
# write out lists to coresponding files
with open("org.extr", "w") as orgf, \
open("other.extr", "w") as otherf, \
open("person.extr", "w") as personf, \
open("geo.extr", "w") as geof:
for o in org:
orgf.write(o + "\n")
for ot in other:
otherf.write(ot + "\n")
for p in person:
personf.write(p + "\n")
for g in geo:
geof.write(g + "\n")
if __name__ == '__main__':
# tempted to use all.gold...
find_entities(read("../dataset/train.gold"))
| gpl-3.0 | -1,419,966,461,462,060,800 | 29.141414 | 70 | 0.413539 | false |
kedder/soaring-coupons | coupons/models.py | 1 | 8712 | from typing import Sequence
import logging
import pytz
import random
import string
import itertools
from datetime import date, datetime
from decimal import Decimal
from django.db import models
log = logging.getLogger(__name__)
SEASON_START_MONTH = 4
SEASON_END_MONTH = 10
class CouponType(models.Model):
id = models.CharField(max_length=32, primary_key=True)
price = models.DecimalField(max_digits=10, decimal_places=2)
title = models.CharField(max_length=255)
welcome_text = models.TextField(null=True)
validity_cond_text = models.CharField(max_length=255, null=True)
deafult_expiration_date = models.DateField()
in_stock = models.BooleanField(default=True)
# Template to use when printing the coupon. Will use django template in
# `templates/coupons/{}.html`
print_template = models.CharField(
max_length=32,
choices=[("flight", "Flight Coupon"), ("courses", "Courses Coupon")],
)
def __str__(self) -> str:
return self.title
class Order(models.Model):
ST_PENDING = 1
ST_PAID = 2
ST_CANCELLED = 3
ST_SPAWNED = 4
coupon_type = models.ForeignKey(CouponType, on_delete=models.CASCADE)
quantity = models.IntegerField()
price = models.DecimalField(max_digits=10, decimal_places=2)
discount = models.DecimalField(max_digits=10, decimal_places=2, default=0)
currency = models.CharField(max_length=8)
paid_amount = models.DecimalField(max_digits=10, decimal_places=2, null=True)
paid_currency = models.CharField(max_length=8, null=True)
payer_name = models.CharField(max_length=255, null=True)
payer_surname = models.CharField(max_length=255, null=True)
payer_email = models.CharField(max_length=255, null=True)
payment_provider = models.CharField(max_length=255, null=True)
test = models.BooleanField(default=False)
status = models.IntegerField(
choices=[
(ST_PENDING, "Pending"),
(ST_PAID, "Paid"),
(ST_CANCELLED, "Cancelled"),
(ST_SPAWNED, "Spawned"),
],
default=ST_PENDING,
)
create_time = models.DateTimeField()
payment_time = models.DateTimeField(null=True)
notes = models.CharField(max_length=255, null=True)
@classmethod
def from_type(cls, coupon_type: CouponType, quantity: int = 1) -> "Order":
return Order(
coupon_type=coupon_type,
quantity=quantity,
price=coupon_type.price,
currency="EUR",
create_time=datetime.now(pytz.utc),
)
def apply_discount(self, discount: int) -> None:
new_price = self.price * (1 - discount / Decimal("100"))
new_price = round(new_price, 2)
self.discount = self.price - new_price
self.price = new_price
log.info(
f"Applied {discount}% discount ({self.discount} {self.currency}) "
f"to order {self.id}"
)
def process(
self,
*,
paid_amount: float,
paid_currency: str,
payer_email: str = None,
payer_name: str = None,
payer_surname: str = None,
payment_provider: str = None,
) -> Sequence["Coupon"]:
"""Process order payment.
Updates order with supplied information and updates status to ST_PAID.
Creates Coupon object. Payment information must be validated before
passing to this method.
"""
if self.status != Order.ST_PENDING:
raise ValueError(f"Cannot process non-pending order {self.id}")
self.paid_amount = paid_amount
self.paid_currency = paid_currency
self.payer_email = payer_email
self.payer_name = payer_name
self.payer_surname = payer_surname
self.status = Order.ST_PAID
self.payment_time = datetime.now(pytz.utc)
self.payment_provider = payment_provider
self.save()
log.info("Order %s processed" % self.id)
# create coupon
assert self.quantity == 1
return Coupon.from_order(self)
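    # Illustrative use of process() once a payment callback has been verified
    # (all field values below are made up for the sketch):
    #
    #   order = Order.from_type(coupon_type)
    #   order.save()
    #   coupons = order.process(
    #       paid_amount=order.price,
    #       paid_currency=order.currency,
    #       payer_email="payer@example.com",
    #       payment_provider="bank",
    #   )
    #   # order.status is now Order.ST_PAID and exactly one Coupon was created.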
def find_coupons(self) -> Sequence["Coupon"]:
return list(Coupon.objects.filter(order=self))
@property
def paid(self) -> bool:
return self.status == Order.ST_PAID
class Coupon(models.Model):
ST_ACTIVE = 1
ST_USED = 2
id = models.CharField(max_length=12, primary_key=True)
order = models.ForeignKey(Order, on_delete=models.CASCADE)
year = models.IntegerField()
status = models.IntegerField(
choices=[(ST_ACTIVE, "Active"), (ST_USED, "Used")], default=ST_ACTIVE
)
use_time = models.DateTimeField(null=True, blank=True)
expires = models.DateField(null=True, blank=True)
@staticmethod
def from_order(order: Order, expires: date = None) -> Sequence["Coupon"]:
"""Create couponse for given order"""
ctype = order.coupon_type
payment_year = (
order.payment_time.year if order.payment_time else order.create_time.year
)
if expires is None:
            # Come up with a sensible expiration date from coupon type settings
expires = ctype.deafult_expiration_date.replace(year=payment_year)
# If ticket is sold after this year's expiration date, move it to
# the next year
if date.today() > expires:
expires = expires.replace(year=payment_year + 1)
coupons = []
for x in range(order.quantity):
coupon = Coupon(
id=Coupon.gen_unique_id(),
order=order,
year=payment_year,
expires=expires,
)
coupon.save()
coupons.append(coupon)
log.info(f"Coupon {coupon.id} created")
return coupons
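    # Worked example of the default-expiry rule above (dates are illustrative):
    # if ctype.deafult_expiration_date is Oct 1 and the order is processed on
    # 2020-11-15, expires starts as 2020-10-01, which is already past, so it
    # is pushed one year ahead to 2021-10-01.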
@staticmethod
def spawn(
coupon_type: CouponType,
*,
count: int,
email: str,
expires: date,
notes: str = None,
) -> Sequence["Coupon"]:
log.info("Spawning %s coupons", count)
order = Order.from_type(coupon_type, quantity=count)
order.status = Order.ST_SPAWNED
order.notes = notes
order.payer_email = email
order.payment_time = datetime.now(pytz.utc)
order.save()
return Coupon.from_order(order)
@staticmethod
def gen_unique_id() -> str:
# add some random digits to make order ids less predictable
seed = "".join(random.choice(string.digits) for i in range(10))
year = date.today().strftime("%y")
uniqueid = f"{year}{seed}"
# make sure it is really unique
for attempt in range(10):
try:
Coupon.objects.get(id=uniqueid)
log.warning(f"Generated coupon id '{uniqueid}' is not unique")
except Coupon.DoesNotExist:
return uniqueid
raise RuntimeError("Cannot generate unique coupon id")
@staticmethod
def get_valid_expirations(today, count):
def seq(start):
curmonth = today.month + 1
curyear = start.year
earliest_month = SEASON_START_MONTH + 3
while True:
if curmonth > SEASON_END_MONTH:
curyear += 1
curmonth = 1
if curmonth <= earliest_month:
curmonth = earliest_month
yield date(curyear, curmonth, 1)
curmonth += 1
return list(itertools.islice(seq(today), 0, count))
@property
def active(self):
expired = self.expires and date.today() > self.expires
active = self.status == Coupon.ST_ACTIVE
return active and not expired
@property
def coupon_type(self) -> CouponType:
return self.order.coupon_type
def use(self) -> None:
if not self.active:
raise ValueError(f"Cannot use non-active coupon {self.id}")
self.status = Coupon.ST_USED
self.use_time = datetime.now(pytz.utc)
self.save()
log.info(f"Coupon {self.id} used")
class ScheduledDiscount(models.Model):
date_from = models.DateTimeField()
date_to = models.DateTimeField()
discount = models.IntegerField()
comment = models.TextField(null=True)
@staticmethod
def find_discount_on(now: datetime) -> int:
"""Return discount in percent (0-100) for given time
Or 0 if no discount."""
relevant = ScheduledDiscount.objects.filter(date_from__lte=now, date_to__gt=now)
# Latest discount takes precedence
relevant = relevant.order_by("-date_from")
for sd in relevant:
return sd.discount
# No discounts found
return 0
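    # Illustrative checkout-time use (values are made up for the sketch):
    #
    #   now = datetime.now(pytz.utc)
    #   percent = ScheduledDiscount.find_discount_on(now)
    #   if percent:
    #       order.apply_discount(percent)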
| agpl-3.0 | -2,247,421,808,241,636,900 | 31.75188 | 88 | 0.607094 | false |
teoliphant/scipy | scipy/sparse/tests/test_base.py | 2 | 65539 | #
# Authors: Travis Oliphant, Ed Schofield, Robert Cimrman, Nathan Bell, and others
""" Test functions for sparse matrices
"""
__usage__ = """
Build sparse:
python setup.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.sparse.test()'
Run tests if sparse is not installed:
python tests/test_sparse.py
"""
import sys
import warnings
import numpy as np
from numpy import arange, zeros, array, dot, matrix, asmatrix, asarray, \
vstack, ndarray, transpose, diag, kron, inf, conjugate, \
int8, ComplexWarning
import random
from numpy.testing import assert_raises, assert_equal, assert_array_equal, \
assert_array_almost_equal, assert_almost_equal, assert_, \
dec, TestCase, run_module_suite
import scipy.linalg
import scipy.sparse as sparse
from scipy.sparse import csc_matrix, csr_matrix, dok_matrix, \
coo_matrix, lil_matrix, dia_matrix, bsr_matrix, \
eye, isspmatrix, SparseEfficiencyWarning
from scipy.sparse.sputils import supported_dtypes
from scipy.sparse.linalg import splu, expm, inv
warnings.simplefilter('ignore', SparseEfficiencyWarning)
warnings.simplefilter('ignore', ComplexWarning)
#TODO check that spmatrix( ... , copy=X ) is respected
#TODO test prune
#TODO test has_sorted_indices
class _TestCommon:
"""test common functionality shared by all sparse formats"""
def setUp(self):
self.dat = matrix([[1,0,0,2],[3,0,1,0],[0,2,0,0]],'d')
self.datsp = self.spmatrix(self.dat)
def test_empty(self):
"""create empty matrices"""
assert_equal(self.spmatrix((3,3)).todense(), np.zeros((3,3)))
assert_equal(self.spmatrix((3,3)).nnz, 0)
def test_invalid_shapes(self):
assert_raises(ValueError, self.spmatrix, (-1,3) )
assert_raises(ValueError, self.spmatrix, (3,-1) )
assert_raises(ValueError, self.spmatrix, (-1,-1) )
def test_repr(self):
repr(self.datsp)
def test_str(self):
str(self.datsp)
def test_empty_arithmetic(self):
"""Test manipulating empty matrices. Fails in SciPy SVN <= r1768
"""
shape = (5, 5)
for mytype in [np.dtype('int32'), np.dtype('float32'),
np.dtype('float64'), np.dtype('complex64'),
np.dtype('complex128')]:
a = self.spmatrix(shape, dtype=mytype)
b = a + a
c = 2 * a
d = a * a.tocsc()
e = a * a.tocsr()
f = a * a.tocoo()
for m in [a,b,c,d,e,f]:
assert_equal(m.A, a.A*a.A)
# These fail in all revisions <= r1768:
assert_equal(m.dtype,mytype)
assert_equal(m.A.dtype,mytype)
def test_abs(self):
A = matrix([[-1, 0, 17],[0, -5, 0],[1, -4, 0],[0,0,0]],'d')
assert_equal(abs(A),abs(self.spmatrix(A)).todense())
def test_neg(self):
A = matrix([[-1, 0, 17],[0, -5, 0],[1, -4, 0],[0,0,0]],'d')
assert_equal(-A,(-self.spmatrix(A)).todense())
def test_real(self):
D = matrix([[1 + 3j, 2 - 4j]])
A = self.spmatrix(D)
assert_equal(A.real.todense(),D.real)
def test_imag(self):
D = matrix([[1 + 3j, 2 - 4j]])
A = self.spmatrix(D)
assert_equal(A.imag.todense(),D.imag)
def test_diagonal(self):
"""Does the matrix's .diagonal() method work?
"""
mats = []
mats.append( [[1,0,2]] )
mats.append( [[1],[0],[2]] )
mats.append( [[0,1],[0,2],[0,3]] )
mats.append( [[0,0,1],[0,0,2],[0,3,0]] )
mats.append( kron(mats[0],[[1,2]]) )
mats.append( kron(mats[0],[[1],[2]]) )
mats.append( kron(mats[1],[[1,2],[3,4]]) )
mats.append( kron(mats[2],[[1,2],[3,4]]) )
mats.append( kron(mats[3],[[1,2],[3,4]]) )
mats.append( kron(mats[3],[[1,2,3,4]]) )
for m in mats:
assert_equal(self.spmatrix(m).diagonal(),diag(m))
def test_nonzero(self):
A = array([[1, 0, 1],[0, 1, 1],[ 0, 0, 1]])
Asp = self.spmatrix(A)
A_nz = set( [tuple(ij) for ij in transpose(A.nonzero())] )
Asp_nz = set( [tuple(ij) for ij in transpose(Asp.nonzero())] )
assert_equal(A_nz, Asp_nz)
def test_getrow(self):
assert_array_equal(self.datsp.getrow(1).todense(), self.dat[1,:])
assert_array_equal(self.datsp.getrow(-1).todense(), self.dat[-1,:])
def test_getcol(self):
assert_array_equal(self.datsp.getcol(1).todense(), self.dat[:,1])
assert_array_equal(self.datsp.getcol(-1).todense(), self.dat[:,-1])
def test_sum(self):
"""Does the matrix's .sum(axis=...) method work?
"""
assert_array_equal(self.dat.sum(), self.datsp.sum())
assert_array_equal(self.dat.sum(axis=None), self.datsp.sum(axis=None))
assert_array_equal(self.dat.sum(axis=0), self.datsp.sum(axis=0))
assert_array_equal(self.dat.sum(axis=1), self.datsp.sum(axis=1))
def test_mean(self):
"""Does the matrix's .mean(axis=...) method work?
"""
assert_array_equal(self.dat.mean(), self.datsp.mean())
assert_array_equal(self.dat.mean(axis=None), self.datsp.mean(axis=None))
assert_array_equal(self.dat.mean(axis=0), self.datsp.mean(axis=0))
assert_array_equal(self.dat.mean(axis=1), self.datsp.mean(axis=1))
def test_expm(self):
M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float)
sM = self.spmatrix(M, shape=(3,3), dtype=float)
Mexp = scipy.linalg.expm(M)
sMexp = expm(sM).todense()
assert_array_almost_equal((sMexp - Mexp), zeros((3, 3)))
N = array([[ 3., 0., 1.], [ 0., 2., 0.], [ 0., 0., 0.]])
sN = self.spmatrix(N, shape=(3,3), dtype=float)
Nexp = scipy.linalg.expm(N)
sNexp = expm(sN).todense()
assert_array_almost_equal((sNexp - Nexp), zeros((3, 3)))
def test_inv(self):
M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float)
sM = self.spmatrix(M, shape=(3,3), dtype=float)
sMinv = inv(sM)
assert_array_almost_equal(sMinv.dot(sM).todense(), np.eye(3))
def test_from_array(self):
A = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
assert_array_equal(self.spmatrix(A).toarray(), A)
A = array([[1.0 + 3j, 0, 0],
[ 0, 2.0 + 5, 0],
[ 0, 0, 0]])
assert_array_equal(self.spmatrix(A).toarray(), A)
assert_array_equal(self.spmatrix(A, dtype='int16').toarray(), A.astype('int16'))
def test_from_matrix(self):
A = matrix([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
assert_array_equal(self.spmatrix(A).todense(), A)
A = matrix([[1.0 + 3j, 0, 0],
[ 0, 2.0 + 5, 0],
[ 0, 0, 0]])
assert_array_equal(self.spmatrix(A).toarray(), A)
assert_array_equal(self.spmatrix(A, dtype='int16').toarray(), A.astype('int16'))
def test_from_list(self):
A = [[1,0,0],[2,3,4],[0,5,0],[0,0,0]]
assert_array_equal(self.spmatrix(A).todense(), A)
A = [[1.0 + 3j, 0, 0],
[ 0, 2.0 + 5, 0],
[ 0, 0, 0]]
assert_array_equal(self.spmatrix(A).toarray(), array(A))
assert_array_equal(self.spmatrix(A, dtype='int16').todense(), array(A).astype('int16'))
def test_from_sparse(self):
D = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]])
S = csr_matrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
S = self.spmatrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
D = array([[1.0 + 3j, 0, 0],
[ 0, 2.0 + 5, 0],
[ 0, 0, 0]])
S = csr_matrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
assert_array_equal(self.spmatrix(S, dtype='int16').toarray(), D.astype('int16'))
S = self.spmatrix(D)
assert_array_equal(self.spmatrix(S).toarray(), D)
assert_array_equal(self.spmatrix(S, dtype='int16').toarray(), D.astype('int16'))
#def test_array(self):
# """test array(A) where A is in sparse format"""
# assert_equal( array(self.datsp), self.dat )
def test_todense(self):
# Check C-contiguous (default).
chk = self.datsp.todense()
assert_array_equal(chk, self.dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check C-contiguous (with arg).
chk = self.datsp.todense(order='C')
assert_array_equal(chk, self.dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check F-contiguous (with arg).
chk = self.datsp.todense(order='F')
assert_array_equal(chk, self.dat)
assert_(not chk.flags.c_contiguous)
assert_(chk.flags.f_contiguous)
# Check with out argument (array).
out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype)
chk = self.datsp.todense(out=out)
assert_array_equal(self.dat, out)
assert_array_equal(self.dat, chk)
assert_(chk.base is out)
# Check with out array (matrix).
out = np.asmatrix(np.zeros(self.datsp.shape, dtype=self.datsp.dtype))
chk = self.datsp.todense(out=out)
assert_array_equal(self.dat, out)
assert_array_equal(self.dat, chk)
assert_(chk is out)
a = matrix([1.,2.,3.])
dense_dot_dense = a * self.dat
check = a * self.datsp.todense()
assert_array_equal(dense_dot_dense, check)
b = matrix([1.,2.,3.,4.]).T
dense_dot_dense = self.dat * b
check2 = self.datsp.todense() * b
assert_array_equal(dense_dot_dense, check2)
def test_toarray(self):
# Check C-contiguous (default).
dat = asarray(self.dat)
chk = self.datsp.toarray()
assert_array_equal(chk, dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check C-contiguous (with arg).
chk = self.datsp.toarray(order='C')
assert_array_equal(chk, dat)
assert_(chk.flags.c_contiguous)
assert_(not chk.flags.f_contiguous)
# Check F-contiguous (with arg).
chk = self.datsp.toarray(order='F')
assert_array_equal(chk, dat)
assert_(not chk.flags.c_contiguous)
assert_(chk.flags.f_contiguous)
# Check with output arg.
out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype)
self.datsp.toarray(out=out)
assert_array_equal(chk, dat)
# Check that things are fine when we don't initialize with zeros.
out[...] = 1.
self.datsp.toarray(out=out)
assert_array_equal(chk, dat)
a = array([1.,2.,3.])
dense_dot_dense = dot(a, dat)
check = dot(a, self.datsp.toarray())
assert_array_equal(dense_dot_dense, check)
b = array([1.,2.,3.,4.])
dense_dot_dense = dot(dat, b)
check2 = dot(self.datsp.toarray(), b)
assert_array_equal(dense_dot_dense, check2)
def test_astype(self):
D = array([[1.0 + 3j, 0, 0],
[ 0, 2.0 + 5, 0],
[ 0, 0, 0]])
S = self.spmatrix(D)
for x in supported_dtypes:
assert_equal(S.astype(x).dtype, D.astype(x).dtype) # correct type
assert_equal(S.astype(x).toarray(), D.astype(x)) # correct values
assert_equal(S.astype(x).format, S.format) # format preserved
def test_asfptype(self):
A = self.spmatrix( arange(6,dtype='int32').reshape(2,3) )
assert_equal( A.dtype , np.dtype('int32') )
assert_equal( A.asfptype().dtype, np.dtype('float64') )
assert_equal( A.asfptype().format, A.format )
assert_equal( A.astype('int16').asfptype().dtype , np.dtype('float32') )
assert_equal( A.astype('complex128').asfptype().dtype , np.dtype('complex128') )
B = A.asfptype()
C = B.asfptype()
assert_( B is C )
def test_mul_scalar(self):
assert_array_equal(self.dat*2,(self.datsp*2).todense())
assert_array_equal(self.dat*17.3,(self.datsp*17.3).todense())
def test_rmul_scalar(self):
assert_array_equal(2*self.dat,(2*self.datsp).todense())
assert_array_equal(17.3*self.dat,(17.3*self.datsp).todense())
def test_add(self):
a = self.dat.copy()
a[0,2] = 2.0
b = self.datsp
c = b + a
assert_array_equal(c,[[2,0,2,4],[6,0,2,0],[0,4,0,0]])
def test_radd(self):
a = self.dat.copy()
a[0,2] = 2.0
b = self.datsp
c = a + b
assert_array_equal(c,[[2,0,2,4],[6,0,2,0],[0,4,0,0]])
def test_sub(self):
assert_array_equal((self.datsp - self.datsp).todense(),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
assert_array_equal((self.datsp - A).todense(),self.dat - A.todense())
assert_array_equal((A - self.datsp).todense(),A.todense() - self.dat)
def test_rsub(self):
assert_array_equal((self.dat - self.datsp),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
assert_array_equal((self.datsp - self.dat),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
assert_array_equal((self.dat - A),self.dat - A.todense())
assert_array_equal((A - self.dat),A.todense() - self.dat)
assert_array_equal(A.todense() - self.datsp,A.todense() - self.dat)
assert_array_equal(self.datsp - A.todense(),self.dat - A.todense())
def test_add0(self):
""" Adding 0 to a sparse matrix """
assert_array_equal((self.datsp + 0).todense(), self.dat)
# use sum (which takes 0 as a starting value)
sumS = sum([k * self.datsp for k in range(1, 3)])
sumD = sum([k * self.dat for k in range(1, 3)])
assert_almost_equal(sumS.todense(), sumD)
def test_elementwise_multiply(self):
# real/real
A = array([[4,0,9],[2,-3,5]])
B = array([[0,7,0],[0,-4,0]])
Asp = self.spmatrix(A)
Bsp = self.spmatrix(B)
assert_almost_equal( Asp.multiply(Bsp).todense(), A*B) #sparse/sparse
assert_almost_equal( Asp.multiply(B), A*B) #sparse/dense
# complex/complex
C = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]])
D = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]])
Csp = self.spmatrix(C)
Dsp = self.spmatrix(D)
assert_almost_equal( Csp.multiply(Dsp).todense(), C*D) #sparse/sparse
assert_almost_equal( Csp.multiply(D), C*D) #sparse/dense
# real/complex
assert_almost_equal( Asp.multiply(Dsp).todense(), A*D) #sparse/sparse
assert_almost_equal( Asp.multiply(D), A*D) #sparse/dense
def test_elementwise_divide(self):
expected = [[1,0,0,1],[1,0,1,0],[0,1,0,0]]
assert_array_equal((self.datsp / self.datsp).todense(),expected)
denom = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
res = matrix([[1,0,0,0.5],[-3,0,inf,0],[0,0.25,0,0]],'d')
assert_array_equal((self.datsp / denom).todense(),res)
# complex
A = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]])
B = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]])
Asp = self.spmatrix(A)
Bsp = self.spmatrix(B)
assert_almost_equal( (Asp / Bsp).todense(), A/B)
def test_pow(self):
A = matrix([[1,0,2,0],[0,3,4,0],[0,5,0,0],[0,6,7,8]])
B = self.spmatrix( A )
for exponent in [0,1,2,3]:
assert_array_equal((B**exponent).todense(),A**exponent)
#invalid exponents
for exponent in [-1, 2.2, 1 + 3j]:
self.assertRaises( Exception, B.__pow__, exponent )
#nonsquare matrix
B = self.spmatrix(A[:3,:])
self.assertRaises( Exception, B.__pow__, 1 )
def test_rmatvec(self):
M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
assert_array_almost_equal([1,2,3,4]*M, dot([1,2,3,4], M.toarray()))
row = matrix([[1,2,3,4]])
assert_array_almost_equal(row*M, row*M.todense())
def test_small_multiplication(self):
"""test that A*x works for x with shape () (1,) and (1,1)
"""
A = self.spmatrix([[1],[2],[3]])
assert_(isspmatrix(A * array(1)))
assert_equal((A * array(1)).todense(), [[1],[2],[3]])
assert_equal(A * array([1]), array([1,2,3]))
assert_equal(A * array([[1]]), array([[1],[2],[3]]))
def test_matvec(self):
M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]))
col = matrix([1,2,3]).T
assert_array_almost_equal(M * col, M.todense() * col)
#check result dimensions (ticket #514)
assert_equal((M * array([1,2,3])).shape,(4,))
assert_equal((M * array([[1],[2],[3]])).shape,(4,1))
assert_equal((M * matrix([[1],[2],[3]])).shape,(4,1))
#check result type
assert_(isinstance( M * array([1,2,3]), ndarray))
assert_(isinstance( M * matrix([1,2,3]).T, matrix))
#ensure exception is raised for improper dimensions
bad_vecs = [array([1,2]), array([1,2,3,4]), array([[1],[2]]),
matrix([1,2,3]), matrix([[1],[2]])]
for x in bad_vecs:
assert_raises(ValueError, M.__mul__, x)
# Should this be supported or not?!
#flat = array([1,2,3])
#assert_array_almost_equal(M*flat, M.todense()*flat)
# Currently numpy dense matrices promote the result to a 1x3 matrix,
# whereas sparse matrices leave the result as a rank-1 array. Which
# is preferable?
# Note: the following command does not work. Both NumPy matrices
# and spmatrices should raise exceptions!
# assert_array_almost_equal(M*[1,2,3], M.todense()*[1,2,3])
# The current relationship between sparse matrix products and array
# products is as follows:
assert_array_almost_equal(M*array([1,2,3]), dot(M.A,[1,2,3]))
assert_array_almost_equal(M*[[1],[2],[3]], asmatrix(dot(M.A,[1,2,3])).T)
# Note that the result of M * x is dense if x has a singleton dimension.
# Currently M.matvec(asarray(col)) is rank-1, whereas M.matvec(col)
# is rank-2. Is this desirable?
def test_matmat_sparse(self):
a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
a2 = array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
b = matrix([[0,1],[1,0],[0,2]],'d')
asp = self.spmatrix(a)
bsp = self.spmatrix(b)
assert_array_almost_equal((asp*bsp).todense(), a*b)
assert_array_almost_equal( asp*b, a*b)
assert_array_almost_equal( a*bsp, a*b)
assert_array_almost_equal( a2*bsp, a*b)
# Now try performing cross-type multiplication:
csp = bsp.tocsc()
c = b
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal( asp*c, a*c)
assert_array_almost_equal( a*csp, a*c)
assert_array_almost_equal( a2*csp, a*c)
csp = bsp.tocsr()
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal( asp*c, a*c)
assert_array_almost_equal( a*csp, a*c)
assert_array_almost_equal( a2*csp, a*c)
csp = bsp.tocoo()
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal( asp*c, a*c)
assert_array_almost_equal( a*csp, a*c)
assert_array_almost_equal( a2*csp, a*c)
# Test provided by Andy Fraser, 2006-03-26
L = 30
frac = .3
random.seed(0) # make runs repeatable
A = zeros((L,2))
for i in xrange(L):
for j in xrange(2):
r = random.random()
if r < frac:
A[i,j] = r/frac
A = self.spmatrix(A)
B = A*A.T
assert_array_almost_equal(B.todense(), A.todense() * A.T.todense())
assert_array_almost_equal(B.todense(), A.todense() * A.todense().T)
# check dimension mismatch 2x2 times 3x2
A = self.spmatrix( [[1,2],[3,4]] )
B = self.spmatrix( [[1,2],[3,4],[5,6]] )
assert_raises(ValueError, A.__mul__, B)
def test_matmat_dense(self):
a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
asp = self.spmatrix(a)
# check both array and matrix types
bs = [ array([[1,2],[3,4],[5,6]]), matrix([[1,2],[3,4],[5,6]]) ]
for b in bs:
result = asp*b
assert_( isinstance(result, type(b)) )
assert_equal( result.shape, (4,2) )
assert_equal( result, dot(a,b) )
def test_sparse_format_conversions(self):
A = sparse.kron( [[1,0,2],[0,3,4],[5,0,0]], [[1,2],[0,3]] )
D = A.todense()
A = self.spmatrix(A)
for format in ['bsr','coo','csc','csr','dia','dok','lil']:
a = A.asformat(format)
assert_equal(a.format,format)
assert_array_equal(a.todense(), D)
b = self.spmatrix(D+3j).asformat(format)
assert_equal(b.format,format)
assert_array_equal(b.todense(), D+3j)
c = eval(format + '_matrix')(A)
assert_equal(c.format,format)
assert_array_equal(c.todense(), D)
def test_tobsr(self):
x = array([[1,0,2,0],[0,0,0,0],[0,0,4,5]])
y = array([[0,1,2],[3,0,5]])
A = kron(x,y)
Asp = self.spmatrix(A)
for format in ['bsr']:
fn = getattr(Asp, 'to' + format )
for X in [ 1, 2, 3, 6 ]:
for Y in [ 1, 2, 3, 4, 6, 12]:
assert_equal( fn(blocksize=(X,Y)).todense(), A)
def test_transpose(self):
a = self.datsp.transpose()
b = self.dat.transpose()
assert_array_equal(a.todense(), b)
assert_array_equal(a.transpose().todense(), self.dat)
assert_array_equal( self.spmatrix((3,4)).T.todense(), zeros((4,3)) )
def test_add_dense(self):
""" adding a dense matrix to a sparse matrix
"""
sum1 = self.dat + self.datsp
assert_array_equal(sum1, 2*self.dat)
sum2 = self.datsp + self.dat
assert_array_equal(sum2, 2*self.dat)
def test_sub_dense(self):
""" subtracting a dense matrix to/from a sparse matrix
"""
sum1 = 3*self.dat - self.datsp
assert_array_equal(sum1, 2*self.dat)
sum2 = 3*self.datsp - self.dat
assert_array_equal(sum2, 2*self.dat)
def test_copy(self):
""" Check whether the copy=True and copy=False keywords work
"""
A = self.datsp
#check that copy preserves format
assert_equal(A.copy().format, A.format)
assert_equal(A.__class__(A,copy=True).format, A.format)
assert_equal(A.__class__(A,copy=False).format, A.format)
assert_equal(A.copy().todense(), A.todense())
assert_equal(A.__class__(A,copy=True).todense(), A.todense())
assert_equal(A.__class__(A,copy=False).todense(), A.todense())
#check that XXX_matrix.toXXX() works
toself = getattr(A,'to' + A.format)
assert_equal(toself().format, A.format)
assert_equal(toself(copy=True).format, A.format)
assert_equal(toself(copy=False).format, A.format)
assert_equal(toself().todense(), A.todense())
assert_equal(toself(copy=True).todense(), A.todense())
assert_equal(toself(copy=False).todense(), A.todense())
# Check that the data is actually copied, not shared
# TODO: deal with non-indexable types somehow
B = A.copy()
try:
B[0,0] += 1
assert_(B[0,0] != A[0,0])
except NotImplementedError:
# not all sparse matrices can be indexed
pass
except TypeError:
# not all sparse matrices can be indexed
pass
# Eventually we'd like to allow matrix products between dense
# and sparse matrices using the normal dot() function:
#def test_dense_dot_sparse(self):
# a = array([1.,2.,3.])
# dense_dot_dense = dot(a, self.dat)
# dense_dot_sparse = dot(a, self.datsp)
# assert_array_equal(dense_dot_dense, dense_dot_sparse)
#def test_sparse_dot_dense(self):
# b = array([1.,2.,3.,4.])
# dense_dot_dense = dot(self.dat, b)
# dense_dot_sparse = dot(self.datsp, b)
# assert_array_equal(dense_dot_dense, dense_dot_sparse)
class _TestInplaceArithmetic:
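"""Tests in-place scalar arithmetic (*= and /=) on sparse matrices."""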
def test_imul_scalar(self):
a = self.datsp.copy()
a *= 2
assert_array_equal(self.dat*2,a.todense())
a = self.datsp.copy()
a *= 17.3
assert_array_equal(self.dat*17.3,a.todense())
def test_idiv_scalar(self):
a = self.datsp.copy()
a /= 2
assert_array_equal(self.dat/2,a.todense())
a = self.datsp.copy()
a /= 17.3
assert_array_equal(self.dat/17.3,a.todense())
class _TestGetSet:
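"""Tests element access and assignment via __getitem__ and __setitem__."""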
def test_setelement(self):
A = self.spmatrix((3,4))
A[ 0, 0] = 0 # bug 870
A[ 1, 2] = 4.0
A[ 0, 1] = 3
A[ 2, 0] = 2.0
A[ 0,-1] = 8
A[-1,-2] = 7
A[ 0, 1] = 5
assert_array_equal(A.todense(),[[0,5,0,8],[0,0,4,0],[2,0,7,0]])
for ij in [(0,4),(-1,4),(3,0),(3,4),(3,-1)]:
assert_raises(IndexError, A.__setitem__, ij, 123.0)
for v in [[1,2,3], array([1,2,3])]:
assert_raises(ValueError, A.__setitem__, (0,0), v)
for v in [3j]:
assert_raises(TypeError, A.__setitem__, (0,0), v)
def test_getelement(self):
D = array([[1,0,0],
[4,3,0],
[0,2,0],
[0,0,0]])
A = self.spmatrix(D)
M,N = D.shape
for i in range(-M, M):
for j in range(-N, N):
assert_equal(A[i,j], D[i,j])
for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1)]:
assert_raises(IndexError, A.__getitem__, ij)
class _TestSolve:
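"""Tests a sparse direct solve (splu) on a Hermitian tridiagonal system."""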
def test_solve(self):
""" Test whether the lu_solve command segfaults, as reported by Nils
Wagner for a 64-bit machine, 02 March 2005 (EJS)
"""
n = 20
np.random.seed(0) #make tests repeatable
A = zeros((n,n), dtype=complex)
x = np.random.rand(n)
y = np.random.rand(n-1)+1j*np.random.rand(n-1)
r = np.random.rand(n)
for i in range(len(x)):
A[i,i] = x[i]
for i in range(len(y)):
A[i,i+1] = y[i]
A[i+1,i] = conjugate(y[i])
A = self.spmatrix(A)
x = splu(A).solve(r)
assert_almost_equal(A*x,r)
class _TestHorizSlicing:
"""Tests horizontal slicing (e.g. [0, :]). Tests for individual sparse
matrix types that implement this should derive from this class.
"""
def test_get_horiz_slice(self):
"""Test for new slice functionality (EJS)"""
B = asmatrix(arange(50.).reshape(5,10))
A = self.spmatrix(B)
assert_array_equal(B[1,:], A[1,:].todense())
assert_array_equal(B[1,2:5], A[1,2:5].todense())
C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]])
D = self.spmatrix(C)
assert_array_equal(C[1, 1:3], D[1, 1:3].todense())
# Now test slicing when a row contains only zeros
E = matrix([[1, 2, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
F = self.spmatrix(E)
assert_array_equal(E[1, 1:3], F[1, 1:3].todense())
assert_array_equal(E[2, -2:], F[2, -2:].A)
# The following should raise exceptions:
caught = 0
try:
a = A[:,11]
except IndexError:
caught += 1
try:
a = A[6,3:7]
except IndexError:
caught += 1
assert_(caught == 2)
class _TestVertSlicing:
"""Tests vertical slicing (e.g. [:, 0]). Tests for individual sparse
matrix types that implement this should derive from this class.
"""
def test_get_vert_slice(self):
"""Test for new slice functionality (EJS)"""
B = asmatrix(arange(50.).reshape(5,10))
A = self.spmatrix(B)
assert_array_equal(B[2:5,0], A[2:5,0].todense())
assert_array_equal(B[:,1], A[:,1].todense())
C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]])
D = self.spmatrix(C)
assert_array_equal(C[1:3, 1], D[1:3, 1].todense())
assert_array_equal(C[:, 2], D[:, 2].todense())
# Now test slicing when a column contains only zeros
E = matrix([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
F = self.spmatrix(E)
assert_array_equal(E[:, 1], F[:, 1].todense())
assert_array_equal(E[-2:, 2], F[-2:, 2].todense())
# The following should raise exceptions:
caught = 0
try:
a = A[:,11]
except IndexError:
caught += 1
try:
a = A[6,3:7]
except IndexError:
caught += 1
assert_(caught == 2)
class _TestBothSlicing:
"""Tests vertical and horizontal slicing (e.g. [:,0:2]). Tests for
individual sparse matrix types that implement this should derive from this
class.
"""
def test_get_slices(self):
B = asmatrix(arange(50.).reshape(5,10))
A = self.spmatrix(B)
assert_array_equal(A[2:5,0:3].todense(), B[2:5,0:3])
assert_array_equal(A[1:,:-1].todense(), B[1:,:-1])
assert_array_equal(A[:-1,1:].todense(), B[:-1,1:])
# Now test slicing when a column contains only zeros
E = matrix([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]])
F = self.spmatrix(E)
assert_array_equal(E[1:2, 1:2], F[1:2, 1:2].todense())
assert_array_equal(E[:, 1:], F[:, 1:].todense())
class _TestFancyIndexing:
"""Tests fancy indexing features. The tests for any matrix formats
that implement these features should derive from this class.
"""
def test_fancy_indexing_set(self):
n, m = (5, 10)
def _test_set(i, j, nitems):
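# Helper: assign 1 through the given index pair and check the number of stored entries.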
A = self.spmatrix((n, m))
A[i, j] = 1
assert_almost_equal(A.sum(), nitems)
assert_almost_equal(A[i, j], 1)
# [i,j]
for i, j in [(2, 3), (-1, 8), (-1, -2), (array(-1), -2), (-1, array(-2)),
(array(-1), array(-2))]:
_test_set(i, j, 1)
# [i,1:2]
for i, j in [(2, slice(m)), (2, slice(5, -2)), (array(2), slice(5, -2))]:
_test_set(i, j, 3)
def test_fancy_indexing(self):
B = asmatrix(arange(50).reshape(5,10))
A = self.spmatrix( B )
# [i,j]
assert_equal(A[2,3], B[2,3])
assert_equal(A[-1,8], B[-1,8])
assert_equal(A[-1,-2],B[-1,-2])
assert_equal(A[array(-1),-2],B[-1,-2])
assert_equal(A[-1,array(-2)],B[-1,-2])
assert_equal(A[array(-1),array(-2)],B[-1,-2])
# [i,1:2]
assert_equal(A[2,:].todense(), B[2,:])
assert_equal(A[2,5:-2].todense(),B[2,5:-2])
assert_equal(A[array(2),5:-2].todense(),B[2,5:-2])
# [i,[1,2]]
assert_equal(A[3,[1,3]].todense(), B[3,[1,3]])
assert_equal(A[-1,[2,-5]].todense(),B[-1,[2,-5]])
assert_equal(A[array(-1),[2,-5]].todense(),B[-1,[2,-5]])
assert_equal(A[-1,array([2,-5])].todense(),B[-1,[2,-5]])
assert_equal(A[array(-1),array([2,-5])].todense(),B[-1,[2,-5]])
# [1:2,j]
assert_equal(A[:,2].todense(), B[:,2])
assert_equal(A[3:4,9].todense(), B[3:4,9])
assert_equal(A[1:4,-5].todense(),B[1:4,-5])
assert_equal(A[2:-1,3].todense(),B[2:-1,3])
assert_equal(A[2:-1,array(3)].todense(),B[2:-1,3])
# [1:2,1:2]
assert_equal(A[1:2,1:2].todense(),B[1:2,1:2])
assert_equal(A[4:,3:].todense(), B[4:,3:])
assert_equal(A[:4,:5].todense(), B[:4,:5])
assert_equal(A[2:-1,:5].todense(),B[2:-1,:5])
# [1:2,[1,2]]
assert_equal(A[:,[2,8,3,-1]].todense(),B[:,[2,8,3,-1]])
assert_equal(A[3:4,[9]].todense(), B[3:4,[9]])
assert_equal(A[1:4,[-1,-5]].todense(), B[1:4,[-1,-5]])
assert_equal(A[1:4,array([-1,-5])].todense(), B[1:4,[-1,-5]])
# [[1,2],j]
assert_equal(A[[1,3],3].todense(), B[[1,3],3])
assert_equal(A[[2,-5],-4].todense(), B[[2,-5],-4])
assert_equal(A[array([2,-5]),-4].todense(), B[[2,-5],-4])
assert_equal(A[[2,-5],array(-4)].todense(), B[[2,-5],-4])
assert_equal(A[array([2,-5]),array(-4)].todense(), B[[2,-5],-4])
# [[1,2],1:2]
assert_equal(A[[1,3],:].todense(), B[[1,3],:])
assert_equal(A[[2,-5],8:-1].todense(),B[[2,-5],8:-1])
assert_equal(A[array([2,-5]),8:-1].todense(),B[[2,-5],8:-1])
# [[1,2],[1,2]]
assert_equal(A[[1,3],[2,4]], B[[1,3],[2,4]])
assert_equal(A[[-1,-3],[2,-4]],B[[-1,-3],[2,-4]])
assert_equal(A[array([-1,-3]),[2,-4]],B[[-1,-3],[2,-4]])
assert_equal(A[[-1,-3],array([2,-4])],B[[-1,-3],[2,-4]])
assert_equal(A[array([-1,-3]),array([2,-4])],B[[-1,-3],[2,-4]])
# [[[1],[2]],[1,2]]
assert_equal(A[[[1],[3]],[2,4]].todense(), B[[[1],[3]],[2,4]])
assert_equal(A[[[-1],[-3],[-2]],[2,-4]].todense(),B[[[-1],[-3],[-2]],[2,-4]])
assert_equal(A[array([[-1],[-3],[-2]]),[2,-4]].todense(),B[[[-1],[-3],[-2]],[2,-4]])
assert_equal(A[[[-1],[-3],[-2]],array([2,-4])].todense(),B[[[-1],[-3],[-2]],[2,-4]])
assert_equal(A[array([[-1],[-3],[-2]]),array([2,-4])].todense(),B[[[-1],[-3],[-2]],[2,-4]])
# [i]
assert_equal(A[1,:].todense(), B[1,:])
assert_equal(A[-2,:].todense(),B[-2,:])
assert_equal(A[array(-2),:].todense(),B[-2,:])
# [1:2]
assert_equal(A[1:4].todense(), B[1:4])
assert_equal(A[1:-2].todense(),B[1:-2])
# [[1,2]]
assert_equal(A[[1,3]].todense(), B[[1,3]])
assert_equal(A[[-1,-3]].todense(),B[[-1,-3]])
assert_equal(A[array([-1,-3])].todense(),B[[-1,-3]])
# [[1,2],:][:,[1,2]]
assert_equal(A[[1,3],:][:,[2,4]].todense(), B[[1,3],:][:,[2,4]] )
assert_equal(A[[-1,-3],:][:,[2,-4]].todense(), B[[-1,-3],:][:,[2,-4]] )
assert_equal(A[array([-1,-3]),:][:,array([2,-4])].todense(), B[[-1,-3],:][:,[2,-4]] )
# [:,[1,2]][[1,2],:]
assert_equal(A[:,[1,3]][[2,4],:].todense(), B[:,[1,3]][[2,4],:] )
assert_equal(A[:,[-1,-3]][[2,-4],:].todense(), B[:,[-1,-3]][[2,-4],:] )
assert_equal(A[:,array([-1,-3])][array([2,-4]),:].todense(), B[:,[-1,-3]][[2,-4],:] )
# Check bug reported by Robert Cimrman:
# http://thread.gmane.org/gmane.comp.python.scientific.devel/7986
s = slice(int8(2),int8(4),None)
assert_equal(A[s,:].todense(), B[2:4,:])
assert_equal(A[:,s].todense(), B[:,2:4])
def test_fancy_indexing_randomized(self):
random.seed(0) # make runs repeatable
NUM_SAMPLES = 50
M = 6
N = 4
D = np.asmatrix(np.random.rand(M,N))
D = np.multiply(D, D > 0.5)
I = np.random.random_integers(-M + 1, M - 1, size=NUM_SAMPLES)
J = np.random.random_integers(-N + 1, N - 1, size=NUM_SAMPLES)
S = self.spmatrix(D)
assert_equal(S[I,J], D[I,J])
I_bad = I + M
J_bad = J - N
assert_raises(IndexError, S.__getitem__, (I_bad,J))
assert_raises(IndexError, S.__getitem__, (I,J_bad))
class _TestArithmetic:
"""
Test real/complex arithmetic
"""
def arith_init(self):
#these can be represented exactly in FP (so arithmetic should be exact)
self.A = matrix([[ -1.5, 6.5, 0, 2.25, 0, 0],
[ 3.125, -7.875, 0.625, 0, 0, 0],
[ 0, 0, -0.125, 1.0, 0, 0],
[ 0, 0, 8.375, 0, 0, 0]],'float64')
self.B = matrix([[ 0.375, 0, 0, 0, -5, 2.5],
[ 14.25, -3.75, 0, 0, -0.125, 0],
[ 0, 7.25, 0, 0, 0, 0],
[ 18.5, -0.0625, 0, 0, 0, 0]],'complex128')
self.B.imag = matrix([[ 1.25, 0, 0, 0, 6, -3.875],
[ 2.25, 4.125, 0, 0, 0, 2.75],
[ 0, 4.125, 0, 0, 0, 0],
[ -0.0625, 0, 0, 0, 0, 0]],'float64')
#fractions are all x/16ths
assert_array_equal((self.A*16).astype('int32'),16*self.A)
assert_array_equal((self.B.real*16).astype('int32'),16*self.B.real)
assert_array_equal((self.B.imag*16).astype('int32'),16*self.B.imag)
self.Asp = self.spmatrix(self.A)
self.Bsp = self.spmatrix(self.B)
def test_add_sub(self):
self.arith_init()
#basic tests
assert_array_equal((self.Asp+self.Bsp).todense(),self.A+self.B)
#check conversions
for x in supported_dtypes:
A = self.A.astype(x)
Asp = self.spmatrix(A)
for y in supported_dtypes:
B = self.B.astype(y)
Bsp = self.spmatrix(B)
#addition
D1 = A + B
S1 = Asp + Bsp
assert_equal(S1.dtype,D1.dtype)
assert_array_equal(S1.todense(),D1)
assert_array_equal(Asp + B,D1) #check sparse + dense
assert_array_equal(A + Bsp,D1) #check dense + sparse
#subtraction
D1 = A - B
S1 = Asp - Bsp
assert_equal(S1.dtype,D1.dtype)
assert_array_equal(S1.todense(),D1)
assert_array_equal(Asp - B,D1) #check sparse - dense
assert_array_equal(A - Bsp,D1) #check dense - sparse
def test_mu(self):
self.arith_init()
#basic tests
assert_array_equal((self.Asp*self.Bsp.T).todense(),self.A*self.B.T)
for x in supported_dtypes:
A = self.A.astype(x)
Asp = self.spmatrix(A)
for y in supported_dtypes:
B = self.B.astype(y)
Bsp = self.spmatrix(B)
D1 = A * B.T
S1 = Asp * Bsp.T
assert_array_equal(S1.todense(),D1)
assert_equal(S1.dtype,D1.dtype)
class _Test2DSlicingRegression:
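"""Regression tests for 2-D slicing corner cases (e.g. non-unit strides)."""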
def test_non_unit_stride_2d_indexing_raises_exception(self):
# Regression test -- used to silently ignore the stride.
try:
self.spmatrix((500, 500))[0:100:2, 0:100:2]
except ValueError:
return
assert_(False) # Should not happen.
class TestCSR(_TestCommon, _TestGetSet, _TestSolve,
_TestInplaceArithmetic, _TestArithmetic,
_TestHorizSlicing, _TestVertSlicing, _TestBothSlicing,
_TestFancyIndexing, _Test2DSlicingRegression, TestCase):
spmatrix = csr_matrix
@dec.knownfailureif(True, "Fancy indexing is known to be broken for CSR" \
" matrices")
def test_fancy_indexing_set(self):
_TestFancyIndexing.test_fancy_indexing_set(self)
def test_constructor1(self):
b = matrix([[0,4,0],
[3,0,0],
[0,2,0]],'d')
bsp = csr_matrix(b)
assert_array_almost_equal(bsp.data,[4,3,2])
assert_array_equal(bsp.indices,[1,0,1])
assert_array_equal(bsp.indptr,[0,1,2,3])
assert_equal(bsp.getnnz(),3)
assert_equal(bsp.getformat(),'csr')
assert_array_equal(bsp.todense(),b)
def test_constructor2(self):
b = zeros((6,6),'d')
b[3,4] = 5
bsp = csr_matrix(b)
assert_array_almost_equal(bsp.data,[5])
assert_array_equal(bsp.indices,[4])
assert_array_equal(bsp.indptr,[0,0,0,0,1,1,1])
assert_array_almost_equal(bsp.todense(),b)
def test_constructor3(self):
b = matrix([[1,0],
[0,2],
[3,0]],'d')
bsp = csr_matrix(b)
assert_array_almost_equal(bsp.data,[1,2,3])
assert_array_equal(bsp.indices,[0,1,0])
assert_array_equal(bsp.indptr,[0,1,2,3])
assert_array_almost_equal(bsp.todense(),b)
### currently disabled
## def test_constructor4(self):
## """try using int64 indices"""
## data = arange( 6 ) + 1
## col = array( [1, 2, 1, 0, 0, 2], dtype='int64' )
## ptr = array( [0, 2, 4, 6], dtype='int64' )
##
## a = csr_matrix( (data, col, ptr), shape = (3,3) )
##
## b = matrix([[0,1,2],
## [4,3,0],
## [5,0,6]],'d')
##
## assert_equal(a.indptr.dtype,numpy.dtype('int64'))
## assert_equal(a.indices.dtype,numpy.dtype('int64'))
## assert_array_equal(a.todense(),b)
def test_constructor4(self):
"""using (data, ij) format"""
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([ 6., 10., 3., 9., 1., 4.,
11., 2., 8., 5., 7.])
ij = vstack((row,col))
csr = csr_matrix((data,ij),(4,3))
assert_array_equal(arange(12).reshape(4,3),csr.todense())
def test_constructor5(self):
"""infer dimensions from arrays"""
indptr = array([0,1,3,3])
indices = array([0,5,1,2])
data = array([1,2,3,4])
csr = csr_matrix((data, indices, indptr))
assert_array_equal(csr.shape,(3,6))
def test_sort_indices(self):
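# sort_indices() must order the column indices within each row in place, without changing the matrix values.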
data = arange( 5 )
indices = array( [7, 2, 1, 5, 4] )
indptr = array( [0, 3, 5] )
asp = csr_matrix( (data, indices, indptr), shape=(2,10) )
bsp = asp.copy()
asp.sort_indices( )
assert_array_equal(asp.indices,[1, 2, 7, 4, 5])
assert_array_equal(asp.todense(),bsp.todense())
def test_eliminate_zeros(self):
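# eliminate_zeros() should drop explicitly stored zero entries while leaving the matrix values unchanged.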
data = array( [1, 0, 0, 0, 2, 0, 3, 0] )
indices = array( [1, 2, 3, 4, 5, 6, 7, 8] )
indptr = array( [0, 3, 8] )
asp = csr_matrix( (data, indices, indptr), shape=(2,10) )
bsp = asp.copy()
asp.eliminate_zeros( )
assert_array_equal(asp.nnz, 3)
assert_array_equal(asp.data,[1, 2, 3])
assert_array_equal(asp.todense(),bsp.todense())
def test_ufuncs(self):
X = csr_matrix(np.arange(20).reshape(4, 5) / 20.)
for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
"arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
"deg2rad", "rad2deg", "floor", "ceil", "trunc"]:
assert_equal(hasattr(csr_matrix, f), True)
X2 = getattr(X, f)()
assert_equal(X.shape, X2.shape)
assert_array_equal(X.indices, X2.indices)
assert_array_equal(X.indptr, X2.indptr)
assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray()))
def test_unsorted_arithmetic(self):
data = arange( 5 )
indices = array( [7, 2, 1, 5, 4] )
indptr = array( [0, 3, 5] )
asp = csr_matrix( (data, indices, indptr), shape=(2,10) )
data = arange( 6 )
indices = array( [8, 1, 5, 7, 2, 4] )
indptr = array( [0, 2, 6] )
bsp = csr_matrix( (data, indices, indptr), shape=(2,10) )
assert_equal((asp + bsp).todense(), asp.todense() + bsp.todense())
class TestCSC(_TestCommon, _TestGetSet, _TestSolve,
_TestInplaceArithmetic, _TestArithmetic,
_TestHorizSlicing, _TestVertSlicing, _TestBothSlicing,
_TestFancyIndexing, _Test2DSlicingRegression, TestCase):
spmatrix = csc_matrix
@dec.knownfailureif(True, "Fancy indexing is known to be broken for CSC" \
" matrices")
def test_fancy_indexing_set(self):
_TestFancyIndexing.test_fancy_indexing_set(self)
def test_constructor1(self):
b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d')
bsp = csc_matrix(b)
assert_array_almost_equal(bsp.data,[1,2,1,3])
assert_array_equal(bsp.indices,[0,2,1,2])
assert_array_equal(bsp.indptr,[0,1,2,3,4])
assert_equal(bsp.getnnz(),4)
assert_equal(bsp.shape,b.shape)
assert_equal(bsp.getformat(),'csc')
def test_constructor2(self):
b = zeros((6,6),'d')
b[2,4] = 5
bsp = csc_matrix(b)
assert_array_almost_equal(bsp.data,[5])
assert_array_equal(bsp.indices,[2])
assert_array_equal(bsp.indptr,[0,0,0,0,0,1,1])
def test_constructor3(self):
b = matrix([[1,0],[0,0],[0,2]],'d')
bsp = csc_matrix(b)
assert_array_almost_equal(bsp.data,[1,2])
assert_array_equal(bsp.indices,[0,2])
assert_array_equal(bsp.indptr,[0,1,2])
def test_constructor4(self):
"""using (data, ij) format"""
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([ 6., 10., 3., 9., 1., 4.,
11., 2., 8., 5., 7.])
ij = vstack((row,col))
csc = csc_matrix((data,ij),(4,3))
assert_array_equal(arange(12).reshape(4,3),csc.todense())
def test_constructor5(self):
"""infer dimensions from arrays"""
indptr = array([0,1,3,3])
indices = array([0,5,1,2])
data = array([1,2,3,4])
csc = csc_matrix((data, indices, indptr))
assert_array_equal(csc.shape,(6,3))
def test_eliminate_zeros(self):
data = array( [1, 0, 0, 0, 2, 0, 3, 0] )
indices = array( [1, 2, 3, 4, 5, 6, 7, 8] )
indptr = array( [0, 3, 8] )
asp = csc_matrix( (data, indices, indptr), shape=(10,2) )
bsp = asp.copy()
asp.eliminate_zeros( )
assert_array_equal(asp.nnz, 3)
assert_array_equal(asp.data,[1, 2, 3])
assert_array_equal(asp.todense(),bsp.todense())
def test_sort_indices(self):
data = arange( 5 )
row = array( [7, 2, 1, 5, 4] )
ptr = [0, 3, 5]
asp = csc_matrix( (data, row, ptr), shape=(10,2) )
bsp = asp.copy()
asp.sort_indices()
assert_array_equal(asp.indices,[1, 2, 7, 4, 5])
assert_array_equal(asp.todense(),bsp.todense())
def test_ufuncs(self):
X = csc_matrix(np.arange(21).reshape(7, 3) / 21.)
for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh",
"arcsinh", "arctanh", "rint", "sign", "expm1", "log1p",
"deg2rad", "rad2deg", "floor", "ceil", "trunc"]:
assert_equal(hasattr(csr_matrix, f), True)
X2 = getattr(X, f)()
assert_equal(X.shape, X2.shape)
assert_array_equal(X.indices, X2.indices)
assert_array_equal(X.indptr, X2.indptr)
assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray()))
def test_unsorted_arithmetic(self):
data = arange( 5 )
indices = array( [7, 2, 1, 5, 4] )
indptr = array( [0, 3, 5] )
asp = csc_matrix( (data, indices, indptr), shape=(10,2) )
data = arange( 6 )
indices = array( [8, 1, 5, 7, 2, 4] )
indptr = array( [0, 2, 6] )
bsp = csc_matrix( (data, indices, indptr), shape=(10,2) )
assert_equal((asp + bsp).todense(), asp.todense() + bsp.todense())
class TestDOK(_TestCommon, _TestGetSet, _TestSolve, TestCase):
spmatrix = dok_matrix
def test_mult(self):
A = dok_matrix((10,10))
A[0,3] = 10
A[5,6] = 20
D = A*A.T
E = A*A.H
assert_array_equal(D.A, E.A)
def test_add(self):
A = dok_matrix((3,2))
A[0,1] = -10
A[2,0] = 20
A = A + 10
B = matrix([[10, 0], [10, 10], [30, 10]])
assert_array_equal(A.todense(), B)
def test_convert(self):
"""Test provided by Andrew Straw. Fails in SciPy <= r1477.
"""
(m, n) = (6, 7)
a=dok_matrix((m, n))
# set a few elements, but none in the last column
a[2,1]=1
a[0,2]=2
a[3,1]=3
a[1,5]=4
a[4,3]=5
a[4,2]=6
# assert that the last column is all zeros
assert_array_equal( a.toarray()[:,n-1], zeros(m,) )
# make sure it still works for CSC format
csc=a.tocsc()
assert_array_equal( csc.toarray()[:,n-1], zeros(m,) )
# now test CSR
(m, n) = (n, m)
b = a.transpose()
assert_equal(b.shape, (m, n))
# assert that the last row is all zeros
assert_array_equal( b.toarray()[m-1,:], zeros(n,) )
# make sure it still works for CSR format
csr=b.tocsr()
assert_array_equal( csr.toarray()[m-1,:], zeros(n,))
def test_set_slice(self):
"""Test for slice functionality (EJS)"""
A = dok_matrix((5,10))
B = zeros((5,10), float)
A[:,0] = 1
B[:,0] = 1
assert_array_equal(A.todense(), B)
A[1,:] = 2
B[1,:] = 2
assert_array_equal(A.todense(), B)
A[:,:] = 3
B[:,:] = 3
assert_array_equal(A.todense(), B)
A[1:5, 3] = 4
B[1:5, 3] = 4
assert_array_equal(A.todense(), B)
A[1, 3:6] = 5
B[1, 3:6] = 5
assert_array_equal(A.todense(), B)
A[1:4, 3:6] = 6
B[1:4, 3:6] = 6
assert_array_equal(A.todense(), B)
A[1, 3:10:3] = 7
B[1, 3:10:3] = 7
assert_array_equal(A.todense(), B)
A[1:5, 0] = range(1,5)
B[1:5, 0] = range(1,5)
assert_array_equal(A.todense(), B)
A[0, 1:10:2] = xrange(1,10,2)
B[0, 1:10:2] = xrange(1,10,2)
assert_array_equal(A.todense(), B)
caught = 0
# The next 5 commands should raise exceptions
try:
A[0,0] = range(100)
except ValueError:
caught += 1
try:
A[0,0] = arange(100)
except ValueError:
caught += 1
try:
A[0,:] = range(100)
except ValueError:
caught += 1
try:
A[:,1] = range(100)
except ValueError:
caught += 1
try:
A[:,1] = A.copy()
except:
caught += 1
assert_equal(caught,5)
def test_ctor(self):
caught = 0
# Empty ctor
try:
A = dok_matrix()
except TypeError, e:
caught+=1
assert_equal(caught, 1)
# Dense ctor
b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d')
A = dok_matrix(b)
assert_equal(A.todense(), b)
# Sparse ctor
c = csr_matrix(b)
assert_equal(A.todense(), c.todense())
def test_resize(self):
"""A couple basic tests of the resize() method.
resize(shape) resizes the array in-place.
"""
a = dok_matrix((5,5))
a[:,0] = 1
a.resize((2,2))
expected1 = array([[1,0],[1,0]])
assert_array_equal(a.todense(), expected1)
a.resize((3,2))
expected2 = array([[1,0],[1,0],[0,0]])
assert_array_equal(a.todense(), expected2)
def test_ticket1160(self):
"""Regression test for ticket #1160."""
a = dok_matrix((3,3))
a[0,0] = 0
# This assert would fail, because the above assignment would
# incorrectly call __setitem__ even though the value was 0.
assert_((0,0) not in a.keys(), "Unexpected entry (0,0) in keys")
# Slice assignments were also affected.
b = dok_matrix((3,3))
b[:,0] = 0
assert_(len(b.keys())==0, "Unexpected entries in keys")
# The following five tests are duplicates from _TestCommon, so they can be
# marked as knownfail for Python 2.4. Once 2.4 is no longer supported,
# these duplicates can be removed again.
@dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559")
def test_add_dense(self):
""" adding a dense matrix to a sparse matrix
"""
sum1 = self.dat + self.datsp
assert_array_equal(sum1, 2*self.dat)
sum2 = self.datsp + self.dat
assert_array_equal(sum2, 2*self.dat)
@dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559")
def test_radd(self):
a = self.dat.copy()
a[0,2] = 2.0
b = self.datsp
c = a + b
assert_array_equal(c,[[2,0,2,4],[6,0,2,0],[0,4,0,0]])
@dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559")
def test_rsub(self):
assert_array_equal((self.dat - self.datsp),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
assert_array_equal((self.datsp - self.dat),[[0,0,0,0],[0,0,0,0],[0,0,0,0]])
A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d'))
assert_array_equal((self.dat - A),self.dat - A.todense())
assert_array_equal((A - self.dat),A.todense() - self.dat)
assert_array_equal(A.todense() - self.datsp,A.todense() - self.dat)
assert_array_equal(self.datsp - A.todense(),self.dat - A.todense())
@dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559")
def test_matmat_sparse(self):
a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
a2 = array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])
b = matrix([[0,1],[1,0],[0,2]],'d')
asp = self.spmatrix(a)
bsp = self.spmatrix(b)
assert_array_almost_equal((asp*bsp).todense(), a*b)
assert_array_almost_equal( asp*b, a*b)
assert_array_almost_equal( a*bsp, a*b)
assert_array_almost_equal( a2*bsp, a*b)
# Now try performing cross-type multiplication:
csp = bsp.tocsc()
c = b
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal( asp*c, a*c)
assert_array_almost_equal( a*csp, a*c)
assert_array_almost_equal( a2*csp, a*c)
csp = bsp.tocsr()
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal( asp*c, a*c)
assert_array_almost_equal( a*csp, a*c)
assert_array_almost_equal( a2*csp, a*c)
csp = bsp.tocoo()
assert_array_almost_equal((asp*csp).todense(), a*c)
assert_array_almost_equal( asp*c, a*c)
assert_array_almost_equal( a*csp, a*c)
assert_array_almost_equal( a2*csp, a*c)
# Test provided by Andy Fraser, 2006-03-26
L = 30
frac = .3
random.seed(0) # make runs repeatable
A = zeros((L,2))
for i in xrange(L):
for j in xrange(2):
r = random.random()
if r < frac:
A[i,j] = r/frac
A = self.spmatrix(A)
B = A*A.T
assert_array_almost_equal(B.todense(), A.todense() * A.T.todense())
assert_array_almost_equal(B.todense(), A.todense() * A.todense().T)
# check dimension mismatch 2x2 times 3x2
A = self.spmatrix( [[1,2],[3,4]] )
B = self.spmatrix( [[1,2],[3,4],[5,6]] )
assert_raises(ValueError, A.__mul__, B)
@dec.knownfailureif(sys.version[:3] == '2.4', "See ticket 1559")
def test_sub_dense(self):
""" subtracting a dense matrix to/from a sparse matrix
"""
sum1 = 3*self.dat - self.datsp
assert_array_equal(sum1, 2*self.dat)
sum2 = 3*self.datsp - self.dat
assert_array_equal(sum2, 2*self.dat)
class TestLIL( _TestCommon, _TestHorizSlicing, _TestVertSlicing,
_TestBothSlicing, _TestGetSet, _TestSolve,
_TestArithmetic, _TestInplaceArithmetic, _TestFancyIndexing,
TestCase):
spmatrix = lil_matrix
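# Shared fixture reused by the assignment tests below.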
B = lil_matrix((4,3))
B[0,0] = 2
B[1,2] = 7
B[2,1] = 3
B[3,0] = 10
@dec.knownfailureif(True, "Fancy indexing is known to be broken for LIL" \
" matrices")
def test_fancy_indexing_set(self):
_TestFancyIndexing.test_fancy_indexing_set(self)
@dec.knownfailureif(True, "Fancy indexing is known to be broken for LIL" \
" matrices")
def test_fancy_indexing_randomized(self):
_TestFancyIndexing.test_fancy_indexing_randomized(self)
def test_dot(self):
A = matrix(zeros((10,10)))
A[0,3] = 10
A[5,6] = 20
B = lil_matrix((10,10))
B[0,3] = 10
B[5,6] = 20
assert_array_equal(A * A.T, (B * B.T).todense())
assert_array_equal(A * A.H, (B * B.H).todense())
def test_scalar_mul(self):
x = lil_matrix((3,3))
x[0,0] = 2
x = x*2
assert_equal(x[0,0],4)
x = x*0
assert_equal(x[0,0],0)
def test_reshape(self):
x = lil_matrix((4,3))
x[0,0] = 1
x[2,1] = 3
x[3,2] = 5
x[0,2] = 7
for s in [(12,1),(1,12)]:
assert_array_equal(x.reshape(s).todense(),
x.todense().reshape(s))
def test_lil_lil_assignment(self):
""" Tests whether a row of one lil_matrix can be assigned to
another.
"""
B = self.B.copy()
A = B / 10
B[0,:] = A[0,:]
assert_array_equal(A[0,:].A, B[0,:].A)
def test_inplace_ops(self):
A = lil_matrix([[0,2,3],[4,0,6]])
B = lil_matrix([[0,1,0],[0,2,3]])
data = {'add': (B,A + B),
'sub': (B,A - B),
'mul': (3,A * 3)}
for op,(other,expected) in data.iteritems():
result = A.copy()
getattr(result, '__i%s__' % op)(other)
assert_array_equal(result.todense(), expected.todense())
def test_lil_slice_assignment(self):
B = lil_matrix((4,3))
B[0,0] = 5
B[1,2] = 3
B[2,1] = 7
expected = array([[10,0,0],
[0,0,6],
[0,14,0],
[0,0,0]])
B[:,:] = B+B
assert_array_equal(B.todense(),expected)
block = [[1,0],[0,4]]
B[:2,:2] = csc_matrix(array(block))
assert_array_equal(B.todense()[:2,:2],block)
def test_lil_sequence_assignment(self):
A = lil_matrix((4,3))
B = eye(3,4,format='lil')
i0 = [0,1,2]
i1 = (0,1,2)
i2 = array( i0 )
A[0,i0] = B[i0,0]
A[1,i1] = B[i1,1]
A[2,i2] = B[i2,2]
assert_array_equal(A.todense(),B.T.todense())
# row slice
A = lil_matrix((2,3))
A[1,1:3] = [10,20]
assert_array_equal(A.todense(), [[0,0,0],[0,10,20]])
# column slice
A = lil_matrix((3,2))
A[1:3,1] = [[10],[20]]
assert_array_equal(A.todense(), [[0,0],[0,10],[0,20]])
def test_lil_iteration(self):
row_data = [[1,2,3],[4,5,6]]
B = lil_matrix(array(row_data))
for r,row in enumerate(B):
assert_array_equal(row.todense(),array(row_data[r],ndmin=2))
def test_lil_from_csr(self):
""" Tests whether a lil_matrix can be constructed from a
csr_matrix.
"""
B = lil_matrix((10,10))
B[0,3] = 10
B[5,6] = 20
B[8,3] = 30
B[3,8] = 40
B[8,9] = 50
C = B.tocsr()
D = lil_matrix(C)
assert_array_equal(C.A, D.A)
def test_fancy_indexing(self):
M = arange(25).reshape(5,5)
A = lil_matrix( M )
assert_equal(A[array([1,2,3]),2:3].todense(), M[array([1,2,3]),2:3])
def test_point_wise_multiply(self):
l = lil_matrix((4,3))
l[0,0] = 1
l[1,1] = 2
l[2,2] = 3
l[3,1] = 4
m = lil_matrix((4,3))
m[0,0] = 1
m[0,1] = 2
m[2,2] = 3
m[3,1] = 4
m[3,2] = 4
assert_array_equal(l.multiply(m).todense(),
m.multiply(l).todense())
assert_array_equal(l.multiply(m).todense(),
[[1,0,0],
[0,0,0],
[0,0,9],
[0,16,0]])
def test_lil_multiply_removal(self):
"""Ticket #1427."""
a = lil_matrix(np.ones((3,3)))
a *= 2.
a[0, :] = 0
class TestCOO(_TestCommon, TestCase):
spmatrix = coo_matrix
def test_constructor1(self):
"""unsorted triplet format"""
row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2])
col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1])
data = array([ 6., 10., 3., 9., 1., 4.,
11., 2., 8., 5., 7.])
coo = coo_matrix((data,(row,col)),(4,3))
assert_array_equal(arange(12).reshape(4,3),coo.todense())
def test_constructor2(self):
"""unsorted triplet format with duplicates (which are summed)"""
row = array([0,1,2,2,2,2,0,0,2,2])
col = array([0,2,0,2,1,1,1,0,0,2])
data = array([2,9,-4,5,7,0,-1,2,1,-5])
coo = coo_matrix((data,(row,col)),(3,3))
mat = matrix([[4,-1,0],[0,0,9],[-3,7,0]])
assert_array_equal(mat,coo.todense())
def test_constructor3(self):
"""empty matrix"""
coo = coo_matrix( (4,3) )
assert_array_equal(coo.shape,(4,3))
assert_array_equal(coo.row,[])
assert_array_equal(coo.col,[])
assert_array_equal(coo.data,[])
assert_array_equal(coo.todense(),zeros((4,3)))
def test_constructor4(self):
"""from dense matrix"""
mat = array([[0,1,0,0],
[7,0,3,0],
[0,4,0,0]])
coo = coo_matrix(mat)
assert_array_equal(coo.todense(),mat)
#upgrade rank 1 arrays to row matrix
mat = array([0,1,0,0])
coo = coo_matrix(mat)
assert_array_equal(coo.todense(),mat.reshape(1,-1))
class TestDIA(_TestCommon, _TestArithmetic, TestCase):
spmatrix = dia_matrix
def test_constructor1(self):
D = matrix([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
data = np.array([[1,2,3,4]]).repeat(3,axis=0)
offsets = np.array([0,-1,2])
assert_equal(dia_matrix( (data,offsets), shape=(4,4)).todense(), D)
class TestBSR(_TestCommon, _TestArithmetic, _TestInplaceArithmetic, TestCase):
spmatrix = bsr_matrix
def test_constructor1(self):
"""check native BSR format constructor"""
indptr = array([0,2,2,4])
indices = array([0,2,2,3])
data = zeros((4,2,3))
data[0] = array([[ 0, 1, 2],
[ 3, 0, 5]])
data[1] = array([[ 0, 2, 4],
[ 6, 0, 10]])
data[2] = array([[ 0, 4, 8],
[12, 0, 20]])
data[3] = array([[ 0, 5, 10],
[15, 0, 25]])
A = kron( [[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]] )
Asp = bsr_matrix((data,indices,indptr),shape=(6,12))
assert_equal(Asp.todense(),A)
#infer shape from arrays
Asp = bsr_matrix((data,indices,indptr))
assert_equal(Asp.todense(),A)
def test_constructor2(self):
"""construct from dense"""
#test zero mats
for shape in [ (1,1), (5,1), (1,10), (10,4), (3,7), (2,1)]:
A = zeros(shape)
assert_equal(bsr_matrix(A).todense(),A)
A = zeros((4,6))
assert_equal(bsr_matrix(A,blocksize=(2,2)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
A = kron( [[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]] )
assert_equal(bsr_matrix(A).todense(),A)
assert_equal(bsr_matrix(A,shape=(6,12)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(1,1)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,6)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(2,12)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(3,12)).todense(),A)
assert_equal(bsr_matrix(A,blocksize=(6,12)).todense(),A)
A = kron( [[1,0,2,0],[0,1,0,0],[0,0,0,0]], [[0,1,2],[3,0,5]] )
assert_equal(bsr_matrix(A,blocksize=(2,3)).todense(),A)
def test_eliminate_zeros(self):
data = kron([1, 0, 0, 0, 2, 0, 3, 0], [[1,1],[1,1]]).T
data = data.reshape(-1,2,2)
indices = array( [1, 2, 3, 4, 5, 6, 7, 8] )
indptr = array( [0, 3, 8] )
asp = bsr_matrix( (data, indices, indptr), shape=(4,20) )
bsp = asp.copy()
asp.eliminate_zeros()
assert_array_equal(asp.nnz, 3*4)
assert_array_equal(asp.todense(),bsp.todense())
def test_bsr_matvec(self):
A = bsr_matrix( arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5) )
x = arange(A.shape[1]).reshape(-1,1)
assert_equal(A*x, A.todense()*x)
def test_bsr_matvecs(self):
A = bsr_matrix( arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5) )
x = arange(A.shape[1]*6).reshape(-1,6)
assert_equal(A*x, A.todense()*x)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | -5,126,287,273,559,635,000 | 34.541757 | 99 | 0.507576 | false |
sayoun/pyvac | pyvac/views/base.py | 1 | 7745 | # -*- coding: utf-8 -*-
import logging
import traceback
from datetime import datetime
from webob import Response
from pyramid.security import authenticated_userid
from pyramid.httpexceptions import HTTPFound
from pyramid.url import route_url
# from pyramid.response import Response
from pyramid.settings import asbool
from pyvac.helpers.sqla import ModelError
from .. import __version__
from ..models import DBSession, User, Request, Sudoer
log = logging.getLogger(__name__)
class ViewBase(object):
"""
Pyvac view base class.
"""
def __init__(self, request):
self.request = request
self.session = DBSession()
login = authenticated_userid(self.request)
if login:
self.login = login
self.user = User.by_login(self.session, login)
else:
self.login = 'anonymous'
self.user = None
def update_response(self, response):
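"""Hook for subclasses to post-process the response (e.g. add template globals)."""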
pass
def on_error(self, exception):
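"""Return True if the exception should be logged before being re-raised."""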
return True
def __call__(self):
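"""Dispatch the view: render it, let update_response() enrich the result, then flush the session."""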
try:
log.info('dispatch view %s', self.__class__.__name__)
response = self.render()
self.update_response(response)
# if isinstance(response, dict):
# log.info("rendering template with context %r", dict)
self.session.flush()
except Exception as exc:
if self.on_error(exc):
log.error('Error on view %s' % self.__class__.__name__,
exc_info=True)
raise
log.info('view %s dispatched', self.__class__.__name__)
return response
def render(self):
return {}
class View(ViewBase):
"""
Base class of every view.
"""
def update_response(self, response):
# this is a view to render
if isinstance(response, dict):
global_ = {
'pyvac': {
'version': __version__,
'login': self.login,
'user': self.user,
}
}
if self.user:
# if logged in, retrieve the total request count for the header
req_list = {'requests': []}
requests = []
if self.user.is_admin:
country = self.user.country
requests = Request.all_for_admin_per_country(self.session,
country)
elif self.user.is_super:
requests = Request.by_manager(self.session, self.user)
req_list['requests'] = requests
# always add our requests
for req in Request.by_user(self.session, self.user):
if req not in req_list['requests']:
req_list['requests'].append(req)
# only count next requests
today = datetime.now()
if self.user.is_admin:
# for admins, display requests from the 1st of the month
today = today.replace(day=1)
requests_count = len([req for req in req_list['requests']
if req.date_to >= today])
global_['pyvac']['requests_count'] = requests_count
# retrieve available users for sudo
sudoers = Sudoer.alias(self.session, self.user)
if sudoers:
sudoers.append(self.user)
global_['pyvac']['sudoers'] = sudoers
response.update(global_)
class RedirectView(View):
"""
Base class of every view that redirects after a POST.
"""
redirect_route = None
redirect_kwargs = {}
def render(self):
return self.redirect()
def redirect(self, redirect_route=None):
settings = self.request.registry.settings
if 'pyvac.force_scheme' in settings:
scheme = settings.get('pyvac.force_scheme')
self.redirect_kwargs['_scheme'] = scheme
route = redirect_route or self.redirect_route
return HTTPFound(location=route_url(route, self.request,
**self.redirect_kwargs))
class CreateView(RedirectView):
"""
Base class of every create view.
"""
model = None
matchdict_key = None
def parse_form(self):
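"""Collect submitted form fields prefixed with the model table name into a dict."""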
kwargs = {}
prefix = self.model.__tablename__
for k, v in list(self.request.params.items()):
if v and k.startswith(prefix):
kwargs[k.split('.').pop()] = v
return kwargs
def get_model(self):
return self.model()
def update_model(self, model):
"""
Trivial implementation for simple form data,
using the model's table-name prefix.
"""
for k, v in list(self.parse_form().items()):
if k == 'ldap_user':
v = bool(int(v))
setattr(model, k, v)
def update_view(self, model, view):
"""
render() initializes the trivial view properties;
update_view() is the hook to customize the view being rendered.
"""
def validate(self, model, errors):
return len(errors) == 0
def save_model(self, model):
log.debug('saving %s' % model.__class__.__name__)
log.debug('%r' % model.__dict__)
self.session.add(model)
def render(self):
settings = self.request.registry.settings
ldap = False
if 'pyvac.use_ldap' in settings:
ldap = asbool(settings.get('pyvac.use_ldap'))
if 'form.cancelled' in self.request.params:
return self.redirect()
log.debug('rendering %s' % self.__class__.__name__)
errors = []
model = self.get_model()
if self.user and not self.user.is_admin:
if model.id != self.user.id:
return self.redirect('home')
if 'form.submitted' in self.request.params:
self.validate(model, errors)
if not errors:
try:
self.update_model(model)
model.validate(self.session, ldap=ldap)
except ModelError as err:
errors.extend(err.errors)
if not errors:
self.save_model(model)
return self.redirect()
rv = {'errors': errors,
self.model.__tablename__: model,
'use_ldap': ldap,
'csrf_token': self.request.session.get_csrf_token()}
self.update_view(model, rv)
log.debug(repr(rv))
return rv
class EditView(CreateView):
"""
Base class of every edit view.
"""
def get_model(self):
return self.model.by_id(
self.session, int(self.request.matchdict[self.matchdict_key]))
class DeleteView(RedirectView):
"""
Base class of every delete view.
"""
model = None
matchdict_key = None
redirect_route = None
def delete(self, model):
self.session.delete(model)
def render(self):
model = self.model.by_id(
self.session, int(self.request.matchdict[self.matchdict_key]))
if 'form.submitted' in self.request.params:
self.delete(model)
return self.redirect()
return {self.model.__tablename__: model}
def forbidden_view(request):
return HTTPFound(location=route_url('login', request))
def exception_view(context, request):
log.error("The error was: %s" % context, exc_info=(context))
body = """Oops ! An internal error has occured, maybe this can help ?<br/>
<pre>%s</pre>""" % traceback.format_exc()
return Response(status_int=500, body=body)
| bsd-3-clause | -2,335,569,999,331,767,300 | 28.116541 | 78 | 0.541382 | false |
pombredanne/https-gitlab.lrde.epita.fr-vcsn-vcsn | tests/python/automaton.py | 1 | 13433 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import vcsn
from test import *
## -------------- ##
## dot: parsing. ##
## -------------- ##
# Check invalid input.
def xfail(a):
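"""Check that parsing this automaton source raises an error."""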
XFAIL(lambda: vcsn.automaton(a))
# Syntax error: missing }.
xfail(r'''digraph
{
vcsn_context = "lal_char(a), b"
''')
# Syntax error: string not closed.
xfail(r'''digraph
{
vcsn_context = "lal_char(a), b
}
''')
# Syntax error: attributes are assignments.
xfail(r'''digraph
{
vcsn_context = "lal_char(a), b"
a [attribute]
}
''')
# Syntax error: attributes are assignments.
xfail(r'''digraph
{
vcsn_context = "lal_char(a), b"
a [attribute =]
}
''')
# Syntax error: comma used after empty attribute.
xfail(r'''digraph
{
vcsn_context = "lal_char(a), b"
a [,a=a]
}
''')
# Syntax error: semicolon used after empty attribute
xfail(r'''digraph
{
vcsn_context = "lal_char(a), b"
a [;a=a]
}
''')
# Syntax error: semicolon used after empty attribute
xfail(r'''digraph
{
vcsn_context = "lal_char(a), b"
a [a=a,;]
}
''')
# Invalid label: letter not in alphabet.
xfail(r'''digraph
{
vcsn_context = "lal_char(), b"
0 -> 1 [label = a]
1 -> F1
I0 -> 0
}
''')
# Invalid label: \e is not valid in LAL.
xfail(r'''digraph
{
vcsn_context = "lal_char(a), b"
0 -> 1 [label = "\\e"]
1 -> F1
I0 -> 0
}
''')
# Invalid label: aa is not valid in LAL.
xfail(r'''digraph
{
vcsn_context = "lal_char(a), b"
0 -> 1 [label = "aa"]
1 -> F1
I0 -> 0
}
''')
# Invalid label: missing '>'.
xfail(r'''digraph
{
vcsn_context = "lal_char(a), z"
0 -> 1 [label = "<2"]
1 -> F1
I0 -> 0
}
''')
# No context defined (see the typo in vcsn_context).
xfail(r'''digraph
{
vcsn_contxt = "lal_char(ab), b"
0 -> 1 [label = a]
1 -> F1
I0 -> 0
}
''')
# Invalid context.
xfail(r'''digraph
{
vcsn_context = "unknown"
0 -> 1 [label = a]
1 -> F1
I0 -> 0
}
''')
# Invalid initial label.
xfail(r'''digraph
{
vcsn_context = "lal_char(ab), b"
0 -> 1 [label = a]
1 -> F1
I0 -> 0 [label = a]
}
''')
# Invalid final label.
xfail(r'''digraph
{
vcsn_context = "lal_char(ab), b"
0 -> 1 [label = a]
1 -> F1 [label = a]
I0 -> 0
}
''')
# \e makes no sense when not in lan.
xfail(r'''digraph
{
vcsn_context = "lal_char(\\e), b"
0 -> 1 [label = "\\e"]
}
''')
# An open context (letters are not specified).
CHECK_EQ(vcsn.automaton(r'''digraph
{
vcsn_context = "lal_char(abcd), b"
0 -> 0 [label="a, b, c, d"]
}'''),
vcsn.automaton(r'''digraph
{
vcsn_context = "lal_char, b"
0 -> 0 [label="a, b, c, d"]
}'''))
# An open tuple context.
CHECK_EQ(vcsn.automaton(r'''digraph
{
vcsn_context = "lat<lal_char,law_char>, b"
0 -> 0 [label="(a|x),(b|xyz),(c|\\e)"]
}'''),
vcsn.automaton(r'''digraph
{
vcsn_context = "lat<lal_char(abc),law_char(xyz)>, b"
0 -> 0 [label="(a,x),(b,xyz),(c,\\e)"]
}'''))
# Coverage: different rarely used features.
CHECK_EQ(vcsn.automaton(r'''digraph
{
vcsn_context = "lal_char(), b"
{
node [shape = circle]
0 [color = DimGray]
}
}'''),
vcsn.automaton(r'''digraph "a graph
name"
{
vcsn_context // single line comment
=
/* a
multiline
comment. */
"lal_char(), b"
graph [a = "graph attribute",]
edge [a = "edge attribute";]
node [a = "node attribute"]
0:port:nw [a1 = a1, a2 = a2; a3 = a3 a4 = a4]
}'''))
# A context string with ".
CHECK_EQ(vcsn.automaton(r'''digraph
{
vcsn_context = "lal_char(\"\\'), b"
0 -> 0 [label="\", \\'"]
}'''),
vcsn.automaton(r'''digraph
{
vcsn_context = "lal_char, b"
0 -> 0 [label="\", \\'"]
}'''))
# A dot file which uses HTML strings, and a subgraph.
CHECK_EQ(r'''context = "nullableset<letterset<char_letters(ab)>>, b"
$ -> 0
$ -> 3
0 -> 1 a, b
1 -> $
2 -> 1 a
3 -> 2 b''',
vcsn.automaton(filename=medir+'/html.gv').format('daut'))
## --------------- ##
## automaton.dot. ##
## --------------- ##
# Make sure to check the rendering of useful/useless and named/nameless
# states, weights, and spontaneous transitions.
c = vcsn.context('lan_char(ab), z')
a = c.expression('<2>a+<2>b').thompson()
CHECK_EQ('''digraph
{
vcsn_context = "nullableset<letterset<char_letters(ab)>>, z"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F1
}
{
node [fontsize = 12, fillcolor = cadetblue1, shape = circle, style = "filled,rounded", height = 0.4, width = 0.4, fixedsize = true]
0
1
2
3
4
5
}
I0 -> 0
0 -> 2 [label = "ε"]
0 -> 4 [label = "ε"]
1 -> F1
2 -> 3 [label = "⟨2⟩a"]
3 -> 1 [label = "ε"]
4 -> 5 [label = "⟨2⟩b"]
5 -> 1 [label = "ε"]
}''',
a.dot())
# conjunction: state names, and useless states, etc.
CHECK_EQ('''digraph
{
vcsn_context = "nullableset<letterset<char_letters(ab)>>, z"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F11
}
{
node [fontsize = 12, fillcolor = cadetblue1, shape = circle, style = "filled,rounded", height = 0.4, width = 0.4, fixedsize = true]
0 [label = "0, 0", shape = box, fixedsize = false]
1 [label = "2, 0", shape = box, fixedsize = false]
2 [label = "4, 0", shape = box, fixedsize = false]
3 [label = "2, 2", shape = box, fixedsize = false]
4 [label = "2, 4", shape = box, fixedsize = false, fillcolor = lightgray]
5 [label = "4, 2", shape = box, fixedsize = false, fillcolor = lightgray]
6 [label = "4, 4", shape = box, fixedsize = false]
7 [label = "3, 3", shape = box, fixedsize = false]
8 [label = "5, 5", shape = box, fixedsize = false]
9 [label = "1, 3", shape = box, fixedsize = false]
10 [label = "1, 5", shape = box, fixedsize = false]
11 [label = "1, 1", shape = box, fixedsize = false]
}
I0 -> 0
0 -> 1 [label = "ε"]
0 -> 2 [label = "ε"]
1 -> 3 [label = "ε"]
1 -> 4 [label = "ε", color = DimGray]
2 -> 5 [label = "ε", color = DimGray]
2 -> 6 [label = "ε"]
3 -> 7 [label = "⟨4⟩a"]
6 -> 8 [label = "⟨4⟩b"]
7 -> 9 [label = "ε"]
8 -> 10 [label = "ε"]
9 -> 11 [label = "ε"]
10 -> 11 [label = "ε"]
11 -> F11
}''',
(a&a).dot())
# Tooltip.
CHECK_EQ('''digraph
{
vcsn_context = "nullableset<letterset<char_letters(ab)>>, z"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F11
}
{
node [fontsize = 12, fillcolor = cadetblue1, shape = circle, style = "filled,rounded", height = 0.4, width = 0.4, fixedsize = true]
0 [tooltip = "0, 0"]
1 [tooltip = "2, 0"]
2 [tooltip = "4, 0"]
3 [tooltip = "2, 2"]
4 [tooltip = "2, 4", fillcolor = lightgray]
5 [tooltip = "4, 2", fillcolor = lightgray]
6 [tooltip = "4, 4"]
7 [tooltip = "3, 3"]
8 [tooltip = "5, 5"]
9 [tooltip = "1, 3"]
10 [tooltip = "1, 5"]
11 [tooltip = "1, 1"]
}
I0 -> 0
0 -> 1 [label = "ε"]
0 -> 2 [label = "ε"]
1 -> 3 [label = "ε"]
1 -> 4 [label = "ε", color = DimGray]
2 -> 5 [label = "ε", color = DimGray]
2 -> 6 [label = "ε"]
3 -> 7 [label = "⟨4⟩a"]
6 -> 8 [label = "⟨4⟩b"]
7 -> 9 [label = "ε"]
8 -> 10 [label = "ε"]
9 -> 11 [label = "ε"]
10 -> 11 [label = "ε"]
11 -> F11
}''',
(a&a).dot("tooltip"))
# Transitions.
CHECK_EQ('''digraph
{
vcsn_context = "nullableset<letterset<char_letters(ab)>>, z"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F11
}
{
node [shape = point, width = 0]
0 [label = "0, 0"]
1 [label = "2, 0"]
2 [label = "4, 0"]
3 [label = "2, 2"]
4 [label = "2, 4", fillcolor = lightgray]
5 [label = "4, 2", fillcolor = lightgray]
6 [label = "4, 4"]
7 [label = "3, 3"]
8 [label = "5, 5"]
9 [label = "1, 3"]
10 [label = "1, 5"]
11 [label = "1, 1"]
}
I0 -> 0
0 -> 1 [label = "ε"]
0 -> 2 [label = "ε"]
1 -> 3 [label = "ε"]
1 -> 4 [label = "ε", color = DimGray]
2 -> 5 [label = "ε", color = DimGray]
2 -> 6 [label = "ε"]
3 -> 7 [label = "⟨4⟩a"]
6 -> 8 [label = "⟨4⟩b"]
7 -> 9 [label = "ε"]
8 -> 10 [label = "ε"]
9 -> 11 [label = "ε"]
10 -> 11 [label = "ε"]
11 -> F11
}''',
(a&a).dot("transitions"))
# Empty set.
CHECK_EQ('''digraph
{
vcsn_context = "letterset<char_letters()>, b"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F0
}
{
node [fontsize = 12, fillcolor = cadetblue1, shape = circle, style = "filled,rounded", height = 0.4, width = 0.4, fixedsize = true]
0 [label = "∅ᶜ", shape = box, fixedsize = false]
}
I0 -> 0
0 -> F0
}''',
vcsn.context('lal_char, b').expression('\z{c}').derived_term().dot())
## ------------- ##
## dot: simple. ##
## ------------- ##
ctx = vcsn.context('lal<string>, b')
e = ctx.expression("'🍺':'🍾':'☕️':'🍷' & [^]*'🍺'[^]*'☕️'[^]* & ([^]*'🍷''🍾'[^]*){c}")
CHECK_EQ(open(medir + '/drinks-simple.gv').read().strip(),
e.automaton().minimize().dot('simple'))
## ------------------------------- ##
## Output: dot, dot2tex and TikZ. ##
## ------------------------------- ##
import glob
for fn in glob.glob(os.path.join(medir, '*.in.gv')):
print("Checking: ", fn)
a = vcsn.automaton(filename = fn)
exp = open(fn.replace('.in.gv', '.out.gv')).read().strip()
CHECK_EQ(exp, a.format('dot'))
exp = open(fn.replace('.in.gv', '.tex.gv')).read().strip()
CHECK_EQ(exp, a.format('dot,latex'))
exp = open(fn.replace('.in.gv', '.tex')).read().strip()
CHECK_EQ(exp, a.format('tikz'))
# Check state names in TikZ.
a = vcsn.context('lal_char, b').expression('\e+a').derived_term()
exp = open(os.path.join(medir, 'derived-term.tex')).read().strip()
CHECK_EQ(exp, a.format('tikz'))
## ----------- ##
## I/O: Daut. ##
## ----------- ##
for fn in glob.glob(os.path.join(medir, '*.in.gv')):
a = vcsn.automaton(filename=fn)
# Check output.
daut = a.format('daut')
exp = open(fn.replace('.in.gv', '.daut')).read().strip()
CHECK_EQ(exp, daut)
# Check input: make sure we can read it.
CHECK_EQ(a, vcsn.automaton(exp, 'daut'))
CHECK_EQ(a, vcsn.automaton(exp, 'auto'))
CHECK_EQ(a, vcsn.automaton(exp))
# A daut file whose names have quotes: beware of building "Ifoo" and
# "Ffoo", not I"foo" and F"foo".
CHECK_EQ(r'''digraph
{
vcsn_context = "letterset<char_letters()>, b"
rankdir = LR
edge [arrowhead = vee, arrowsize = .6]
{
node [shape = point, width = 0]
I0
F0
}
{
node [shape = circle, style = rounded, width = 0.5]
0 [label = "foo", shape = box]
}
I0 -> 0
0 -> F0
}''',
vcsn.automaton('''$ -> "foo"
"foo" -> $''', strip = False))
## ----------- ##
## I/O: FAdo. ##
## ----------- ##
try:
import FAdo
has_fado = True
except ImportError:
has_fado = False
def check_fado(aut):
'''Check that FAdo accepts aut.format('fado') as input.'''
if has_fado:
name = "automaton.fado"
from FAdo import fa
# I did not find a means to read from a string...
with open(name, 'w') as f:
f.write(aut.format('fado') + "\n")
fa.readFromFile(name)
os.remove(name)
else:
SKIP("FAdo not installed")
for fn in glob.glob(os.path.join(medir, '*.fado')):
exp = vcsn.automaton(filename = fn.replace('.fado', '.gv'))
# Check that we can read FAdo.
CHECK_EQ(exp, vcsn.automaton(filename = fn, format = 'fado'))
CHECK_EQ(exp, vcsn.automaton(filename = fn, format = 'auto'))
# Check that we can print FAdo.
fado = open(fn).read().strip()
CHECK_EQ(fado, exp.format('fado'))
check_fado(a)
## --------------- ##
## Output: Grail. ##
## --------------- ##
def check_grail(aut):
'''Check that FAdo accepts aut.format('grail') as input.'''
if has_fado:
name = "automaton.grail"
from FAdo import grail
# I did not find a means to read from a string...
with open(name, 'w') as f:
f.write(aut.format('grail') + "\n")
grail.importFromGrailFile(name)
os.remove(name)
else:
SKIP("FAdo not installed")
for fn in glob.glob(os.path.join(medir, '*.grail')):
a = vcsn.automaton(filename = fn.replace('.grail', '.gv'))
# Check that we can print Grail.
grail = open(fn).read().strip()
CHECK_EQ(grail, a.format('grail'))
check_grail(a)
## ------------ ##
## Conversion. ##
## ------------ ##
# Convert an automaton from lal_char, b to law_char, z.
CHECK_EQ(vcsn.automaton('''context = "law_char, z"
$ -> 0
0 -> 1 a, b
1 -> 1 c
1 -> $''', 'daut'),
vcsn.automaton('''context = "lal_char(abc), b"
$ -> 0
0 -> 1 a, b
1 -> 1 c
1 -> $''', 'daut').automaton(vcsn.context("law_char(abc), z")))
# Convert an automaton to a smaller, valid, alphabet.
CHECK_EQ(vcsn.automaton('''context = "law_char(abc), z"
0 -> 1 a, b''', 'daut'),
vcsn.automaton('''context = "lal_char(a-z), b"
0 -> 1 a, b''', 'daut').automaton(vcsn.context("law_char(abc), z")))
# Convert an automaton to a smaller, invalid, alphabet.
XFAIL(lambda: vcsn.automaton('''context = "lal_char(abc), b"
0 -> 1 a, b''', 'daut').automaton(vcsn.context("law_char(xy), z")))
# Convert to an invalid smaller weightset.
XFAIL(lambda: vcsn.automaton('''context = "lal_char(abc), z"
0 -> 1 <3>a, b''', 'daut').automaton(vcsn.context("lal_char(xy), b")))
| gpl-3.0 | 8,603,536,619,741,621,000 | 22.114385 | 135 | 0.531004 | false |
gunan/tensorflow | tensorflow/lite/experimental/support/metadata/metadata_test.py | 1 | 19660 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.lite.experimental.support.metadata.metadata."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import six
from flatbuffers.python import flatbuffers
from tensorflow.lite.experimental.support.metadata import metadata as _metadata
from tensorflow.lite.experimental.support.metadata import metadata_schema_py_generated as _metadata_fb
from tensorflow.lite.experimental.support.metadata import schema_py_generated as _schema_fb
from tensorflow.python.framework import test_util
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
class MetadataTest(test_util.TensorFlowTestCase):
def setUp(self):
super(MetadataTest, self).setUp()
self._invalid_model_buf = None
self._invalid_file = "not_existed_file"
self._empty_model_buf = self._create_empty_model_buf()
self._empty_model_file = self.create_tempfile().full_path
with open(self._empty_model_file, "wb") as f:
f.write(self._empty_model_buf)
self._model_file = self._create_model_file_with_metadata_and_buf_fields()
self._metadata_file = self._create_metadata_file()
self._file1 = self.create_tempfile("file1").full_path
self._file2 = self.create_tempfile("file2").full_path
self._file3 = self.create_tempfile("file3").full_path
def _create_empty_model_buf(self):
model = _schema_fb.ModelT()
model_builder = flatbuffers.Builder(0)
model_builder.Finish(
model.Pack(model_builder),
_metadata.MetadataPopulator.TFLITE_FILE_IDENTIFIER)
return model_builder.Output()
def _create_model_file_with_metadata_and_buf_fields(self):
metadata_field = _schema_fb.MetadataT()
metadata_field.name = "meta"
buffer_field = _schema_fb.BufferT()
model = _schema_fb.ModelT()
model.metadata = [metadata_field, metadata_field]
model.buffers = [buffer_field, buffer_field, buffer_field]
model_builder = flatbuffers.Builder(0)
model_builder.Finish(
model.Pack(model_builder),
_metadata.MetadataPopulator.TFLITE_FILE_IDENTIFIER)
    model_file = self.create_tempfile().full_path
    with open(model_file, "wb") as f:
      f.write(model_builder.Output())
    return model_file
def _create_metadata_file(self):
associated_file1 = _metadata_fb.AssociatedFileT()
associated_file1.name = b"file1"
associated_file2 = _metadata_fb.AssociatedFileT()
associated_file2.name = b"file2"
self.expected_recorded_files = [
six.ensure_str(associated_file1.name),
six.ensure_str(associated_file2.name)
]
output_meta = _metadata_fb.TensorMetadataT()
output_meta.associatedFiles = [associated_file2]
subgraph = _metadata_fb.SubGraphMetadataT()
subgraph.outputTensorMetadata = [output_meta]
model_meta = _metadata_fb.ModelMetadataT()
model_meta.name = "Mobilenet_quantized"
model_meta.associatedFiles = [associated_file1]
model_meta.subgraphMetadata = [subgraph]
b = flatbuffers.Builder(0)
b.Finish(
model_meta.Pack(b),
_metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
metadata_file = self.create_tempfile().full_path
with open(metadata_file, "wb") as f:
f.write(b.Output())
return metadata_file
def _create_model_buffer_with_wrong_identifier(self):
wrong_identifier = b"widn"
model = _schema_fb.ModelT()
model_builder = flatbuffers.Builder(0)
model_builder.Finish(model.Pack(model_builder), wrong_identifier)
return model_builder.Output()
def _create_metadata_buffer_with_wrong_identifier(self):
# Creates a metadata with wrong identifier
wrong_identifier = b"widn"
metadata = _metadata_fb.ModelMetadataT()
metadata_builder = flatbuffers.Builder(0)
metadata_builder.Finish(metadata.Pack(metadata_builder), wrong_identifier)
return metadata_builder.Output()
def _populate_metadata_with_identifier(self, model_buf, metadata_buf,
identifier):
# For testing purposes only. MetadataPopulator cannot populate metadata with
# wrong identifiers.
model = _schema_fb.ModelT.InitFromObj(
_schema_fb.Model.GetRootAsModel(model_buf, 0))
buffer_field = _schema_fb.BufferT()
buffer_field.data = metadata_buf
model.buffers = [buffer_field]
# Creates a new metadata field.
metadata_field = _schema_fb.MetadataT()
metadata_field.name = _metadata.MetadataPopulator.METADATA_FIELD_NAME
metadata_field.buffer = len(model.buffers) - 1
model.metadata = [metadata_field]
b = flatbuffers.Builder(0)
b.Finish(model.Pack(b), identifier)
return b.Output()
class MetadataPopulatorTest(MetadataTest):
def testToValidModelFile(self):
populator = _metadata.MetadataPopulator.with_model_file(
self._empty_model_file)
self.assertIsInstance(populator, _metadata.MetadataPopulator)
def testToInvalidModelFile(self):
with self.assertRaises(IOError) as error:
_metadata.MetadataPopulator.with_model_file(self._invalid_file)
self.assertEqual("File, '{0}', does not exist.".format(self._invalid_file),
str(error.exception))
def testToValidModelBuffer(self):
populator = _metadata.MetadataPopulator.with_model_buffer(
self._empty_model_buf)
self.assertIsInstance(populator, _metadata.MetadataPopulator)
def testToInvalidModelBuffer(self):
with self.assertRaises(ValueError) as error:
_metadata.MetadataPopulator.with_model_buffer(self._invalid_model_buf)
self.assertEqual("model_buf cannot be empty.", str(error.exception))
def testToModelBufferWithWrongIdentifier(self):
model_buf = self._create_model_buffer_with_wrong_identifier()
with self.assertRaises(ValueError) as error:
_metadata.MetadataPopulator.with_model_buffer(model_buf)
self.assertEqual(
"The model provided does not have the expected identifier, and "
"may not be a valid TFLite model.", str(error.exception))
def testSinglePopulateAssociatedFile(self):
populator = _metadata.MetadataPopulator.with_model_buffer(
self._empty_model_buf)
populator.load_associated_files([self._file1])
populator.populate()
packed_files = populator.get_packed_associated_file_list()
expected_packed_files = [os.path.basename(self._file1)]
self.assertEqual(set(packed_files), set(expected_packed_files))
def testRepeatedPopulateAssociatedFile(self):
populator = _metadata.MetadataPopulator.with_model_file(
self._empty_model_file)
populator.load_associated_files([self._file1, self._file2])
# Loads file2 multiple times.
populator.load_associated_files([self._file2])
populator.populate()
packed_files = populator.get_packed_associated_file_list()
expected_packed_files = [
os.path.basename(self._file1),
os.path.basename(self._file2)
]
self.assertEqual(len(packed_files), 2)
self.assertEqual(set(packed_files), set(expected_packed_files))
# Check if the model buffer read from file is the same as that read from
# get_model_buffer().
with open(self._empty_model_file, "rb") as f:
model_buf_from_file = f.read()
model_buf_from_getter = populator.get_model_buffer()
self.assertEqual(model_buf_from_file, model_buf_from_getter)
def testPopulateInvalidAssociatedFile(self):
populator = _metadata.MetadataPopulator.with_model_buffer(
self._empty_model_buf)
with self.assertRaises(IOError) as error:
populator.load_associated_files([self._invalid_file])
self.assertEqual("File, '{0}', does not exist.".format(self._invalid_file),
str(error.exception))
def testPopulatePackedAssociatedFile(self):
populator = _metadata.MetadataPopulator.with_model_buffer(
self._empty_model_buf)
populator.load_associated_files([self._file1])
populator.populate()
with self.assertRaises(ValueError) as error:
populator.load_associated_files([self._file1])
populator.populate()
self.assertEqual(
"File, '{0}', has already been packed.".format(
os.path.basename(self._file1)), str(error.exception))
def testGetPackedAssociatedFileList(self):
populator = _metadata.MetadataPopulator.with_model_buffer(
self._empty_model_buf)
packed_files = populator.get_packed_associated_file_list()
self.assertEqual(packed_files, [])
def testPopulateMetadataFileToEmptyModelFile(self):
populator = _metadata.MetadataPopulator.with_model_file(
self._empty_model_file)
populator.load_metadata_file(self._metadata_file)
populator.load_associated_files([self._file1, self._file2])
populator.populate()
with open(self._empty_model_file, "rb") as f:
model_buf_from_file = f.read()
model = _schema_fb.Model.GetRootAsModel(model_buf_from_file, 0)
metadata_field = model.Metadata(0)
self.assertEqual(
six.ensure_str(metadata_field.Name()),
six.ensure_str(_metadata.MetadataPopulator.METADATA_FIELD_NAME))
buffer_index = metadata_field.Buffer()
buffer_data = model.Buffers(buffer_index)
metadata_buf_np = buffer_data.DataAsNumpy()
metadata_buf = metadata_buf_np.tobytes()
with open(self._metadata_file, "rb") as f:
expected_metadata_buf = bytearray(f.read())
self.assertEqual(metadata_buf, expected_metadata_buf)
recorded_files = populator.get_recorded_associated_file_list()
self.assertEqual(set(recorded_files), set(self.expected_recorded_files))
    # Up to now, we've proved the correctness of the model buffer read from
    # file. Then we'll test if get_model_buffer() gives the same model buffer.
model_buf_from_getter = populator.get_model_buffer()
self.assertEqual(model_buf_from_file, model_buf_from_getter)
def testPopulateMetadataFileWithoutAssociatedFiles(self):
populator = _metadata.MetadataPopulator.with_model_file(
self._empty_model_file)
populator.load_metadata_file(self._metadata_file)
populator.load_associated_files([self._file1])
    # Supposed to populate self._file2, because it is recorded in the metadata.
with self.assertRaises(ValueError) as error:
populator.populate()
self.assertEqual(("File, '{0}', is recorded in the metadata, but has "
"not been loaded into the populator.").format(
os.path.basename(self._file2)), str(error.exception))
def testPopulateMetadataBufferWithWrongIdentifier(self):
metadata_buf = self._create_metadata_buffer_with_wrong_identifier()
populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
with self.assertRaises(ValueError) as error:
populator.load_metadata_buffer(metadata_buf)
self.assertEqual(
"The metadata buffer does not have the expected identifier, and may not"
" be a valid TFLite Metadata.", str(error.exception))
def _assert_golden_metadata(self, model_file):
with open(model_file, "rb") as f:
model_buf_from_file = f.read()
model = _schema_fb.Model.GetRootAsModel(model_buf_from_file, 0)
# There are two elements in model.Metadata array before the population.
# Metadata should be packed to the third element in the array.
metadata_field = model.Metadata(2)
self.assertEqual(
six.ensure_str(metadata_field.Name()),
six.ensure_str(_metadata.MetadataPopulator.METADATA_FIELD_NAME))
buffer_index = metadata_field.Buffer()
buffer_data = model.Buffers(buffer_index)
metadata_buf_np = buffer_data.DataAsNumpy()
metadata_buf = metadata_buf_np.tobytes()
with open(self._metadata_file, "rb") as f:
expected_metadata_buf = bytearray(f.read())
self.assertEqual(metadata_buf, expected_metadata_buf)
def testPopulateMetadataFileToModelWithMetadataAndAssociatedFiles(self):
# First, creates a dummy metadata. Populates it and the associated files
# into the model.
model_meta = _metadata_fb.ModelMetadataT()
model_meta.name = "Mobilenet_quantized"
b = flatbuffers.Builder(0)
b.Finish(
model_meta.Pack(b),
_metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
metadata_buf = b.Output()
populator1 = _metadata.MetadataPopulator.with_model_file(self._model_file)
populator1.load_metadata_buffer(metadata_buf)
populator1.load_associated_files([self._file1, self._file2])
populator1.populate()
# Then, populates the metadata again.
populator2 = _metadata.MetadataPopulator.with_model_file(self._model_file)
populator2.load_metadata_file(self._metadata_file)
populator2.populate()
# Tests if the metadata is populated correctly.
self._assert_golden_metadata(self._model_file)
def testPopulateMetadataFileToModelFileWithMetadataAndBufFields(self):
populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
populator.load_metadata_file(self._metadata_file)
populator.load_associated_files([self._file1, self._file2])
populator.populate()
# Tests if the metadata is populated correctly.
self._assert_golden_metadata(self._model_file)
recorded_files = populator.get_recorded_associated_file_list()
self.assertEqual(set(recorded_files), set(self.expected_recorded_files))
    # Up to now, we've proved the correctness of the model buffer read from
    # file. Then we'll test if get_model_buffer() gives the same model buffer.
with open(self._model_file, "rb") as f:
model_buf_from_file = f.read()
model_buf_from_getter = populator.get_model_buffer()
self.assertEqual(model_buf_from_file, model_buf_from_getter)
def testPopulateInvalidMetadataFile(self):
populator = _metadata.MetadataPopulator.with_model_buffer(
self._empty_model_buf)
with self.assertRaises(IOError) as error:
populator.load_metadata_file(self._invalid_file)
self.assertEqual("File, '{0}', does not exist.".format(self._invalid_file),
str(error.exception))
def testPopulateInvalidMetadataBuffer(self):
populator = _metadata.MetadataPopulator.with_model_buffer(
self._empty_model_buf)
with self.assertRaises(ValueError) as error:
populator.load_metadata_buffer([])
self.assertEqual("The metadata to be populated is empty.",
str(error.exception))
def testGetModelBufferBeforePopulatingData(self):
populator = _metadata.MetadataPopulator.with_model_buffer(
self._empty_model_buf)
model_buf = populator.get_model_buffer()
expected_model_buf = self._empty_model_buf
self.assertEqual(model_buf, expected_model_buf)
class MetadataDisplayerTest(MetadataTest):
def setUp(self):
super(MetadataDisplayerTest, self).setUp()
self._model_file = self._create_model_with_metadata_and_associated_files()
def _create_model_with_metadata_and_associated_files(self):
model_buf = self._create_empty_model_buf()
model_file = self.create_tempfile().full_path
with open(model_file, "wb") as f:
f.write(model_buf)
populator = _metadata.MetadataPopulator.with_model_file(model_file)
populator.load_metadata_file(self._metadata_file)
populator.load_associated_files([self._file1, self._file2])
populator.populate()
return model_file
def test_load_model_buffer_metadataBufferWithWrongIdentifier_throwsException(
self):
model_buf = self._create_model_buffer_with_wrong_identifier()
metadata_buf = self._create_metadata_buffer_with_wrong_identifier()
model_buf = self._populate_metadata_with_identifier(
model_buf, metadata_buf,
_metadata.MetadataPopulator.TFLITE_FILE_IDENTIFIER)
with self.assertRaises(ValueError) as error:
_metadata.MetadataDisplayer.with_model_buffer(model_buf)
self.assertEqual(
"The metadata buffer does not have the expected identifier, and may not"
" be a valid TFLite Metadata.", str(error.exception))
def test_load_model_buffer_modelBufferWithWrongIdentifier_throwsException(
self):
model_buf = self._create_model_buffer_with_wrong_identifier()
metadata_file = self._create_metadata_file()
wrong_identifier = b"widn"
with open(metadata_file, "rb") as f:
metadata_buf = bytearray(f.read())
model_buf = self._populate_metadata_with_identifier(model_buf, metadata_buf,
wrong_identifier)
with self.assertRaises(ValueError) as error:
_metadata.MetadataDisplayer.with_model_buffer(model_buf)
self.assertEqual(
"The model provided does not have the expected identifier, and "
"may not be a valid TFLite model.", str(error.exception))
def test_load_model_file_invalidModelFile_throwsException(self):
with self.assertRaises(IOError) as error:
_metadata.MetadataDisplayer.with_model_file(self._invalid_file)
self.assertEqual("File, '{0}', does not exist.".format(self._invalid_file),
str(error.exception))
def test_load_model_file_modelWithoutMetadata_throwsException(self):
with self.assertRaises(ValueError) as error:
_metadata.MetadataDisplayer.with_model_file(self._empty_model_file)
self.assertEqual("The model does not have metadata.", str(error.exception))
def test_load_model_file_modelWithMetadata(self):
displayer = _metadata.MetadataDisplayer.with_model_file(self._model_file)
self.assertIsInstance(displayer, _metadata.MetadataDisplayer)
def test_load_model_buffer_modelWithOutMetadata_throwsException(self):
with self.assertRaises(ValueError) as error:
_metadata.MetadataDisplayer.with_model_buffer(
self._create_empty_model_buf())
self.assertEqual("The model does not have metadata.", str(error.exception))
def test_load_model_buffer_modelWithMetadata(self):
displayer = _metadata.MetadataDisplayer.with_model_buffer(
open(self._model_file, "rb").read())
self.assertIsInstance(displayer, _metadata.MetadataDisplayer)
def test_get_metadata_json_modelWithMetadata(self):
displayer = _metadata.MetadataDisplayer.with_model_file(self._model_file)
actual = displayer.get_metadata_json()
# Verifies the generated json file.
golden_json_file_path = resource_loader.get_path_to_datafile(
"testdata/golden_json.json")
with open(golden_json_file_path, "r") as f:
expected = f.read()
self.assertEqual(actual, expected)
def test_get_packed_associated_file_list_modelWithMetadata(self):
displayer = _metadata.MetadataDisplayer.with_model_file(self._model_file)
packed_files = displayer.get_packed_associated_file_list()
expected_packed_files = [
os.path.basename(self._file1),
os.path.basename(self._file2)
]
self.assertEqual(len(packed_files), 2)
self.assertEqual(set(packed_files), set(expected_packed_files))
if __name__ == "__main__":
test.main()
| apache-2.0 | -8,181,265,324,011,496,000 | 41.462203 | 102 | 0.71002 | false |
jghibiki/Cursed | terminal/interactive.py | 1 | 2923 |
class InitModule:
def init(self, viewer):
raise Exception("init has not been implemented.")
class InteractiveModule:
def _handle(self, viewer, ch):
raise Exception("_handle has not been implemented.")
def _handle_help(self, viewer, buf):
raise Exception("_handle_help has not been implemented.")
def _handle_combo(self, viewer, buf):
raise Exception("_handle_como has not been implemented.")
class LiveModule:
def _register_hooks(self, client):
raise Exception(self.__class__.__name__ + "._register_hooks has not been implemented.")
def _update(self, viewer):
raise Exception("_update has not been implemented.")
class TextDisplayModule:
def _show(self, viewer):
raise Exception("_show has not been implemented.")
def _hide(self, viewer):
raise Exception("_hide has not been implemented.")
class VisibleModule:
def draw(self):
raise Exception("draw has not been implemented.")
    def up(self):
        raise Exception("up has not been implemented.")
    def down(self):
        raise Exception("down has not been implemented.")
    def left(self):
        raise Exception("left has not been implemented.")
    def right(self):
        raise Exception("right has not been implemented.")
class FeatureModule:
def add_feature(self):
raise Exception("add_feature has not been implemented.")
def rm_feature(self):
raise Exception("rm_feature has not been implemented.")
def get_feature_idx(self):
raise Exception("get_feature_idx has not been implemented.")
def serialize_features(self):
raise Exception("serialize_feautes has not been implemented.")
class SavableModule:
def save(self):
raise Exception("save has not been implemented.")
class UserModule:
def up(self):
raise Exception("up has not been implemented.")
def down(self):
raise Exception("down has not been implemented.")
def left(self):
raise Exception("left has not been implemented.")
def right(self):
raise Exception("right has not been implemented.")
def vp_up(self):
raise Exception("vp_up has not been implemented.")
def vp_down(self):
raise Exception("vp_down has not been implemented.")
def vp_left(self):
raise Exception("vp_left has not been implemented.")
def vp_right(self):
raise Exception("vp_right has not been implemented.")
class NetworkModule:
pass
class ServerModule(NetworkModule):
def update(self):
raise Exception("update has not been implemented.")
class ClientModule(NetworkModule):
    def connect(self):
        raise Exception("connect has not been implemented.")
    def disconnect(self):
        raise Exception("disconnect has not been implemented.")
def update(self):
raise Exception("update has not been implemented.")
| mit | -2,809,544,733,819,406,000 | 24.640351 | 95 | 0.659938 | false |
NLeSC/eEcology-script-wrapper | script_wrapper/tests/tasks/test_gpsvis_db.py | 1 | 2958 | # Copyright 2013 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import unittest
from mock import patch
from pytz import UTC
from script_wrapper.tasks.gpsvis_db import GpsVisDB
class TestClassification(unittest.TestCase):
def test_matlab_version(self):
task = GpsVisDB()
self.assertEqual(task.matlab_version, '2012b')
def test_convert_colors_valids(self):
task = GpsVisDB()
colors = {'FFFF50': 1,
'F7E8AA': 2,
'FFA550': 3,
'5A5AFF': 4,
'BEFFFF': 5,
'8CFF8C': 6,
'FF8CFF': 7,
'AADD96': 8,
'FFD3AA': 9,
'C6C699': 10,
'E5BFC6': 11,
'DADADA': 12,
'C6B5C4': 13,
'C1D1BF': 14,
'000000': 15
}
for (code, index) in colors.iteritems():
result = task.convert_colors({'id': 1, 'color': code,
'size': 'small', 'speed': 1})
self.assertEquals(result, index)
def test_convert_colors_notfound(self):
task = GpsVisDB()
with self.assertRaises(ValueError):
task.convert_colors({'id': 1, 'color': 'blue',
'size': 'small', 'speed': 1})
@patch('script_wrapper.tasks.gpsvis_db.getGPSCount')
def test_formfields2taskargs(self, gps_count):
gps_count.return_value = 1000
task = GpsVisDB()
trackers = [{'id': 1, 'color': 'DADADA',
'size': 'small', 'speed': 1}]
formquery = {'trackers': trackers,
'start': '2013-01-01T00:00:00',
'end': '2013-10-10T00:00:00',
'alt': 'clampToGround',
}
taskargs = task.formfields2taskargs(formquery,
'postgresql://localhost')
etrackers = [{'id': 1, 'color': 12,
'size': 'small', 'speed': 1}]
etaskargs = {'db_url': 'postgresql://localhost',
'start': '2013-01-01T00:00:00',
'end': '2013-10-10T00:00:00',
'alt': 'clampToGround',
'trackers': etrackers,
}
self.assertEqual(taskargs, etaskargs)
| apache-2.0 | -4,644,690,590,407,037,000 | 34.214286 | 74 | 0.516903 | false |
nacl-webkit/chrome_deps | tools/telemetry/telemetry/page_unittest.py | 1 | 1754 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry import page
class TestPage(unittest.TestCase):
def testGetUrlBaseDirAndFileForAbsolutePath(self):
apage = page.Page('file:///somedir/otherdir/file.html',
None, # In this test, we don't need a page set.
base_dir='basedir')
dirname, filename = apage.url_base_dir_and_file
self.assertEqual(dirname, 'basedir/somedir/otherdir')
self.assertEqual(filename, 'file.html')
def testGetUrlBaseDirAndFileForRelativePath(self):
apage = page.Page('file:///../../otherdir/file.html',
None, # In this test, we don't need a page set.
base_dir='basedir')
dirname, filename = apage.url_base_dir_and_file
self.assertEqual(dirname, 'basedir/../../otherdir')
self.assertEqual(filename, 'file.html')
def testGetUrlBaseDirAndFileForUrlBaseDir(self):
apage = page.Page('file:///../../somedir/otherdir/file.html',
None, # In this test, we don't need a page set.
base_dir='basedir')
setattr(apage, 'url_base_dir', 'file:///../../somedir/')
dirname, filename = apage.url_base_dir_and_file
self.assertEqual(dirname, 'basedir/../../somedir/')
self.assertEqual(filename, 'otherdir/file.html')
def testDisplayUrlForHttp(self):
self.assertEquals(page.Page('http://www.foo.com/', None).display_url,
'www.foo.com/')
def testDisplayUrlForFile(self):
self.assertEquals(
page.Page('file:///../../otherdir/file.html', None).display_url,
'file.html')
| bsd-3-clause | 6,564,029,803,146,257,000 | 41.780488 | 73 | 0.640251 | false |
Jean-Simon-Barry/djangoproject | djangoproject/settings.py | 1 | 2131 | """
Django settings for djangoproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'stmieloldo55*n#49w!wcsz8sg3e_9bh3_pd2vs1n#(g#mpef6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'djangoproject.urls'
WSGI_APPLICATION = 'djangoproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| gpl-2.0 | -9,102,493,995,731,263,000 | 23.77907 | 71 | 0.727827 | false |
c-rack/czmq | bindings/python_cffi/czmq_cffi.py | 1 | 115567 | ################################################################################
# THIS FILE IS 100% GENERATED BY ZPROJECT; DO NOT EDIT EXCEPT EXPERIMENTALLY #
# Please refer to the README for information about making permanent changes. #
################################################################################
from __future__ import print_function
import os
import re
import sys
from ctypes.util import find_library
from pyczmq._cffi import ffi
try:
    # If LD_LIBRARY_PATH or your OS's equivalent is set, this is the only way
    # to load the library. If we use find_library below, we get the wrong result.
if os.name == 'posix':
if sys.platform == 'darwin':
libpath = 'libczmq.3.dylib'
else:
libpath = 'libczmq.so.3'
elif os.name == 'nt':
libpath = 'libczmq.dll'
lib = ffi.dlopen(libpath)
except OSError:
libpath = find_library("czmq")
if not libpath:
raise ImportError("Unable to find libczmq")
lib = ffi.dlopen(libpath)
# Custom setup for czmq
ffi.cdef('''
typedef int... time_t;
typedef int... off_t;
typedef unsigned char byte; // Single unsigned byte = 8 bits
typedef unsigned short dbyte; // Double byte = 16 bits
typedef unsigned int qbyte; // Quad byte = 32 bits
typedef int SOCKET;
// -- destroy an item
typedef void (czmq_destructor) (void **item);
// -- duplicate an item
typedef void *(czmq_duplicator) (const void *item);
// -- compare two items, for sorting
typedef int (czmq_comparator) (const void *item1, const void *item2);
''')
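# Illustrative sketch (not part of the generated bindings; the comparator name
# is hypothetical): the czmq_destructor / czmq_duplicator / czmq_comparator
# typedefs declared above can be backed by Python callables via ffi.callback.
# Shown commented out so the generated module's behaviour is unchanged:
#
#     @ffi.callback("int(const void *, const void *)")
#     def _example_comparator(item1, item2):
#         # item1/item2 arrive as cdata void pointers; cast them with
#         # ffi.cast before comparing real payloads.
#         return 0
#
# The resulting cdata object can then be passed wherever the C API expects a
# czmq_comparator.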
cdefs = '''
typedef struct _zactor_t zactor_t;
typedef struct _zsock_t zsock_t;
typedef struct _zmsg_t zmsg_t;
typedef struct _zarmour_t zarmour_t;
typedef struct _char_t char_t;
typedef struct _zcert_t zcert_t;
typedef struct _zlist_t zlist_t;
typedef struct _zcertstore_t zcertstore_t;
typedef struct _zchunk_t zchunk_t;
typedef struct _zframe_t zframe_t;
typedef struct _zclock_t zclock_t;
typedef struct _msecs_t msecs_t;
typedef struct _zconfig_t zconfig_t;
typedef struct _zdigest_t zdigest_t;
typedef struct _zdir_t zdir_t;
typedef struct _zhash_t zhash_t;
typedef struct _zdir_patch_t zdir_patch_t;
typedef struct _zfile_t zfile_t;
typedef struct _zhashx_t zhashx_t;
typedef struct _zlistx_t zlistx_t;
typedef struct _ziflist_t ziflist_t;
typedef struct _zloop_t zloop_t;
typedef struct _zmq_pollitem_t zmq_pollitem_t;
typedef struct _zpoller_t zpoller_t;
typedef struct _zproc_t zproc_t;
typedef struct _va_list_t va_list_t;
typedef struct _socket_t socket_t;
typedef struct _zstr_t zstr_t;
typedef struct _ztrie_t ztrie_t;
typedef struct _zuuid_t zuuid_t;
// Actors get a pipe and arguments from caller
typedef void (zactor_fn) (
zsock_t *pipe, void *args);
typedef enum {
ZARMOUR_MODE_BASE64_STD = 0, // Standard base 64
ZARMOUR_MODE_BASE64_URL = 1, // URL and filename friendly base 64
ZARMOUR_MODE_BASE32_STD = 2, // Standard base 32
ZARMOUR_MODE_BASE32_HEX = 3, // Extended hex base 32
ZARMOUR_MODE_BASE16 = 4, // Standard base 16
ZARMOUR_MODE_Z85 = 5 // Z85 from ZeroMQ RFC 32
} zarmour_mode_t;
//
typedef int (zconfig_fct) (
zconfig_t *self, void *arg, int level);
typedef enum {
ZDIR_PATCH_CREATE = 1, //
ZDIR_PATCH_DELETE = 2 //
} zdir_patch_op_t;
// Callback function for zhash_freefn method
typedef void (zhash_free_fn) (
void *data);
// Callback function for zhash_foreach method. Deprecated.
typedef int (zhash_foreach_fn) (
const char *key, void *item, void *argument);
// Destroy an item
typedef void (zhashx_destructor_fn) (
void **item);
// Duplicate an item
typedef void * (zhashx_duplicator_fn) (
const void *item);
// Compare two items, for sorting
typedef int (zhashx_comparator_fn) (
const void *item1, const void *item2);
// Callback function for freeing an item's data
typedef void (zhashx_free_fn) (
    void *data);
// Hash function for table keys
typedef size_t (zhashx_hash_fn) (
    const void *key);
// DEPRECATED as clumsy -- use zhashx_first/_next instead
typedef int (zhashx_foreach_fn) (
const char *key, void *item, void *argument);
// Comparison function e.g. for sorting and removing.
typedef int (zlist_compare_fn) (
void *item1, void *item2);
// Callback function for zlist_freefn method
typedef void (zlist_free_fn) (
void *data);
// Destroy an item
typedef void (zlistx_destructor_fn) (
void **item);
// Duplicate an item
typedef void * (zlistx_duplicator_fn) (
const void *item);
// Compare two items, for sorting
typedef int (zlistx_comparator_fn) (
const void *item1, const void *item2);
// Callback function for reactor socket activity
typedef int (zloop_reader_fn) (
zloop_t *loop, zsock_t *reader, void *arg);
// Callback function for reactor events (low-level)
typedef int (zloop_fn) (
zloop_t *loop, zmq_pollitem_t *item, void *arg);
// Callback for reactor timer events
typedef int (zloop_timer_fn) (
zloop_t *loop, int timer_id, void *arg);
// Callback function for ztrie_node to destroy node data.
typedef void (ztrie_destroy_data_fn) (
void **data);
// CLASS: zactor
// Create a new actor passing arbitrary arguments reference.
zactor_t *
zactor_new (zactor_fn task, void *args);
// Destroy an actor.
void
zactor_destroy (zactor_t **self_p);
// Send a zmsg message to the actor, take ownership of the message
// and destroy when it has been sent.
int
zactor_send (zactor_t *self, zmsg_t **msg_p);
// Receive a zmsg message from the actor. Returns NULL if the actor
// was interrupted before the message could be received, or if there
// was a timeout on the actor.
zmsg_t *
zactor_recv (zactor_t *self);
// Probe the supplied object, and report if it looks like a zactor_t.
bool
zactor_is (void *self);
// Probe the supplied reference. If it looks like a zactor_t instance,
// return the underlying libzmq actor handle; else if it looks like
// a libzmq actor handle, return the supplied value.
void *
zactor_resolve (void *self);
// Return the actor's zsock handle. Use this when you absolutely need
// to work with the zsock instance rather than the actor.
zsock_t *
zactor_sock (zactor_t *self);
// Self test of this class.
void
zactor_test (bool verbose);
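// Illustrative usage sketch (comment only; echo_task is a hypothetical actor
// function, and zsock_signal/zstr_send/zstr_recv/zstr_free are the usual CZMQ
// helpers declared elsewhere in this file):
//
//     static void
//     echo_task (zsock_t *pipe, void *args)
//     {
//         zsock_signal (pipe, 0);          // Tell caller we are ready
//         ...                              // Serve commands from the pipe
//     }
//
//     zactor_t *actor = zactor_new (echo_task, NULL);
//     zstr_send (actor, "ECHO");
//     char *reply = zstr_recv (actor);
//     zstr_free (&reply);
//     zactor_destroy (&actor);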
// CLASS: zarmour
// Create a new zarmour.
zarmour_t *
zarmour_new (void);
// Destroy the zarmour.
void
zarmour_destroy (zarmour_t **self_p);
// Encode a stream of bytes into an armoured string. Returns the armoured
// string, or NULL if there was insufficient memory available to allocate
// a new string.
char *
zarmour_encode (zarmour_t *self, const byte *data, size_t size);
// Decode an armoured string into a string of bytes.
// The decoded output is null-terminated, so it may be treated
// as a string, if that's what it was prior to encoding.
byte *
zarmour_decode (zarmour_t *self, const char *data, size_t *decode_size);
// Get the mode property.
zarmour_mode_t
zarmour_mode (zarmour_t *self);
// Get printable string for mode.
const char *
zarmour_mode_str (zarmour_t *self);
// Set the mode property.
void
zarmour_set_mode (zarmour_t *self, zarmour_mode_t mode);
// Return true if padding is turned on.
bool
zarmour_pad (zarmour_t *self);
// Turn padding on or off. Default is on.
void
zarmour_set_pad (zarmour_t *self, bool pad);
// Get the padding character.
char
zarmour_pad_char (zarmour_t *self);
// Set the padding character.
void
zarmour_set_pad_char (zarmour_t *self, char pad_char);
// Return if splitting output into lines is turned on. Default is off.
bool
zarmour_line_breaks (zarmour_t *self);
// Turn splitting output into lines on or off.
void
zarmour_set_line_breaks (zarmour_t *self, bool line_breaks);
// Get the line length used for splitting lines.
size_t
zarmour_line_length (zarmour_t *self);
// Set the line length used for splitting lines.
void
zarmour_set_line_length (zarmour_t *self, size_t line_length);
// Print properties of object
void
zarmour_print (zarmour_t *self);
// Self test of this class.
void
zarmour_test (bool verbose);
// CLASS: zcert
// Create and initialize a new certificate in memory
zcert_t *
zcert_new (void);
// Accepts public/secret key pair from caller
zcert_t *
zcert_new_from (const byte *public_key, const byte *secret_key);
// Load certificate from file
zcert_t *
zcert_load (const char *filename);
// Destroy a certificate in memory
void
zcert_destroy (zcert_t **self_p);
// Return public part of key pair as 32-byte binary string
byte *
zcert_public_key (zcert_t *self);
// Return secret part of key pair as 32-byte binary string
byte *
zcert_secret_key (zcert_t *self);
// Return public part of key pair as Z85 armored string
char *
zcert_public_txt (zcert_t *self);
// Return secret part of key pair as Z85 armored string
char *
zcert_secret_txt (zcert_t *self);
// Set certificate metadata from formatted string.
void
zcert_set_meta (zcert_t *self, const char *name, const char *format, ...);
// Unset certificate metadata.
void
zcert_unset_meta (zcert_t *self, const char *name);
// Get metadata value from certificate; if the metadata value doesn't
// exist, returns NULL.
char *
zcert_meta (zcert_t *self, const char *name);
// Get list of metadata fields from certificate. Caller is responsible for
// destroying list. Caller should not modify the values of list items.
zlist_t *
zcert_meta_keys (zcert_t *self);
// Save full certificate (public + secret) to file for persistent storage
// This creates one public file and one secret file (filename + "_secret").
int
zcert_save (zcert_t *self, const char *filename);
// Save public certificate only to file for persistent storage
int
zcert_save_public (zcert_t *self, const char *filename);
// Save secret certificate only to file for persistent storage
int
zcert_save_secret (zcert_t *self, const char *filename);
// Apply certificate to socket, i.e. use for CURVE security on socket.
// If certificate was loaded from public file, the secret key will be
// undefined, and this certificate will not work successfully.
void
zcert_apply (zcert_t *self, void *zocket);
// Return copy of certificate; if certificate is NULL or we exhausted
// heap memory, returns NULL.
zcert_t *
zcert_dup (zcert_t *self);
// Return true if two certificates have the same keys
bool
zcert_eq (zcert_t *self, zcert_t *compare);
// Print certificate contents to stdout
void
zcert_print (zcert_t *self);
// DEPRECATED as incompatible with centralized logging
// Print certificate contents to open stream
void
zcert_fprint (zcert_t *self, FILE *file);
// Self test of this class
void
zcert_test (bool verbose);
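// Illustrative usage sketch (comment only; the filename and the 'socket'
// variable are hypothetical, and CURVE security requires a libzmq built with
// CURVE support):
//
//     zcert_t *cert = zcert_new ();
//     zcert_set_meta (cert, "name", "%s", "my-service");
//     zcert_save (cert, "my-service.cert");
//     zcert_apply (cert, socket);
//     zcert_destroy (&cert);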
// CLASS: zcertstore
// Create a new certificate store from a disk directory, loading and
// indexing all certificates in that location. The directory itself may be
// absent, and created later, or modified at any time. The certificate store
// is automatically refreshed on any zcertstore_lookup() call. If the
// location is specified as NULL, creates a pure-memory store, which you
// can work with by inserting certificates at runtime.
zcertstore_t *
zcertstore_new (const char *location);
// Destroy a certificate store object in memory. Does not affect anything
// stored on disk.
void
zcertstore_destroy (zcertstore_t **self_p);
// Look up certificate by public key, returns zcert_t object if found,
// else returns NULL. The public key is provided in Z85 text format.
zcert_t *
zcertstore_lookup (zcertstore_t *self, const char *public_key);
// Insert certificate into certificate store in memory. Note that this
// does not save the certificate to disk. To do that, use zcert_save()
// directly on the certificate. Takes ownership of zcert_t object.
void
zcertstore_insert (zcertstore_t *self, zcert_t **cert_p);
// Print list of certificates in store to logging facility
void
zcertstore_print (zcertstore_t *self);
// DEPRECATED as incompatible with centralized logging
// Print list of certificates in store to open stream
void
zcertstore_fprint (zcertstore_t *self, FILE *file);
// Self test of this class
void
zcertstore_test (bool verbose);
// CLASS: zchunk
// Create a new chunk of the specified size. If you specify the data, it
// is copied into the chunk. If you do not specify the data, the chunk is
// allocated and left empty, and you can then add data using zchunk_append.
zchunk_t *
zchunk_new (const void *data, size_t size);
// Destroy a chunk
void
zchunk_destroy (zchunk_t **self_p);
// Resizes chunk max_size as requested; chunk_cur size is set to zero
void
zchunk_resize (zchunk_t *self, size_t size);
// Return chunk cur size
size_t
zchunk_size (zchunk_t *self);
// Return chunk max size
size_t
zchunk_max_size (zchunk_t *self);
// Return chunk data
byte *
zchunk_data (zchunk_t *self);
// Set chunk data from user-supplied data; truncate if too large. Data may
// be null. Returns actual size of chunk
size_t
zchunk_set (zchunk_t *self, const void *data, size_t size);
// Fill chunk data from user-supplied octet
size_t
zchunk_fill (zchunk_t *self, byte filler, size_t size);
// Append user-supplied data to chunk, return resulting chunk size. If the
// data would exceed the available space, it is truncated. If you want to
// grow the chunk to accommodate new data, use the zchunk_extend method.
size_t
zchunk_append (zchunk_t *self, const void *data, size_t size);
// Append user-supplied data to chunk, return resulting chunk size. If the
// data would exceed the available space, the chunk grows in size.
size_t
zchunk_extend (zchunk_t *self, const void *data, size_t size);
// Copy as much data from 'source' into the chunk as possible; returns the
// new size of chunk. If all data from 'source' is used, the source chunk is
// marked as exhausted. Source can be consumed as many times as needed until
// it is exhausted. If source was already exhausted, does not change chunk.
size_t
zchunk_consume (zchunk_t *self, zchunk_t *source);
// Returns true if the chunk was exhausted by consume methods, or if the
// chunk has a size of zero.
bool
zchunk_exhausted (zchunk_t *self);
// Read chunk from an open file descriptor
zchunk_t *
zchunk_read (FILE *handle, size_t bytes);
// Write chunk to an open file descriptor
int
zchunk_write (zchunk_t *self, FILE *handle);
// Try to slurp an entire file into a chunk. Will read up to maxsize of
// the file. If maxsize is 0, will attempt to read the entire file and
// fail with an assertion if that cannot fit into memory. Returns a new
// chunk containing the file data, or NULL if the file could not be read.
zchunk_t *
zchunk_slurp (const char *filename, size_t maxsize);
// Create copy of chunk, as new chunk object. Returns a fresh zchunk_t
// object, or null if there was not enough heap memory. If chunk is null,
// returns null.
zchunk_t *
zchunk_dup (zchunk_t *self);
// Return chunk data encoded as printable hex string. Caller must free
// string when finished with it.
char *
zchunk_strhex (zchunk_t *self);
// Return chunk data copied into freshly allocated string
// Caller must free string when finished with it.
char *
zchunk_strdup (zchunk_t *self);
// Return TRUE if chunk body is equal to string, excluding terminator
bool
zchunk_streq (zchunk_t *self, const char *string);
// Transform zchunk into a zframe that can be sent in a message.
zframe_t *
zchunk_pack (zchunk_t *self);
// Transform a zframe into a zchunk.
zchunk_t *
zchunk_unpack (zframe_t *frame);
// Calculate SHA1 digest for chunk, using zdigest class.
const char *
zchunk_digest (zchunk_t *self);
// Dump chunk to FILE stream, for debugging and tracing.
void
zchunk_fprint (zchunk_t *self, FILE *file);
// Dump message to stderr, for debugging and tracing.
// See zchunk_fprint for details
void
zchunk_print (zchunk_t *self);
// Probe the supplied object, and report if it looks like a zchunk_t.
bool
zchunk_is (void *self);
// Self test of this class.
void
zchunk_test (bool verbose);
// CLASS: zclock
// Sleep for a number of milliseconds
void
zclock_sleep (int msecs);
// Return current system clock as milliseconds. Note that this clock can
// jump backwards (if the system clock is changed) so is unsafe to use for
// timers and time offsets. Use zclock_mono for that instead.
int64_t
zclock_time (void);
// Return current monotonic clock in milliseconds. Use this when you compute
// time offsets. The monotonic clock is not affected by system changes and
// so will never be reset backwards, unlike a system clock.
int64_t
zclock_mono (void);
// Return current monotonic clock in microseconds. Use this when you compute
// time offsets. The monotonic clock is not affected by system changes and
// so will never be reset backwards, unlike a system clock.
int64_t
zclock_usecs (void);
// Return formatted date/time as fresh string. Free using zstr_free().
char *
zclock_timestr (void);
// Self test of this class.
void
zclock_test (bool verbose);
// CLASS: zconfig
// Create new config item
zconfig_t *
zconfig_new (const char *name, zconfig_t *parent);
// Destroy a config item and all its children
void
zconfig_destroy (zconfig_t **self_p);
// Load a config tree from a specified ZPL text file; returns a zconfig_t
// reference for the root, if the file exists and is readable. Returns NULL
// if the file does not exist.
zconfig_t *
zconfig_load (const char *filename);
// Equivalent to zconfig_load, taking a format string instead of a fixed
// filename.
zconfig_t *
zconfig_loadf (const char *format, ...);
// Return name of config item
char *
zconfig_name (zconfig_t *self);
// Return value of config item
char *
zconfig_value (zconfig_t *self);
// Insert or update configuration key with value
void
zconfig_put (zconfig_t *self, const char *path, const char *value);
// Equivalent to zconfig_put, accepting a format specifier and variable
// argument list, instead of a single string value.
void
zconfig_putf (zconfig_t *self, const char *path, const char *format, ...);
// Get value for config item into a string value; leading slash is optional
// and ignored.
char *
zconfig_get (zconfig_t *self, const char *path, const char *default_value);
// Set config item name, name may be NULL
void
zconfig_set_name (zconfig_t *self, const char *name);
// Set new value for config item. The new value may be a string, a printf
// format, or NULL. Note that if string may possibly contain '%', or if it
// comes from an insecure source, you must use '%s' as the format, followed
// by the string.
void
zconfig_set_value (zconfig_t *self, const char *format, ...);
// Find our first child, if any
zconfig_t *
zconfig_child (zconfig_t *self);
// Find our first sibling, if any
zconfig_t *
zconfig_next (zconfig_t *self);
// Find a config item along a path; leading slash is optional and ignored.
zconfig_t *
zconfig_locate (zconfig_t *self, const char *path);
// Locate the last config item at a specified depth
zconfig_t *
zconfig_at_depth (zconfig_t *self, int level);
// Execute a callback for each config item in the tree; returns zero if
// successful, else -1.
int
zconfig_execute (zconfig_t *self, zconfig_fct handler, void *arg);
// Add comment to config item before saving to disk. You can add as many
// comment lines as you like. If you use a null format, all comments are
// deleted.
void
zconfig_set_comment (zconfig_t *self, const char *format, ...);
// Return comments of config item, as zlist.
zlist_t *
zconfig_comments (zconfig_t *self);
// Save a config tree to a specified ZPL text file, where a filename
// "-" means dump to standard output.
int
zconfig_save (zconfig_t *self, const char *filename);
// Equivalent to zconfig_save, taking a format string instead of a fixed
// filename.
int
zconfig_savef (zconfig_t *self, const char *format, ...);
// Report filename used during zconfig_load, or NULL if none
const char *
zconfig_filename (zconfig_t *self);
// Reload config tree from same file that it was previously loaded from.
// Returns 0 if OK, -1 if there was an error (and then does not change
// existing data).
int
zconfig_reload (zconfig_t **self_p);
// Load a config tree from a memory chunk
zconfig_t *
zconfig_chunk_load (zchunk_t *chunk);
// Save a config tree to a new memory chunk
zchunk_t *
zconfig_chunk_save (zconfig_t *self);
// Load a config tree from a null-terminated string
zconfig_t *
zconfig_str_load (const char *string);
// Save a config tree to a new null terminated string
char *
zconfig_str_save (zconfig_t *self);
// Return true if a configuration tree was loaded from a file and that
// file has changed in since the tree was loaded.
bool
zconfig_has_changed (zconfig_t *self);
// Print the config file to open stream
void
zconfig_fprint (zconfig_t *self, FILE *file);
// Print properties of object
void
zconfig_print (zconfig_t *self);
// Self test of this class
void
zconfig_test (bool verbose);
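// Illustrative usage sketch (comment only; "myapp.cfg" and the config paths
// are hypothetical):
//
//     zconfig_t *root = zconfig_load ("myapp.cfg");
//     char *endpoint = zconfig_get (root, "/server/bind", "tcp://*:5555");
//     zconfig_put (root, "/server/verbose", "1");
//     zconfig_save (root, "myapp.cfg");
//     zconfig_destroy (&root);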
// CLASS: zdigest
// Constructor - creates new digest object, which you use to build up a
// digest by repeatedly calling zdigest_update() on chunks of data.
zdigest_t *
zdigest_new (void);
// Destroy a digest object
void
zdigest_destroy (zdigest_t **self_p);
// Add buffer into digest calculation
void
zdigest_update (zdigest_t *self, byte *buffer, size_t length);
// Return final digest hash data. If built without crypto support, returns
// NULL.
byte *
zdigest_data (zdigest_t *self);
// Return final digest hash size
size_t
zdigest_size (zdigest_t *self);
// Return digest as printable hex string; caller should not modify nor
// free this string. After calling this, you may not use zdigest_update()
// on the same digest. If built without crypto support, returns NULL.
char *
zdigest_string (zdigest_t *self);
// Self test of this class.
void
zdigest_test (bool verbose);
// CLASS: zdir
// Create a new directory item that loads in the full tree of the specified
// path, optionally located under some parent path. If parent is "-", then
// loads only the top-level directory, and does not use parent as a path.
zdir_t *
zdir_new (const char *path, const char *parent);
// Destroy a directory tree and all children it contains.
void
zdir_destroy (zdir_t **self_p);
// Return directory path
const char *
zdir_path (zdir_t *self);
// Return last modification time for directory.
time_t
zdir_modified (zdir_t *self);
// Return total hierarchy size, in bytes of data contained in all files
// in the directory tree.
off_t
zdir_cursize (zdir_t *self);
// Return directory count
size_t
zdir_count (zdir_t *self);
// Returns a sorted list of zfile objects; Each entry in the list is a pointer
// to a zfile_t item already allocated in the zdir tree. Do not destroy the
// original zdir tree until you are done with this list.
zlist_t *
zdir_list (zdir_t *self);
// Remove directory, optionally including all files that it contains, at
// all levels. If force is false, will only remove the directory if empty.
// If force is true, will remove all files and all subdirectories.
void
zdir_remove (zdir_t *self, bool force);
// Calculate differences between two versions of a directory tree.
// Returns a list of zdir_patch_t patches. Either older or newer may
// be null, indicating the directory is empty/absent. If alias is set,
// generates virtual filename (minus path, plus alias).
zlist_t *
zdir_diff (zdir_t *older, zdir_t *newer, const char *alias);
// Return full contents of directory as a zdir_patch list.
zlist_t *
zdir_resync (zdir_t *self, const char *alias);
// Load directory cache; returns a hash table containing the SHA-1 digests
// of every file in the tree. The cache is saved between runs in .cache.
zhash_t *
zdir_cache (zdir_t *self);
// Print contents of directory to open stream
void
zdir_fprint (zdir_t *self, FILE *file, int indent);
// Print contents of directory to stdout
void
zdir_print (zdir_t *self, int indent);
// Create a new zdir_watch actor instance:
//
// zactor_t *watch = zactor_new (zdir_watch, NULL);
//
// Destroy zdir_watch instance:
//
// zactor_destroy (&watch);
//
// Enable verbose logging of commands and activity:
//
// zstr_send (watch, "VERBOSE");
//
// Subscribe to changes to a directory path:
//
// zsock_send (watch, "ss", "SUBSCRIBE", "directory_path");
//
// Unsubscribe from changes to a directory path:
//
// zsock_send (watch, "ss", "UNSUBSCRIBE", "directory_path");
//
// Receive directory changes:
// zsock_recv (watch, "sp", &path, &patches);
//
// // Delete the received data.
// free (path);
// zlist_destroy (&patches);
void
zdir_watch (zsock_t *pipe, void *unused);
// Self test of this class.
void
zdir_test (bool verbose);
// CLASS: zdir_patch
// Create new patch
zdir_patch_t *
zdir_patch_new (const char *path, zfile_t *file, zdir_patch_op_t op, const char *alias);
// Destroy a patch
void
zdir_patch_destroy (zdir_patch_t **self_p);
// Create copy of a patch. If the patch is null, or memory was exhausted,
// returns null.
zdir_patch_t *
zdir_patch_dup (zdir_patch_t *self);
// Return patch file directory path
const char *
zdir_patch_path (zdir_patch_t *self);
// Return patch file item
zfile_t *
zdir_patch_file (zdir_patch_t *self);
// Return operation
zdir_patch_op_t
zdir_patch_op (zdir_patch_t *self);
// Return patch virtual file path
const char *
zdir_patch_vpath (zdir_patch_t *self);
// Calculate hash digest for file (create only)
void
zdir_patch_digest_set (zdir_patch_t *self);
// Return hash digest for patch file
const char *
zdir_patch_digest (zdir_patch_t *self);
// Self test of this class.
void
zdir_patch_test (bool verbose);
// CLASS: zfile
// If file exists, populates properties. CZMQ supports portable symbolic
// links, which are files with the extension ".ln". A symbolic link is a
// text file containing one line, the filename of a target file. Reading
// data from the symbolic link actually reads from the target file. Path
// may be NULL, in which case it is not used.
zfile_t *
zfile_new (const char *path, const char *name);
// Destroy a file item
void
zfile_destroy (zfile_t **self_p);
// Duplicate a file item, returns a newly constructed item. If the file
// is null, or memory was exhausted, returns null.
zfile_t *
zfile_dup (zfile_t *self);
// Return file name, remove path if provided
const char *
zfile_filename (zfile_t *self, const char *path);
// Refresh file properties from disk; this is not done automatically
// on access methods, otherwise it is not possible to compare directory
// snapshots.
void
zfile_restat (zfile_t *self);
// Return when the file was last modified. If you want this to reflect the
// current situation, call zfile_restat before checking this property.
time_t
zfile_modified (zfile_t *self);
// Return the last-known size of the file. If you want this to reflect the
// current situation, call zfile_restat before checking this property.
off_t
zfile_cursize (zfile_t *self);
// Return true if the file is a directory. If you want this to reflect
// any external changes, call zfile_restat before checking this property.
bool
zfile_is_directory (zfile_t *self);
// Return true if the file is a regular file. If you want this to reflect
// any external changes, call zfile_restat before checking this property.
bool
zfile_is_regular (zfile_t *self);
// Return true if the file is readable by this process. If you want this to
// reflect any external changes, call zfile_restat before checking this
// property.
bool
zfile_is_readable (zfile_t *self);
// Return true if the file is writeable by this process. If you want this
// to reflect any external changes, call zfile_restat before checking this
// property.
bool
zfile_is_writeable (zfile_t *self);
// Check if file has stopped changing and can be safely processed.
// Updates the file statistics from disk at every call.
bool
zfile_is_stable (zfile_t *self);
// Return true if the file was changed on disk since the zfile_t object
// was created, or the last zfile_restat() call made on it.
bool
zfile_has_changed (zfile_t *self);
// Remove the file from disk
void
zfile_remove (zfile_t *self);
// Open file for reading
// Returns 0 if OK, -1 if not found or not accessible
int
zfile_input (zfile_t *self);
// Open file for writing, creating directory if needed
// File is created if necessary; chunks can be written to file at any
// location. Returns 0 if OK, -1 if error.
int
zfile_output (zfile_t *self);
// Read chunk from file at specified position. If this was the last chunk,
// sets the eof property. Returns a null chunk in case of error.
zchunk_t *
zfile_read (zfile_t *self, size_t bytes, off_t offset);
// Returns true if zfile_read() just read the last chunk in the file.
bool
zfile_eof (zfile_t *self);
// Write chunk to file at specified position
// Return 0 if OK, else -1
int
zfile_write (zfile_t *self, zchunk_t *chunk, off_t offset);
// Read next line of text from file. Returns a pointer to the text line,
// or NULL if there was nothing more to read from the file.
const char *
zfile_readln (zfile_t *self);
// Close file, if open
void
zfile_close (zfile_t *self);
// Return file handle, if opened
FILE *
zfile_handle (zfile_t *self);
// Calculate SHA1 digest for file, using zdigest class.
const char *
zfile_digest (zfile_t *self);
// Self test of this class.
void
zfile_test (bool verbose);
// CLASS: zframe
// Create a new frame. If size is not null, allocates the frame data
// to the specified size. If additionally, data is not null, copies
// size octets from the specified data into the frame body.
zframe_t *
zframe_new (const void *data, size_t size);
// Destroy a frame
void
zframe_destroy (zframe_t **self_p);
// Create an empty (zero-sized) frame
zframe_t *
zframe_new_empty (void);
// Create a frame with a specified string content.
zframe_t *
zframe_from (const char *string);
// Receive frame from socket, returns zframe_t object or NULL if the recv
// was interrupted. Does a blocking recv; if you do not want to block, use
// zpoller or zloop.
zframe_t *
zframe_recv (void *source);
// Send a frame to a socket, destroy frame after sending.
// Return -1 on error, 0 on success.
int
zframe_send (zframe_t **self_p, void *dest, int flags);
// Return number of bytes in frame data
size_t
zframe_size (zframe_t *self);
// Return address of frame data
byte *
zframe_data (zframe_t *self);
// Create a new frame that duplicates an existing frame. If frame is null,
// or memory was exhausted, returns null.
zframe_t *
zframe_dup (zframe_t *self);
// Return frame data encoded as printable hex string, useful for 0MQ UUIDs.
// Caller must free string when finished with it.
char *
zframe_strhex (zframe_t *self);
// Return frame data copied into freshly allocated string
// Caller must free string when finished with it.
char *
zframe_strdup (zframe_t *self);
// Return TRUE if frame body is equal to string, excluding terminator
bool
zframe_streq (zframe_t *self, const char *string);
// Return frame MORE indicator (1 or 0), set when reading frame from socket
// or by the zframe_set_more() method
int
zframe_more (zframe_t *self);
// Set frame MORE indicator (1 or 0). Note this is NOT used when sending
// frame to socket, you have to specify flag explicitly.
void
zframe_set_more (zframe_t *self, int more);
// Return frame routing ID, if the frame came from a ZMQ_SERVER socket.
// Else returns zero.
uint32_t
zframe_routing_id (zframe_t *self);
// Set routing ID on frame. This is used if/when the frame is sent to a
// ZMQ_SERVER socket.
void
zframe_set_routing_id (zframe_t *self, uint32_t routing_id);
// Return TRUE if two frames have identical size and data
// If either frame is NULL, equality is always false.
bool
zframe_eq (zframe_t *self, zframe_t *other);
// Set new contents for frame
void
zframe_reset (zframe_t *self, const void *data, size_t size);
// Send message to zsys log sink (may be stdout, or system facility as
// configured by zsys_set_logstream). Prefix shows before frame, if not null.
void
zframe_print (zframe_t *self, const char *prefix);
// Probe the supplied object, and report if it looks like a zframe_t.
bool
zframe_is (void *self);
// Self test of this class.
void
zframe_test (bool verbose);
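// Illustrative usage sketch (not part of the generated API above): round-trip
// a frame over a PAIR socket pair. The inproc endpoint name is a placeholder;
// the zsock constructors used here are declared further down in this listing.
//
//     zsock_t *writer = zsock_new_pair ("@inproc://zframe.example");
//     zsock_t *reader = zsock_new_pair (">inproc://zframe.example");
//     zframe_t *frame = zframe_from ("Hello");
//     zframe_send (&frame, writer, 0);
//     zframe_t *received = zframe_recv (reader);
//     assert (zframe_streq (received, "Hello"));
//     zframe_destroy (&received);
//     zsock_destroy (&writer);
//     zsock_destroy (&reader);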
// CLASS: zhash
// Create a new, empty hash container
zhash_t *
zhash_new (void);
// Destroy a hash container and all items in it
void
zhash_destroy (zhash_t **self_p);
// Unpack binary frame into a new hash table. Packed data must follow format
// defined by zhash_pack. Hash table is set to autofree. An empty frame
// unpacks to an empty hash table.
zhash_t *
zhash_unpack (zframe_t *frame);
// Insert item into hash table with specified key and item.
// If the key is already present, returns -1 and leaves the existing item unchanged.
// Returns 0 on success.
int
zhash_insert (zhash_t *self, const char *key, void *item);
// Update an item in the hash table with the specified key and item.
// If key is already present, destroys old item and inserts new one.
// Use free_fn method to ensure deallocator is properly called on item.
void
zhash_update (zhash_t *self, const char *key, void *item);
// Remove an item specified by key from the hash table. If there was no such
// item, this function does nothing.
void
zhash_delete (zhash_t *self, const char *key);
// Return the item at the specified key, or null
void *
zhash_lookup (zhash_t *self, const char *key);
// Reindexes an item from an old key to a new key. If there was no such
// item, does nothing. Returns 0 if successful, else -1.
int
zhash_rename (zhash_t *self, const char *old_key, const char *new_key);
// Set a free function for the specified hash table item. When the item is
// destroyed, the free function, if any, is called on that item.
// Use this when hash items are dynamically allocated, to ensure that
// you don't have memory leaks. You can pass 'free' or NULL as a free_fn.
// Returns the item, or NULL if there is no such item.
void *
zhash_freefn (zhash_t *self, const char *key, zhash_free_fn free_fn);
// Return the number of keys/items in the hash table
size_t
zhash_size (zhash_t *self);
// Make copy of hash table; if supplied table is null, returns null.
// Does not copy items themselves. Rebuilds new table so may be slow on
// very large tables. NOTE: only works with item values that are strings
// since there's no other way to know how to duplicate the item value.
zhash_t *
zhash_dup (zhash_t *self);
// Return keys for items in table
zlist_t *
zhash_keys (zhash_t *self);
// Simple iterator; returns first item in hash table, in no given order,
// or NULL if the table is empty. This method is simpler to use than the
// foreach() method, which is deprecated. To access the key for this item
// use zhash_cursor(). NOTE: do NOT modify the table while iterating.
void *
zhash_first (zhash_t *self);
// Simple iterator; returns next item in hash table, in no given order,
// or NULL if the last item was already returned. Use this together with
// zhash_first() to process all items in a hash table. If you need the
// items in sorted order, use zhash_keys() and then zlist_sort(). To
// access the key for this item use zhash_cursor(). NOTE: do NOT modify
// the table while iterating.
void *
zhash_next (zhash_t *self);
// After a successful first/next method, returns the key for the item that
// was returned. This is a constant string that you may not modify or
// deallocate, and which lasts as long as the item in the hash. After an
// unsuccessful first/next, returns NULL.
const char *
zhash_cursor (zhash_t *self);
// Add a comment to hash table before saving to disk. You can add as many
// comment lines as you like. These comment lines are discarded when loading
// the file. If you use a null format, all comments are deleted.
void
zhash_comment (zhash_t *self, const char *format, ...);
// Serialize hash table to a binary frame that can be sent in a message.
// The packed format is compatible with the 'dictionary' type defined in
// http://rfc.zeromq.org/spec:35/FILEMQ, and implemented by zproto:
//
// ; A list of name/value pairs
// dictionary = dict-count *( dict-name dict-value )
// dict-count = number-4
// dict-value = longstr
// dict-name = string
//
// ; Strings are always length + text contents
// longstr = number-4 *VCHAR
// string = number-1 *VCHAR
//
// ; Numbers are unsigned integers in network byte order
// number-1 = 1OCTET
// number-4 = 4OCTET
//
// Comments are not included in the packed data. Item values MUST be
// strings.
zframe_t *
zhash_pack (zhash_t *self);
// Save hash table to a text file in name=value format. Hash values must be
// printable strings; keys may not contain '=' character. Returns 0 if OK,
// else -1 if a file error occurred.
int
zhash_save (zhash_t *self, const char *filename);
// Load hash table from a text file in name=value format; hash table must
// already exist. Hash values must be printable strings; keys may not contain
// '=' character. Returns 0 if OK, else -1 if a file was not readable.
int
zhash_load (zhash_t *self, const char *filename);
// When a hash table was loaded from a file by zhash_load, this method will
// reload the file if it has been modified since, and is "stable", i.e. not
// still changing. Returns 0 if OK, -1 if there was an error reloading the
// file.
int
zhash_refresh (zhash_t *self);
// Set hash for automatic value destruction
void
zhash_autofree (zhash_t *self);
// Apply function to each item in the hash table. Items are iterated in no
// defined order. Stops if callback function returns non-zero and returns
// final return code from callback function (zero = success). Deprecated.
int
zhash_foreach (zhash_t *self, zhash_foreach_fn callback, void *argument);
// Self test of this class.
void
zhash_test (bool verbose);
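// Illustrative usage sketch (not part of the generated API above): a small
// string-to-string table using autofree, so values are copied and freed by
// the table. streq is the helper macro from czmq_prelude.h.
//
//     zhash_t *hash = zhash_new ();
//     zhash_autofree (hash);                  //  values are copied strings
//     zhash_insert (hash, "colour", "green");
//     char *value = (char *) zhash_lookup (hash, "colour");
//     assert (streq (value, "green"));
//     zhash_delete (hash, "colour");
//     zhash_destroy (&hash);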
// CLASS: zhashx
// Create a new, empty hash container
zhashx_t *
zhashx_new (void);
// Destroy a hash container and all items in it
void
zhashx_destroy (zhashx_t **self_p);
// Unpack binary frame into a new hash table. Packed data must follow format
// defined by zhashx_pack. Hash table is set to autofree. An empty frame
// unpacks to an empty hash table.
zhashx_t *
zhashx_unpack (zframe_t *frame);
// Insert item into hash table with specified key and item.
// If the key is already present, returns -1 and leaves the existing item unchanged.
// Returns 0 on success.
int
zhashx_insert (zhashx_t *self, const void *key, void *item);
// Update or insert item into hash table with specified key and item. If the
// key is already present, destroys old item and inserts new one. If you set
// a container item destructor, this is called on the old value. If the key
// was not already present, inserts a new item. Sets the hash cursor to the
// new item.
void
zhashx_update (zhashx_t *self, const void *key, void *item);
// Remove an item specified by key from the hash table. If there was no such
// item, this function does nothing.
void
zhashx_delete (zhashx_t *self, const void *key);
// Delete all items from the hash table. If the key destructor is
// set, calls it on every key. If the item destructor is set, calls
// it on every item.
void
zhashx_purge (zhashx_t *self);
// Return the item at the specified key, or null
void *
zhashx_lookup (zhashx_t *self, const void *key);
// Reindexes an item from an old key to a new key. If there was no such
// item, does nothing. Returns 0 if successful, else -1.
int
zhashx_rename (zhashx_t *self, const void *old_key, const void *new_key);
// Set a free function for the specified hash table item. When the item is
// destroyed, the free function, if any, is called on that item.
// Use this when hash items are dynamically allocated, to ensure that
// you don't have memory leaks. You can pass 'free' or NULL as a free_fn.
// Returns the item, or NULL if there is no such item.
void *
zhashx_freefn (zhashx_t *self, const void *key, zhashx_free_fn free_fn);
// Return the number of keys/items in the hash table
size_t
zhashx_size (zhashx_t *self);
// Return a zlistx_t containing the keys for the items in the
// table. Uses the key_duplicator to duplicate all keys and sets the
// key_destructor as destructor for the list.
zlistx_t *
zhashx_keys (zhashx_t *self);
// Return a zlistx_t containing the values for the items in the
// table. Uses the duplicator to duplicate all items and sets the
// destructor as destructor for the list.
zlistx_t *
zhashx_values (zhashx_t *self);
// Simple iterator; returns first item in hash table, in no given order,
// or NULL if the table is empty. This method is simpler to use than the
// foreach() method, which is deprecated. To access the key for this item
// use zhashx_cursor(). NOTE: do NOT modify the table while iterating.
void *
zhashx_first (zhashx_t *self);
// Simple iterator; returns next item in hash table, in no given order,
// or NULL if the last item was already returned. Use this together with
// zhashx_first() to process all items in a hash table. If you need the
// items in sorted order, use zhashx_keys() and then zlistx_sort(). To
// access the key for this item use zhashx_cursor(). NOTE: do NOT modify
// the table while iterating.
void *
zhashx_next (zhashx_t *self);
// After a successful first/next method, returns the key for the item that
// was returned. This is a constant string that you may not modify or
// deallocate, and which lasts as long as the item in the hash. After an
// unsuccessful first/next, returns NULL.
const void *
zhashx_cursor (zhashx_t *self);
// Add a comment to hash table before saving to disk. You can add as many
// comment lines as you like. These comment lines are discarded when loading
// the file. If you use a null format, all comments are deleted.
void
zhashx_comment (zhashx_t *self, const char *format, ...);
// Save hash table to a text file in name=value format. Hash values must be
// printable strings; keys may not contain '=' character. Returns 0 if OK,
// else -1 if a file error occurred.
int
zhashx_save (zhashx_t *self, const char *filename);
// Load hash table from a text file in name=value format; hash table must
// already exist. Hash values must be printable strings; keys may not contain
// '=' character. Returns 0 if OK, else -1 if a file was not readable.
int
zhashx_load (zhashx_t *self, const char *filename);
// When a hash table was loaded from a file by zhashx_load, this method will
// reload the file if it has been modified since, and is "stable", i.e. not
// still changing. Returns 0 if OK, -1 if there was an error reloading the
// file.
int
zhashx_refresh (zhashx_t *self);
// Serialize hash table to a binary frame that can be sent in a message.
// The packed format is compatible with the 'dictionary' type defined in
// http://rfc.zeromq.org/spec:35/FILEMQ, and implemented by zproto:
//
// ; A list of name/value pairs
// dictionary = dict-count *( dict-name dict-value )
// dict-count = number-4
// dict-value = longstr
// dict-name = string
//
// ; Strings are always length + text contents
// longstr = number-4 *VCHAR
// string = number-1 *VCHAR
//
// ; Numbers are unsigned integers in network byte order
// number-1 = 1OCTET
// number-4 = 4OCTET
//
// Comments are not included in the packed data. Item values MUST be
// strings.
zframe_t *
zhashx_pack (zhashx_t *self);
// Make a copy of the list; items are duplicated if you set a duplicator
// for the list, otherwise not. Copying a null reference returns a null
// reference. Note that this method's behavior changed slightly for CZMQ
// v3.x, as it neither sets nor respects autofree. It does, however, let you
// duplicate any hash table safely. The old behavior is in zhashx_dup_v2.
zhashx_t *
zhashx_dup (zhashx_t *self);
// Set a user-defined deallocator for hash items; by default items are not
// freed when the hash is destroyed.
void
zhashx_set_destructor (zhashx_t *self, zhashx_destructor_fn destructor);
// Set a user-defined duplicator for hash items; by default items are not
// copied when the hash is duplicated.
void
zhashx_set_duplicator (zhashx_t *self, zhashx_duplicator_fn duplicator);
// Set a user-defined deallocator for keys; by default keys are freed
// when the hash is destroyed using free().
void
zhashx_set_key_destructor (zhashx_t *self, zhashx_destructor_fn destructor);
// Set a user-defined duplicator for keys; by default keys are duplicated
// using strdup.
void
zhashx_set_key_duplicator (zhashx_t *self, zhashx_duplicator_fn duplicator);
// Set a user-defined comparator for keys; by default keys are
// compared using strcmp.
void
zhashx_set_key_comparator (zhashx_t *self, zhashx_comparator_fn comparator);
// Set a user-defined comparator for keys; by default keys are
// compared using strcmp.
void
zhashx_set_key_hasher (zhashx_t *self, zhashx_hash_fn hasher);
// Make copy of hash table; if supplied table is null, returns null.
// Does not copy items themselves. Rebuilds new table so may be slow on
// very large tables. NOTE: only works with item values that are strings
// since there's no other way to know how to duplicate the item value.
zhashx_t *
zhashx_dup_v2 (zhashx_t *self);
// DEPRECATED as clumsy -- use set_destructor instead
// Set hash for automatic value destruction
void
zhashx_autofree (zhashx_t *self);
// DEPRECATED as clumsy -- use zhashx_first/_next instead
// Apply function to each item in the hash table. Items are iterated in no
// defined order. Stops if callback function returns non-zero and returns
// final return code from callback function (zero = success).
// The callback must match the zhashx_foreach_fn signature.
int
zhashx_foreach (zhashx_t *self, zhashx_foreach_fn callback, void *argument);
// Self test of this class.
void
zhashx_test (bool verbose);
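// Illustrative usage sketch (not part of the generated API above): iterate a
// zhashx table with the first/next/cursor pattern. Keys are duplicated with
// strdup by default; the values stored here are static strings, so no item
// destructor is set.
//
//     zhashx_t *hash = zhashx_new ();
//     zhashx_insert (hash, "apples", "green");
//     zhashx_insert (hash, "cherries", "red");
//     void *item = zhashx_first (hash);
//     while (item) {
//         printf ("%s = %s\n", (const char *) zhashx_cursor (hash),
//                 (const char *) item);
//         item = zhashx_next (hash);
//     }
//     zhashx_destroy (&hash);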
// CLASS: ziflist
// Get a list of network interfaces currently defined on the system
ziflist_t *
ziflist_new (void);
// Destroy a ziflist instance
void
ziflist_destroy (ziflist_t **self_p);
// Reload network interfaces from system
void
ziflist_reload (ziflist_t *self);
// Return the number of network interfaces on system
size_t
ziflist_size (ziflist_t *self);
// Get first network interface, return NULL if there are none
const char *
ziflist_first (ziflist_t *self);
// Get next network interface, return NULL if we hit the last one
const char *
ziflist_next (ziflist_t *self);
// Return the current interface IP address as a printable string
const char *
ziflist_address (ziflist_t *self);
// Return the current interface broadcast address as a printable string
const char *
ziflist_broadcast (ziflist_t *self);
// Return the current interface network mask as a printable string
const char *
ziflist_netmask (ziflist_t *self);
// Return the list of interfaces.
void
ziflist_print (ziflist_t *self);
// Self test of this class.
void
ziflist_test (bool verbose);
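// Illustrative usage sketch (not part of the generated API above): list the
// system's network interfaces with their addresses.
//
//     ziflist_t *iflist = ziflist_new ();
//     const char *name = ziflist_first (iflist);
//     while (name) {
//         printf ("%s: address=%s broadcast=%s netmask=%s\n", name,
//                 ziflist_address (iflist), ziflist_broadcast (iflist),
//                 ziflist_netmask (iflist));
//         name = ziflist_next (iflist);
//     }
//     ziflist_destroy (&iflist);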
// CLASS: zlist
// Create a new list container
zlist_t *
zlist_new (void);
// Destroy a list container
void
zlist_destroy (zlist_t **self_p);
// Return the item at the head of list. If the list is empty, returns NULL.
// Leaves cursor pointing at the head item, or NULL if the list is empty.
void *
zlist_first (zlist_t *self);
// Return the next item. If the list is empty, returns NULL. To move to
// the start of the list call zlist_first (). Advances the cursor.
void *
zlist_next (zlist_t *self);
// Return the item at the tail of list. If the list is empty, returns NULL.
// Leaves cursor pointing at the tail item, or NULL if the list is empty.
void *
zlist_last (zlist_t *self);
// Return first item in the list, or null, leaves the cursor
void *
zlist_head (zlist_t *self);
// Return last item in the list, or null, leaves the cursor
void *
zlist_tail (zlist_t *self);
// Return the current item of list. If the list is empty, returns NULL.
// Leaves cursor pointing at the current item, or NULL if the list is empty.
void *
zlist_item (zlist_t *self);
// Append an item to the end of the list, return 0 if OK or -1 if this
// failed for some reason (out of memory). Note that if a duplicator has
// been set, this method will also duplicate the item.
int
zlist_append (zlist_t *self, void *item);
// Push an item to the start of the list, return 0 if OK or -1 if this
// failed for some reason (out of memory). Note that if a duplicator has
// been set, this method will also duplicate the item.
int
zlist_push (zlist_t *self, void *item);
// Pop the item off the start of the list, if any
void *
zlist_pop (zlist_t *self);
// Checks if an item is already present. Uses the compare method to determine if
// items are equal. If the compare method is NULL the check will only compare
// pointers. Returns true if item is present else false.
bool
zlist_exists (zlist_t *self, void *item);
// Remove the specified item from the list if present
void
zlist_remove (zlist_t *self, void *item);
// Make a copy of list. If the list has autofree set, the copied list will
// duplicate all items, which must be strings. Otherwise, the list will hold
// pointers back to the items in the original list. If list is null, returns
// NULL.
zlist_t *
zlist_dup (zlist_t *self);
// Purge all items from list
void
zlist_purge (zlist_t *self);
// Return number of items in the list
size_t
zlist_size (zlist_t *self);
// Sort the list by ascending key value using a straight ASCII comparison.
// The sort is not stable, so may reorder items with the same keys.
void
zlist_sort (zlist_t *self, zlist_compare_fn compare);
// Set list for automatic item destruction; item values MUST be strings.
// By default a list item refers to a value held elsewhere. When you set
// this, each time you append or push a list item, zlist will take a copy
// of the string value. Then, when you destroy the list, it will free all
// item values automatically. If you use any other technique to allocate
// list values, you must free them explicitly before destroying the list.
// The usual technique is to pop list items and destroy them, until the
// list is empty.
void
zlist_autofree (zlist_t *self);
// Sets a compare function for this list. The function compares two items.
// It returns an integer less than, equal to, or greater than zero if the
// first item is found, respectively, to be less than, to match, or be
// greater than the second item.
// This function is used for sorting, removal and exists checking.
void
zlist_comparefn (zlist_t *self, zlist_compare_fn fn);
// Set a free function for the specified list item. When the item is
// destroyed, the free function, if any, is called on that item.
// Use this when list items are dynamically allocated, to ensure that
// you don't have memory leaks. You can pass 'free' or NULL as a free_fn.
// Returns the item, or NULL if there is no such item.
void *
zlist_freefn (zlist_t *self, void *item, zlist_free_fn fn, bool at_tail);
// Self test of this class.
void
zlist_test (bool verbose);
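// Illustrative usage sketch (not part of the generated API above): the list
// holds plain pointers here (no autofree), so the static strings are not
// copied or freed by the list.
//
//     zlist_t *list = zlist_new ();
//     zlist_append (list, "apple");
//     zlist_push (list, "banana");            //  push goes to the head
//     assert (zlist_size (list) == 2);
//     char *item = (char *) zlist_pop (list);
//     assert (streq (item, "banana"));
//     zlist_destroy (&list);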
// CLASS: zlistx
// Create a new, empty list.
zlistx_t *
zlistx_new (void);
// Destroy a list. If an item destructor was specified, all items in the
// list are automatically destroyed as well.
void
zlistx_destroy (zlistx_t **self_p);
// Add an item to the head of the list. Calls the item duplicator, if any,
// on the item. Resets cursor to list head. Returns an item handle on
// success, NULL if memory was exhausted.
void *
zlistx_add_start (zlistx_t *self, void *item);
// Add an item to the tail of the list. Calls the item duplicator, if any,
// on the item. Resets cursor to list head. Returns an item handle on
// success, NULL if memory was exhausted.
void *
zlistx_add_end (zlistx_t *self, void *item);
// Return the number of items in the list
size_t
zlistx_size (zlistx_t *self);
// Return first item in the list, or null, leaves the cursor
void *
zlistx_head (zlistx_t *self);
// Return last item in the list, or null, leaves the cursor
void *
zlistx_tail (zlistx_t *self);
// Return the item at the head of list. If the list is empty, returns NULL.
// Leaves cursor pointing at the head item, or NULL if the list is empty.
void *
zlistx_first (zlistx_t *self);
// Return the next item. At the end of the list (or in an empty list),
// returns NULL. Use repeated zlistx_next () calls to work through the list
// from zlistx_first (). First time, acts as zlistx_first().
void *
zlistx_next (zlistx_t *self);
// Return the previous item. At the start of the list (or in an empty list),
// returns NULL. Use repeated zlistx_prev () calls to work through the list
// backwards from zlistx_last (). First time, acts as zlistx_last().
void *
zlistx_prev (zlistx_t *self);
// Return the item at the tail of list. If the list is empty, returns NULL.
// Leaves cursor pointing at the tail item, or NULL if the list is empty.
void *
zlistx_last (zlistx_t *self);
// Returns the value of the item at the cursor, or NULL if the cursor is
// not pointing to an item.
void *
zlistx_item (zlistx_t *self);
// Returns the handle of the item at the cursor, or NULL if the cursor is
// not pointing to an item.
void *
zlistx_cursor (zlistx_t *self);
// Returns the item associated with the given list handle, or NULL if passed
// in handle is NULL. Asserts that the passed in handle points to a list element.
void *
zlistx_handle_item (void *handle);
// Find an item in the list, searching from the start. Uses the item
// comparator, if any, else compares item values directly. Returns the
// item handle found, or NULL. Sets the cursor to the found item, if any.
void *
zlistx_find (zlistx_t *self, void *item);
// Detach an item from the list, using its handle. The item is not modified,
// and the caller is responsible for destroying it if necessary. If handle is
// null, detaches the first item on the list. Returns item that was detached,
// or null if none was. If cursor was at item, moves cursor to previous item,
// so you can detach items while iterating forwards through a list.
void *
zlistx_detach (zlistx_t *self, void *handle);
// Detach item at the cursor, if any, from the list. The item is not modified,
// and the caller is responsible for destroying it as necessary. Returns item
// that was detached, or null if none was. Moves cursor to previous item, so
// you can detach items while iterating forwards through a list.
void *
zlistx_detach_cur (zlistx_t *self);
// Delete an item, using its handle. Calls the item destructor, if any is
// set. If handle is null, deletes the first item on the list. Returns 0
// if an item was deleted, -1 if not. If cursor was at item, moves cursor
// to previous item, so you can delete items while iterating forwards
// through a list.
int
zlistx_delete (zlistx_t *self, void *handle);
// Move an item to the start of the list, via its handle.
void
zlistx_move_start (zlistx_t *self, void *handle);
// Move an item to the end of the list, via its handle.
void
zlistx_move_end (zlistx_t *self, void *handle);
// Remove all items from the list, and destroy them if the item destructor
// is set.
void
zlistx_purge (zlistx_t *self);
// Sort the list. If an item comparator was set, calls that to compare
// items, otherwise compares on item value. The sort is not stable, so may
// reorder equal items.
void
zlistx_sort (zlistx_t *self);
// Create a new node and insert it into a sorted list. Calls the item
// duplicator, if any, on the item. If low_value is true, starts searching
// from the start of the list, otherwise searches from the end. Use the item
// comparator, if any, to find where to place the new node. Returns a handle
// to the new node, or NULL if memory was exhausted. Resets the cursor to the
// list head.
void *
zlistx_insert (zlistx_t *self, void *item, bool low_value);
// Move an item, specified by handle, into position in a sorted list. Uses
// the item comparator, if any, to determine the new location. If low_value
// is true, starts searching from the start of the list, otherwise searches
// from the end.
void
zlistx_reorder (zlistx_t *self, void *handle, bool low_value);
// Make a copy of the list; items are duplicated if you set a duplicator
// for the list, otherwise not. Copying a null reference returns a null
// reference.
zlistx_t *
zlistx_dup (zlistx_t *self);
// Set a user-defined deallocator for list items; by default items are not
// freed when the list is destroyed.
void
zlistx_set_destructor (zlistx_t *self, zlistx_destructor_fn destructor);
// Set a user-defined duplicator for list items; by default items are not
// copied when the list is duplicated.
void
zlistx_set_duplicator (zlistx_t *self, zlistx_duplicator_fn duplicator);
// Set a user-defined comparator for zlistx_find and zlistx_sort; the method
// must return -1, 0, or 1 depending on whether item1 is less than, equal to,
// or greater than, item2.
void
zlistx_set_comparator (zlistx_t *self, zlistx_comparator_fn comparator);
// Self test of this class.
void
zlistx_test (bool verbose);
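// Illustrative usage sketch (not part of the generated API above): handles
// returned by the add methods can be used later to move or delete items.
// Static strings are stored as plain pointers (no duplicator set).
//
//     zlistx_t *list = zlistx_new ();
//     void *handle = zlistx_add_end (list, "world");
//     zlistx_add_start (list, "hello");
//     assert (streq ((char *) zlistx_first (list), "hello"));
//     zlistx_move_start (list, handle);       //  move "world" to the head
//     assert (streq ((char *) zlistx_first (list), "world"));
//     zlistx_destroy (&list);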
// CLASS: zloop
// Create a new zloop reactor
zloop_t *
zloop_new (void);
// Destroy a reactor
void
zloop_destroy (zloop_t **self_p);
// Register socket reader with the reactor. When the reader has messages,
// the reactor will call the handler, passing the arg. Returns 0 if OK, -1
// if there was an error. If you register the same socket more than once,
// each instance will invoke its corresponding handler.
int
zloop_reader (zloop_t *self, zsock_t *sock, zloop_reader_fn handler, void *arg);
// Cancel a socket reader from the reactor. If multiple readers exist for
// same socket, cancels ALL of them.
void
zloop_reader_end (zloop_t *self, zsock_t *sock);
// Configure a registered reader to ignore errors. If you do not set this,
// then readers that have errors are removed from the reactor silently.
void
zloop_reader_set_tolerant (zloop_t *self, zsock_t *sock);
// Register low-level libzmq pollitem with the reactor. When the pollitem
// is ready, will call the handler, passing the arg. Returns 0 if OK, -1
// if there was an error. If you register the pollitem more than once, each
// instance will invoke its corresponding handler. A pollitem with
// socket=NULL and fd=0 means 'poll on FD zero'.
int
zloop_poller (zloop_t *self, zmq_pollitem_t *item, zloop_fn handler, void *arg);
// Cancel a pollitem from the reactor, specified by socket or FD. If both
// are specified, uses only socket. If multiple poll items exist for same
// socket/FD, cancels ALL of them.
void
zloop_poller_end (zloop_t *self, zmq_pollitem_t *item);
// Configure a registered poller to ignore errors. If you do not set this,
// then pollers that have errors are removed from the reactor silently.
void
zloop_poller_set_tolerant (zloop_t *self, zmq_pollitem_t *item);
// Register a timer that expires after some delay and repeats some number of
// times. At each expiry, will call the handler, passing the arg. To run a
// timer forever, use 0 times. Returns a timer_id that is used to cancel the
// timer in the future. Returns -1 if there was an error.
int
zloop_timer (zloop_t *self, size_t delay, size_t times, zloop_timer_fn handler, void *arg);
// Cancel a specific timer identified by a specific timer_id (as returned by
// zloop_timer).
int
zloop_timer_end (zloop_t *self, int timer_id);
// Register a ticket timer. Ticket timers are very fast in the case where
// you use a lot of timers (thousands), and frequently remove and add them.
// The main use case is expiry timers for servers that handle many clients,
// and which reset the expiry timer for each message received from a client.
// Whereas normal timers perform poorly as the number of clients grows, the
// cost of ticket timers is constant, no matter the number of clients. You
// must set the ticket delay using zloop_set_ticket_delay before creating a
// ticket. Returns a handle to the timer that you should use in
// zloop_ticket_reset and zloop_ticket_delete.
void *
zloop_ticket (zloop_t *self, zloop_timer_fn handler, void *arg);
// Reset a ticket timer, which moves it to the end of the ticket list and
// resets its execution time. This is a very fast operation.
void
zloop_ticket_reset (zloop_t *self, void *handle);
// Delete a ticket timer. We do not actually delete the ticket here, as
// other code may still refer to the ticket. We mark it as deleted, and
// remove it later, safely.
void
zloop_ticket_delete (zloop_t *self, void *handle);
// Set the ticket delay, which applies to all tickets. If you lower the
// delay and there are already tickets created, the results are undefined.
void
zloop_set_ticket_delay (zloop_t *self, size_t ticket_delay);
// Set hard limit on number of timers allowed. Setting more than a small
// number of timers (10-100) can have a dramatic impact on the performance
// of the reactor. For high-volume cases, use ticket timers. If the hard
// limit is reached, the reactor stops creating new timers and logs an
// error.
void
zloop_set_max_timers (zloop_t *self, size_t max_timers);
// Set verbose tracing of reactor on/off. The default verbose setting is
// off (false).
void
zloop_set_verbose (zloop_t *self, bool verbose);
// By default the reactor stops if the process receives a SIGINT or SIGTERM
// signal. This makes it impossible to shut down message-based architectures
// like zactors. This method lets you switch off break handling. The default
// nonstop setting is off (false).
void
zloop_set_nonstop (zloop_t *self, bool nonstop);
// Start the reactor. Takes control of the thread and returns when the 0MQ
// context is terminated or the process is interrupted, or any event handler
// returns -1. Event handlers may register new sockets and timers, and
// cancel sockets. Returns 0 if interrupted, -1 if canceled by a handler.
int
zloop_start (zloop_t *self);
// Self test of this class.
void
zloop_test (bool verbose);
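// Illustrative usage sketch (not part of the generated API above): a reactor
// with a single one-shot timer whose handler ends the loop by returning -1.
// The handler name is a placeholder; its signature follows zloop_timer_fn.
//
//     static int
//     s_timer_event (zloop_t *loop, int timer_id, void *arg)
//     {
//         return -1;                          //  -1 ends the reactor
//     }
//
//     //  ...then, in the setup code:
//     zloop_t *loop = zloop_new ();
//     zloop_timer (loop, 100, 1, s_timer_event, NULL);
//     zloop_start (loop);                     //  returns once the timer fires
//     zloop_destroy (&loop);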
// CLASS: zmsg
// Create a new empty message object
zmsg_t *
zmsg_new (void);
// Destroy a message object and all frames it contains
void
zmsg_destroy (zmsg_t **self_p);
// Receive message from socket, returns zmsg_t object or NULL if the recv
// was interrupted. Does a blocking recv. If you do not want to block, use
// the zloop class, zmsg_recv_nowait, or zmq_poll to check for socket input
// before receiving.
zmsg_t *
zmsg_recv (void *source);
// Load/append an open file into new message, return the message.
// Returns NULL if the message could not be loaded.
zmsg_t *
zmsg_load (FILE *file);
// Decodes a serialized message buffer created by zmsg_encode () and returns
// a new zmsg_t object. Returns NULL if the buffer was badly formatted or
// there was insufficient memory to work.
zmsg_t *
zmsg_decode (const byte *buffer, size_t buffer_size);
// Generate a signal message encoding the given status. A signal is a short
// message carrying a 1-byte success/failure code (by convention, 0 means
// OK). Signals are encoded to be distinguishable from "normal" messages.
zmsg_t *
zmsg_new_signal (byte status);
// Send message to destination socket, and destroy the message after sending
// it successfully. If the message has no frames, sends nothing but destroys
// the message anyhow. Nullifies the caller's reference to the message (as
// it is a destructor).
int
zmsg_send (zmsg_t **self_p, void *dest);
// Send message to destination socket as part of a multipart sequence, and
// destroy the message after sending it successfully. Note that after a
// zmsg_sendm, you must call zmsg_send or another method that sends a final
// message part. If the message has no frames, sends nothing but destroys
// the message anyhow. Nullifies the caller's reference to the message (as
// it is a destructor).
int
zmsg_sendm (zmsg_t **self_p, void *dest);
// Return size of message, i.e. number of frames (0 or more).
size_t
zmsg_size (zmsg_t *self);
// Return total size of all frames in message.
size_t
zmsg_content_size (zmsg_t *self);
// Return message routing ID, if the message came from a ZMQ_SERVER socket.
// Else returns zero.
uint32_t
zmsg_routing_id (zmsg_t *self);
// Set routing ID on message. This is used if/when the message is sent to a
// ZMQ_SERVER socket.
void
zmsg_set_routing_id (zmsg_t *self, uint32_t routing_id);
// Push frame to the front of the message, i.e. before all other frames.
// Message takes ownership of frame, will destroy it when message is sent.
// Returns 0 on success, -1 on error. Deprecates zmsg_push, which did not
// nullify the caller's frame reference.
int
zmsg_prepend (zmsg_t *self, zframe_t **frame_p);
// Add frame to the end of the message, i.e. after all other frames.
// Message takes ownership of frame, will destroy it when message is sent.
// Returns 0 on success. Deprecates zmsg_add, which did not nullify the
// caller's frame reference.
int
zmsg_append (zmsg_t *self, zframe_t **frame_p);
// Remove first frame from message, if any. Returns frame, or NULL.
zframe_t *
zmsg_pop (zmsg_t *self);
// Push block of memory to front of message, as a new frame.
// Returns 0 on success, -1 on error.
int
zmsg_pushmem (zmsg_t *self, const void *src, size_t size);
// Add block of memory to the end of the message, as a new frame.
// Returns 0 on success, -1 on error.
int
zmsg_addmem (zmsg_t *self, const void *src, size_t size);
// Push string as new frame to front of message.
// Returns 0 on success, -1 on error.
int
zmsg_pushstr (zmsg_t *self, const char *string);
// Push string as new frame to end of message.
// Returns 0 on success, -1 on error.
int
zmsg_addstr (zmsg_t *self, const char *string);
// Push formatted string as new frame to front of message.
// Returns 0 on success, -1 on error.
int
zmsg_pushstrf (zmsg_t *self, const char *format, ...);
// Push formatted string as new frame to end of message.
// Returns 0 on success, -1 on error.
int
zmsg_addstrf (zmsg_t *self, const char *format, ...);
// Pop frame off front of message, return as fresh string. If there were
// no more frames in the message, returns NULL.
char *
zmsg_popstr (zmsg_t *self);
// Push encoded message as a new frame. Message takes ownership of
// submessage, so the original is destroyed in this call. Returns 0 on
// success, -1 on error.
int
zmsg_addmsg (zmsg_t *self, zmsg_t **msg_p);
// Remove first submessage from message, if any. Returns zmsg_t, or NULL if
// decoding was not successful.
zmsg_t *
zmsg_popmsg (zmsg_t *self);
// Remove specified frame from list, if present. Does not destroy frame.
void
zmsg_remove (zmsg_t *self, zframe_t *frame);
// Set cursor to first frame in message. Returns frame, or NULL, if the
// message is empty. Use this to navigate the frames as a list.
zframe_t *
zmsg_first (zmsg_t *self);
// Return the next frame. If there are no more frames, returns NULL. To move
// to the first frame call zmsg_first(). Advances the cursor.
zframe_t *
zmsg_next (zmsg_t *self);
// Return the last frame. If there are no frames, returns NULL.
zframe_t *
zmsg_last (zmsg_t *self);
// Save message to an open file, return 0 if OK, else -1. The message is
// saved as a series of frames, each with length and data. Note that the
// file is NOT guaranteed to be portable between operating systems, nor
// between versions of CZMQ. The file format is at present undocumented and liable
// to arbitrary change.
int
zmsg_save (zmsg_t *self, FILE *file);
// Serialize multipart message to a single buffer. Use this method to send
// structured messages across transports that do not support multipart data.
// Allocates and returns a new buffer containing the serialized message.
// To decode a serialized message buffer, use zmsg_decode ().
size_t
zmsg_encode (zmsg_t *self, byte **buffer);
// Create copy of message, as new message object. Returns a fresh zmsg_t
// object. If message is null, or memory was exhausted, returns null.
zmsg_t *
zmsg_dup (zmsg_t *self);
// Send message to zsys log sink (may be stdout, or system facility as
// configured by zsys_set_logstream).
void
zmsg_print (zmsg_t *self);
// Return true if the two messages have the same number of frames and each
// frame in the first message is identical to the corresponding frame in the
// other message. As with zframe_eq, return false if either message is NULL.
bool
zmsg_eq (zmsg_t *self, zmsg_t *other);
// Return signal value, 0 or greater, if message is a signal, -1 if not.
int
zmsg_signal (zmsg_t *self);
// Probe the supplied object, and report if it looks like a zmsg_t.
bool
zmsg_is (void *self);
// Self test of this class.
void
zmsg_test (bool verbose);
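// Illustrative usage sketch (not part of the generated API above): build a
// two-frame message, then pop the first frame back as a string. The string
// returned by zmsg_popstr is freshly allocated and must be freed.
//
//     zmsg_t *msg = zmsg_new ();
//     zmsg_addstr (msg, "Hello");
//     zmsg_addmem (msg, "\x01\x02", 2);
//     assert (zmsg_size (msg) == 2);
//     char *first = zmsg_popstr (msg);
//     assert (streq (first, "Hello"));
//     free (first);
//     zmsg_destroy (&msg);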
// CLASS: zpoller
// Create new poller, specifying zero or more readers. The list of
// readers ends in a NULL. Each reader can be a zsock_t instance, a
// zactor_t instance, a libzmq socket (void *), or a file handle.
zpoller_t *
zpoller_new (void *reader, ...);
// Destroy a poller
void
zpoller_destroy (zpoller_t **self_p);
// Add a reader to be polled. Returns 0 if OK, -1 on failure. The reader may
// be a libzmq void * socket, a zsock_t instance, or a zactor_t instance.
int
zpoller_add (zpoller_t *self, void *reader);
// Remove a reader from the poller; returns 0 if OK, -1 on failure. The reader
// must have been passed during construction, or in a zpoller_add () call.
int
zpoller_remove (zpoller_t *self, void *reader);
// By default the poller stops if the process receives a SIGINT or SIGTERM
// signal. This makes it impossible to shut down message-based architectures
// like zactors. This method lets you switch off break handling. The default
// nonstop setting is off (false).
void
zpoller_set_nonstop (zpoller_t *self, bool nonstop);
// Poll the registered readers for I/O, return first reader that has input.
// The reader will be a libzmq void * socket, or a zsock_t or zactor_t
// instance as specified in zpoller_new/zpoller_add. The timeout should be
// zero or greater, or -1 to wait indefinitely. Socket priority is defined
// by their order in the poll list. If you need a balanced poll, use the low
// level zmq_poll method directly. If the poll call was interrupted (SIGINT),
// or the ZMQ context was destroyed, or the timeout expired, returns NULL.
// You can test the actual exit condition by calling zpoller_expired () and
// zpoller_terminated (). The timeout is in msec.
void *
zpoller_wait (zpoller_t *self, int timeout);
// Return true if the last zpoller_wait () call ended because the timeout
// expired, without any error.
bool
zpoller_expired (zpoller_t *self);
// Return true if the last zpoller_wait () call ended because the process
// was interrupted, or the parent context was destroyed.
bool
zpoller_terminated (zpoller_t *self);
// Self test of this class.
void
zpoller_test (bool verbose);
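// Illustrative usage sketch (not part of the generated API above): wait for
// input on a PULL socket fed by a PUSH socket. The inproc endpoint name is a
// placeholder; zmsg and zsock are declared elsewhere in this listing.
//
//     zsock_t *reader = zsock_new_pull ("@inproc://zpoller.example");
//     zsock_t *writer = zsock_new_push (">inproc://zpoller.example");
//     zpoller_t *poller = zpoller_new (reader, NULL);
//     zmsg_t *msg = zmsg_new ();
//     zmsg_addstr (msg, "ping");
//     zmsg_send (&msg, writer);
//     void *which = zpoller_wait (poller, 1000);      //  timeout in msec
//     if (which == reader) {
//         zmsg_t *received = zmsg_recv (reader);
//         zmsg_destroy (&received);
//     }
//     zpoller_destroy (&poller);
//     zsock_destroy (&writer);
//     zsock_destroy (&reader);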
// CLASS: zproc
// Returns CZMQ version as a single 6-digit integer encoding the major
// version (x 10000), the minor version (x 100) and the patch.
int
zproc_czmq_version (void);
// Returns true if the process received a SIGINT or SIGTERM signal.
// It is good practice to use this method to exit any infinite loop
// processing messages.
bool
zproc_interrupted (void);
// Returns true if the underlying libzmq supports CURVE security.
bool
zproc_has_curve (void);
// Return current host name, for use in public tcp:// endpoints.
// If the host name is not resolvable, returns NULL.
char *
zproc_hostname (void);
// Move the current process into the background. The precise effect
// depends on the operating system. On POSIX boxes, moves to a specified
// working directory (if specified), closes all file handles, reopens
// stdin, stdout, and stderr to the null device, and sets the process to
// ignore SIGHUP. On Windows, does nothing.
void
zproc_daemonize (const char *workdir);
// Drop the process ID into the lockfile, with exclusive lock, and
// switch the process to the specified group and/or user. Any of the
// arguments may be null, indicating a no-op. Note that if you combine
// this with zsys_daemonize, run
// after, not before that method, or the lockfile will hold the wrong
// process ID.
void
zproc_run_as (const char *lockfile, const char *group, const char *user);
// Configure the number of I/O threads that ZeroMQ will use. A good
// rule of thumb is one thread per gigabit of traffic in or out. The
// default is 1, sufficient for most applications. If the environment
// variable ZSYS_IO_THREADS is defined, that provides the default.
// Note that this method is valid only before any socket is created.
void
zproc_set_io_threads (size_t io_threads);
// Configure the number of sockets that ZeroMQ will allow. The default
// is 1024. The actual limit depends on the system, and you can query it
// by using zsys_socket_limit (). A value of zero means "maximum".
// Note that this method is valid only before any socket is created.
void
zproc_set_max_sockets (size_t max_sockets);
// Set network interface name to use for broadcasts, particularly zbeacon.
// This lets the interface be configured for test environments where required.
// For example, on Mac OS X, zbeacon cannot bind to 255.255.255.255 which is
// the default when there is no specified interface. If the environment
// variable ZSYS_INTERFACE is set, use that as the default interface name.
// Setting the interface to "*" means "use all available interfaces".
void
zproc_set_biface (const char *value);
// Return network interface to use for broadcasts, or "" if none was set.
const char *
zproc_biface (void);
// Set log identity, which is a string that prefixes all log messages sent
// by this process. The log identity defaults to the environment variable
// ZSYS_LOGIDENT, if that is set.
void
zproc_set_log_ident (const char *value);
// Sends log output to a PUB socket bound to the specified endpoint. To
// collect such log output, create a SUB socket, subscribe to the traffic
// you care about, and connect to the endpoint. Log traffic is sent as a
// single string frame, in the same format as when sent to stdout. The
// log system supports a single sender; multiple calls to this method will
// bind the same sender to multiple endpoints. To disable the sender, call
// this method with a null argument.
void
zproc_set_log_sender (const char *endpoint);
// Enable or disable logging to the system facility (syslog on POSIX boxes,
// event log on Windows). By default this is disabled.
void
zproc_set_log_system (bool logsystem);
// Log error condition - highest priority
void
zproc_log_error (const char *format, ...);
// Log warning condition - high priority
void
zproc_log_warning (const char *format, ...);
// Log normal, but significant, condition - normal priority
void
zproc_log_notice (const char *format, ...);
// Log informational message - low priority
void
zproc_log_info (const char *format, ...);
// Log debug-level message - lowest priority
void
zproc_log_debug (const char *format, ...);
// Self test of this class.
void
zproc_test (bool verbose);
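// Illustrative usage sketch (not part of the generated API above): query
// runtime information and emit log output. Freeing the hostname string is an
// assumption based on its non-const return type.
//
//     zproc_log_info ("czmq version %d, CURVE %s",
//                     zproc_czmq_version (),
//                     zproc_has_curve () ? "available" : "not available");
//     char *host = zproc_hostname ();
//     if (host) {
//         zproc_log_debug ("running on host %s", host);
//         free (host);                        //  assumed caller-owned
//     }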
// CLASS: zsock
// Create a new socket. Returns the new socket, or NULL if the new socket
// could not be created. Note that the symbol zsock_new (and other
// constructors/destructors for zsock) are redirected to the *_checked
// variant, enabling intelligent socket leak detection. This can have
// performance implications if you use a LOT of sockets. To turn off this
// redirection behaviour, define ZSOCK_NOCHECK.
zsock_t *
zsock_new (int type);
// Destroy the socket. You must use this for any socket created via the
// zsock_new method.
void
zsock_destroy (zsock_t **self_p);
// Create a PUB socket. Default action is bind.
zsock_t *
zsock_new_pub (const char *endpoint);
// Create a SUB socket, and optionally subscribe to some prefix string. Default
// action is connect.
zsock_t *
zsock_new_sub (const char *endpoint, const char *subscribe);
// Create a REQ socket. Default action is connect.
zsock_t *
zsock_new_req (const char *endpoint);
// Create a REP socket. Default action is bind.
zsock_t *
zsock_new_rep (const char *endpoint);
// Create a DEALER socket. Default action is connect.
zsock_t *
zsock_new_dealer (const char *endpoint);
// Create a ROUTER socket. Default action is bind.
zsock_t *
zsock_new_router (const char *endpoint);
// Create a PUSH socket. Default action is connect.
zsock_t *
zsock_new_push (const char *endpoint);
// Create a PULL socket. Default action is bind.
zsock_t *
zsock_new_pull (const char *endpoint);
// Create an XPUB socket. Default action is bind.
zsock_t *
zsock_new_xpub (const char *endpoint);
// Create an XSUB socket. Default action is connect.
zsock_t *
zsock_new_xsub (const char *endpoint);
// Create a PAIR socket. Default action is connect.
zsock_t *
zsock_new_pair (const char *endpoint);
// Create a STREAM socket. Default action is connect.
zsock_t *
zsock_new_stream (const char *endpoint);
// Create a SERVER socket. Default action is bind.
zsock_t *
zsock_new_server (const char *endpoint);
// Create a CLIENT socket. Default action is connect.
zsock_t *
zsock_new_client (const char *endpoint);
// Bind a socket to a formatted endpoint. For tcp:// endpoints, supports
// ephemeral ports, if you specify the port number as "*". By default
// zsock uses the IANA designated range from C000 (49152) to FFFF (65535).
// To override this range, follow the "*" with "[first-last]". Either or
// both first and last may be empty. To bind to a random port within the
// range, use "!" in place of "*".
//
// Examples:
// tcp://127.0.0.1:* bind to first free port from C000 up
// tcp://127.0.0.1:! bind to random port from C000 to FFFF
// tcp://127.0.0.1:*[60000-] bind to first free port from 60000 up
// tcp://127.0.0.1:![-60000] bind to random port from C000 to 60000
// tcp://127.0.0.1:![55000-55999]
// bind to random port from 55000 to 55999
//
// On success, returns the actual port number used, for tcp:// endpoints,
// and 0 for other transports. On failure, returns -1. Note that when using
// ephemeral ports, a port may be reused by different services without
// clients being aware. Protocols that run on ephemeral ports should take
// this into account.
int
zsock_bind (zsock_t *self, const char *format, ...);
// Returns last bound endpoint, if any.
const char *
zsock_endpoint (zsock_t *self);
// Unbind a socket from a formatted endpoint.
// Returns 0 if OK, -1 if the endpoint was invalid or the function
// isn't supported.
int
zsock_unbind (zsock_t *self, const char *format, ...);
// Connect a socket to a formatted endpoint
// Returns 0 if OK, -1 if the endpoint was invalid.
int
zsock_connect (zsock_t *self, const char *format, ...);
// Disconnect a socket from a formatted endpoint
// Returns 0 if OK, -1 if the endpoint was invalid or the function
// isn't supported.
int
zsock_disconnect (zsock_t *self, const char *format, ...);
// Attach a socket to zero or more endpoints. If endpoints is not null,
// parses as list of ZeroMQ endpoints, separated by commas, and prefixed by
// '@' (to bind the socket) or '>' (to connect the socket). Returns 0 if all
// endpoints were valid, or -1 if there was a syntax error. If the endpoint
// does not start with '@' or '>', the serverish argument defines whether
// it is used to bind (serverish = true) or connect (serverish = false).
int
zsock_attach (zsock_t *self, const char *endpoints, bool serverish);
// Returns socket type as printable constant string.
const char *
zsock_type_str (zsock_t *self);
// Send a 'picture' message to the socket (or actor). The picture is a
// string that defines the type of each frame. This makes it easy to send
// a complex multiframe message in one call. The picture can contain any
// of these characters, each corresponding to one or two arguments:
//
// i = int (signed)
// 1 = uint8_t
// 2 = uint16_t
// 4 = uint32_t
// 8 = uint64_t
// s = char *
// b = byte *, size_t (2 arguments)
// c = zchunk_t *
// f = zframe_t *
// h = zhashx_t *
// U = zuuid_t *
// p = void * (sends the pointer value, only meaningful over inproc)
// m = zmsg_t * (sends all frames in the zmsg)
// z = sends zero-sized frame (0 arguments)
// u = uint (deprecated)
//
// Note that s, b, c, and f are encoded the same way and the choice is
// offered as a convenience to the sender, which may or may not already
// have data in a zchunk or zframe. Does not change or take ownership of
// any arguments. Returns 0 if successful, -1 if sending failed for any
// reason.
int
zsock_send (void *self, const char *picture, ...);
// Send a 'picture' message to the socket (or actor). This is a va_list
// version of zsock_send (), so please consult its documentation for the
// details.
int
zsock_vsend (void *self, const char *picture, va_list argptr);
// Receive a 'picture' message to the socket (or actor). See zsock_send for
// the format and meaning of the picture. Returns the picture elements into
// a series of pointers as provided by the caller:
//
// i = int * (stores signed integer)
// 4 = uint32_t * (stores 32-bit unsigned integer)
// 8 = uint64_t * (stores 64-bit unsigned integer)
// s = char ** (allocates new string)
// b = byte **, size_t * (2 arguments) (allocates memory)
// c = zchunk_t ** (creates zchunk)
// f = zframe_t ** (creates zframe)
// U = zuuid_t * (creates a zuuid with the data)
// h = zhashx_t ** (creates zhashx)
// p = void ** (stores pointer)
// m = zmsg_t ** (creates a zmsg with the remaining frames)
// z = null, asserts empty frame (0 arguments)
// u = uint * (stores unsigned integer, deprecated)
//
// Note that zsock_recv creates the returned objects, and the caller must
// destroy them when finished with them. The supplied pointers do not need
// to be initialized. Returns 0 if successful, or -1 if it failed to recv
// a message, in which case the pointers are not modified. When message
// frames are truncated (a short message), sets return values to zero/null.
// If an argument pointer is NULL, does not store any value (skips it).
// An 'n' picture matches an empty frame; if the message does not match,
// the method will return -1.
int
zsock_recv (void *self, const char *picture, ...);
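// Illustrative usage sketch (not part of the generated API above): send and
// receive a picture message over a PAIR pipe. The inproc endpoint name is a
// placeholder; the 's' picture allocates a fresh string the caller must free.
//
//     zsock_t *writer = zsock_new_pair ("@inproc://zsock.picture");
//     zsock_t *reader = zsock_new_pair (">inproc://zsock.picture");
//     zsock_send (writer, "is", 123, "Hello");
//     int number;
//     char *string;
//     zsock_recv (reader, "is", &number, &string);
//     assert (number == 123 && streq (string, "Hello"));
//     free (string);
//     zsock_destroy (&writer);
//     zsock_destroy (&reader);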
// Receive a 'picture' message from the socket (or actor). This is a
// va_list version of zsock_recv (), so please consult its documentation
// for the details.
int
zsock_vrecv (void *self, const char *picture, va_list argptr);
// Send a binary encoded 'picture' message to the socket (or actor). This
// method is similar to zsock_send, except the arguments are encoded in a
// binary format that is compatible with zproto, and is designed to reduce
// memory allocations. The pattern argument is a string that defines the
// type of each argument. Supports these argument types:
//
// pattern C type zproto type:
// 1 uint8_t type = "number" size = "1"
// 2 uint16_t type = "number" size = "2"
// 4 uint32_t type = "number" size = "4"
// 8 uint64_t type = "number" size = "8"
// s char *, 0-255 chars type = "string"
// S char *, 0-2^32-1 chars type = "longstr"
// c zchunk_t * type = "chunk"
// f zframe_t * type = "frame"
// u zuuid_t * type = "uuid"
// m zmsg_t * type = "msg"
// p void *, sends pointer value, only over inproc
//
// Does not change or take ownership of any arguments. Returns 0 if
// successful, -1 if sending failed for any reason.
int
zsock_bsend (void *self, const char *picture, ...);
// Receive a binary encoded 'picture' message from the socket (or actor).
// This method is similar to zsock_recv, except the arguments are encoded
// in a binary format that is compatible with zproto, and is designed to
// reduce memory allocations. The pattern argument is a string that defines
// the type of each argument. See zsock_bsend for the supported argument
// types. All arguments must be pointers; this call sets them to point to
// values held on a per-socket basis.
// Note that zsock_brecv creates the returned objects, and the caller must
// destroy them when finished with them. The supplied pointers do not need
// to be initialized. Returns 0 if successful, or -1 if it failed to read
// a message.
int
zsock_brecv (void *self, const char *picture, ...);
// Return socket routing ID if any. This returns 0 if the socket is not
// of type ZMQ_SERVER or if no request was already received on it.
uint32_t
zsock_routing_id (zsock_t *self);
// Set routing ID on socket. The socket MUST be of type ZMQ_SERVER.
// This will be used when sending messages on the socket via the zsock API.
void
zsock_set_routing_id (zsock_t *self, uint32_t routing_id);
// Set socket to use unbounded pipes (HWM=0); use this in cases when you are
// totally certain the message volume can fit in memory. This method works
// across all versions of ZeroMQ. Takes a polymorphic socket reference.
void
zsock_set_unbounded (void *self);
// Send a signal over a socket. A signal is a short message carrying a
// success/failure code (by convention, 0 means OK). Signals are encoded
// to be distinguishable from "normal" messages. Accepts a zsock_t or a
// zactor_t argument, and returns 0 if successful, -1 if the signal could
// not be sent. Takes a polymorphic socket reference.
int
zsock_signal (void *self, byte status);
// Wait on a signal. Use this to coordinate between threads, over pipe
// pairs. Blocks until the signal is received. Returns -1 on error, 0 or
// greater on success. Accepts a zsock_t or a zactor_t as argument.
// Takes a polymorphic socket reference.
int
zsock_wait (void *self);
// If there is a partial message still waiting on the socket, remove and
// discard it. This is useful when reading partial messages, to get specific
// message types.
void
zsock_flush (void *self);
// Probe the supplied object, and report if it looks like a zsock_t.
// Takes a polymorphic socket reference.
bool
zsock_is (void *self);
// Probe the supplied reference. If it looks like a zsock_t instance, return
// the underlying libzmq socket handle; else if it looks like a file
// descriptor, return NULL; else if it looks like a libzmq socket handle,
// return the supplied value. Takes a polymorphic socket reference.
void *
zsock_resolve (void *self);
// Get socket option `heartbeat_ivl`.
int
zsock_heartbeat_ivl (void *self);
// Set socket option `heartbeat_ivl`.
void
zsock_set_heartbeat_ivl (void *self, int heartbeat_ivl);
// Get socket option `heartbeat_ttl`.
int
zsock_heartbeat_ttl (void *self);
// Set socket option `heartbeat_ttl`.
void
zsock_set_heartbeat_ttl (void *self, int heartbeat_ttl);
// Get socket option `heartbeat_timeout`.
int
zsock_heartbeat_timeout (void *self);
// Set socket option `heartbeat_timeout`.
void
zsock_set_heartbeat_timeout (void *self, int heartbeat_timeout);
// Get socket option `tos`.
int
zsock_tos (void *self);
// Set socket option `tos`.
void
zsock_set_tos (void *self, int tos);
// Set socket option `router_handover`.
void
zsock_set_router_handover (void *self, int router_handover);
// Set socket option `router_mandatory`.
void
zsock_set_router_mandatory (void *self, int router_mandatory);
// Set socket option `probe_router`.
void
zsock_set_probe_router (void *self, int probe_router);
// Set socket option `req_relaxed`.
void
zsock_set_req_relaxed (void *self, int req_relaxed);
// Set socket option `req_correlate`.
void
zsock_set_req_correlate (void *self, int req_correlate);
// Set socket option `conflate`.
void
zsock_set_conflate (void *self, int conflate);
// Get socket option `zap_domain`.
char *
zsock_zap_domain (void *self);
// Set socket option `zap_domain`.
void
zsock_set_zap_domain (void *self, const char *zap_domain);
// Get socket option `mechanism`.
int
zsock_mechanism (void *self);
// Get socket option `plain_server`.
int
zsock_plain_server (void *self);
// Set socket option `plain_server`.
void
zsock_set_plain_server (void *self, int plain_server);
// Get socket option `plain_username`.
char *
zsock_plain_username (void *self);
// Set socket option `plain_username`.
void
zsock_set_plain_username (void *self, const char *plain_username);
// Get socket option `plain_password`.
char *
zsock_plain_password (void *self);
// Set socket option `plain_password`.
void
zsock_set_plain_password (void *self, const char *plain_password);
// Get socket option `curve_server`.
int
zsock_curve_server (void *self);
// Set socket option `curve_server`.
void
zsock_set_curve_server (void *self, int curve_server);
// Get socket option `curve_publickey`.
char *
zsock_curve_publickey (void *self);
// Set socket option `curve_publickey`.
void
zsock_set_curve_publickey (void *self, const char *curve_publickey);
// Set socket option `curve_publickey` from 32-octet binary
void
zsock_set_curve_publickey_bin (void *self, const byte *curve_publickey);
// Get socket option `curve_secretkey`.
char *
zsock_curve_secretkey (void *self);
// Set socket option `curve_secretkey`.
void
zsock_set_curve_secretkey (void *self, const char *curve_secretkey);
// Set socket option `curve_secretkey` from 32-octet binary
void
zsock_set_curve_secretkey_bin (void *self, const byte *curve_secretkey);
// Get socket option `curve_serverkey`.
char *
zsock_curve_serverkey (void *self);
// Set socket option `curve_serverkey`.
void
zsock_set_curve_serverkey (void *self, const char *curve_serverkey);
// Set socket option `curve_serverkey` from 32-octet binary
void
zsock_set_curve_serverkey_bin (void *self, const byte *curve_serverkey);
// Get socket option `gssapi_server`.
int
zsock_gssapi_server (void *self);
// Set socket option `gssapi_server`.
void
zsock_set_gssapi_server (void *self, int gssapi_server);
// Get socket option `gssapi_plaintext`.
int
zsock_gssapi_plaintext (void *self);
// Set socket option `gssapi_plaintext`.
void
zsock_set_gssapi_plaintext (void *self, int gssapi_plaintext);
// Get socket option `gssapi_principal`.
char *
zsock_gssapi_principal (void *self);
// Set socket option `gssapi_principal`.
void
zsock_set_gssapi_principal (void *self, const char *gssapi_principal);
// Get socket option `gssapi_service_principal`.
char *
zsock_gssapi_service_principal (void *self);
// Set socket option `gssapi_service_principal`.
void
zsock_set_gssapi_service_principal (void *self, const char *gssapi_service_principal);
// Get socket option `ipv6`.
int
zsock_ipv6 (void *self);
// Set socket option `ipv6`.
void
zsock_set_ipv6 (void *self, int ipv6);
// Get socket option `immediate`.
int
zsock_immediate (void *self);
// Set socket option `immediate`.
void
zsock_set_immediate (void *self, int immediate);
// Set socket option `router_raw`.
void
zsock_set_router_raw (void *self, int router_raw);
// Get socket option `ipv4only`.
int
zsock_ipv4only (void *self);
// Set socket option `ipv4only`.
void
zsock_set_ipv4only (void *self, int ipv4only);
// Set socket option `delay_attach_on_connect`.
void
zsock_set_delay_attach_on_connect (void *self, int delay_attach_on_connect);
// Get socket option `type`.
int
zsock_type (void *self);
// Get socket option `sndhwm`.
int
zsock_sndhwm (void *self);
// Set socket option `sndhwm`.
void
zsock_set_sndhwm (void *self, int sndhwm);
// Get socket option `rcvhwm`.
int
zsock_rcvhwm (void *self);
// Set socket option `rcvhwm`.
void
zsock_set_rcvhwm (void *self, int rcvhwm);
// Get socket option `affinity`.
int
zsock_affinity (void *self);
// Set socket option `affinity`.
void
zsock_set_affinity (void *self, int affinity);
// Set socket option `subscribe`.
void
zsock_set_subscribe (void *self, const char *subscribe);
// Set socket option `unsubscribe`.
void
zsock_set_unsubscribe (void *self, const char *unsubscribe);
// Get socket option `identity`.
char *
zsock_identity (void *self);
// Set socket option `identity`.
void
zsock_set_identity (void *self, const char *identity);
// Get socket option `rate`.
int
zsock_rate (void *self);
// Set socket option `rate`.
void
zsock_set_rate (void *self, int rate);
// Get socket option `recovery_ivl`.
int
zsock_recovery_ivl (void *self);
// Set socket option `recovery_ivl`.
void
zsock_set_recovery_ivl (void *self, int recovery_ivl);
// Get socket option `sndbuf`.
int
zsock_sndbuf (void *self);
// Set socket option `sndbuf`.
void
zsock_set_sndbuf (void *self, int sndbuf);
// Get socket option `rcvbuf`.
int
zsock_rcvbuf (void *self);
// Set socket option `rcvbuf`.
void
zsock_set_rcvbuf (void *self, int rcvbuf);
// Get socket option `linger`.
int
zsock_linger (void *self);
// Set socket option `linger`.
void
zsock_set_linger (void *self, int linger);
// Get socket option `reconnect_ivl`.
int
zsock_reconnect_ivl (void *self);
// Set socket option `reconnect_ivl`.
void
zsock_set_reconnect_ivl (void *self, int reconnect_ivl);
// Get socket option `reconnect_ivl_max`.
int
zsock_reconnect_ivl_max (void *self);
// Set socket option `reconnect_ivl_max`.
void
zsock_set_reconnect_ivl_max (void *self, int reconnect_ivl_max);
// Get socket option `backlog`.
int
zsock_backlog (void *self);
// Set socket option `backlog`.
void
zsock_set_backlog (void *self, int backlog);
// Get socket option `maxmsgsize`.
int
zsock_maxmsgsize (void *self);
// Set socket option `maxmsgsize`.
void
zsock_set_maxmsgsize (void *self, int maxmsgsize);
// Get socket option `multicast_hops`.
int
zsock_multicast_hops (void *self);
// Set socket option `multicast_hops`.
void
zsock_set_multicast_hops (void *self, int multicast_hops);
// Get socket option `rcvtimeo`.
int
zsock_rcvtimeo (void *self);
// Set socket option `rcvtimeo`.
void
zsock_set_rcvtimeo (void *self, int rcvtimeo);
// Get socket option `sndtimeo`.
int
zsock_sndtimeo (void *self);
// Set socket option `sndtimeo`.
void
zsock_set_sndtimeo (void *self, int sndtimeo);
// Set socket option `xpub_verbose`.
void
zsock_set_xpub_verbose (void *self, int xpub_verbose);
// Get socket option `tcp_keepalive`.
int
zsock_tcp_keepalive (void *self);
// Set socket option `tcp_keepalive`.
void
zsock_set_tcp_keepalive (void *self, int tcp_keepalive);
// Get socket option `tcp_keepalive_idle`.
int
zsock_tcp_keepalive_idle (void *self);
// Set socket option `tcp_keepalive_idle`.
void
zsock_set_tcp_keepalive_idle (void *self, int tcp_keepalive_idle);
// Get socket option `tcp_keepalive_cnt`.
int
zsock_tcp_keepalive_cnt (void *self);
// Set socket option `tcp_keepalive_cnt`.
void
zsock_set_tcp_keepalive_cnt (void *self, int tcp_keepalive_cnt);
// Get socket option `tcp_keepalive_intvl`.
int
zsock_tcp_keepalive_intvl (void *self);
// Set socket option `tcp_keepalive_intvl`.
void
zsock_set_tcp_keepalive_intvl (void *self, int tcp_keepalive_intvl);
// Get socket option `tcp_accept_filter`.
char *
zsock_tcp_accept_filter (void *self);
// Set socket option `tcp_accept_filter`.
void
zsock_set_tcp_accept_filter (void *self, const char *tcp_accept_filter);
// Get socket option `rcvmore`.
int
zsock_rcvmore (void *self);
// Get socket option `fd`.
SOCKET
zsock_fd (void *self);
// Get socket option `events`.
int
zsock_events (void *self);
// Get socket option `last_endpoint`.
char *
zsock_last_endpoint (void *self);
// Self test of this class.
void
zsock_test (bool verbose);
// CLASS: zstr
// Receive C string from socket. Caller must free returned string using
// zstr_free(). Returns NULL if the context is being terminated or the
// process was interrupted.
char *
zstr_recv (void *source);
// Receive a series of strings (until NULL) from multipart data.
// Each string is allocated and filled with string data; if there
// are not enough frames, unallocated strings are set to NULL.
// Returns -1 if the message could not be read, else returns the
// number of strings filled, zero or more. Free each returned string
// using zstr_free(). If not enough strings are provided, remaining
// multipart frames in the message are dropped.
int
zstr_recvx (void *source, char **string_p, ...);
// Send a C string to a socket, as a frame. The string is sent without
// trailing null byte; to read this you can use zstr_recv, or a similar
// method that adds a null terminator on the received string. String
// may be NULL, which is sent as "".
int
zstr_send (void *dest, const char *string);
// Send a C string to a socket, as zstr_send(), with a MORE flag, so that
// you can send further strings in the same multi-part message.
int
zstr_sendm (void *dest, const char *string);
// Send a formatted string to a socket. Note that you should NOT use
// user-supplied strings in the format (they may contain '%' which
// will create security holes).
int
zstr_sendf (void *dest, const char *format, ...);
// Send a formatted string to a socket, as for zstr_sendf(), with a
// MORE flag, so that you can send further strings in the same multi-part
// message.
int
zstr_sendfm (void *dest, const char *format, ...);
// Send a series of strings (until NULL) as multipart data
// Returns 0 if the strings could be sent OK, or -1 on error.
int
zstr_sendx (void *dest, const char *string, ...);
// Accepts a void pointer and returns a fresh character string. If source
// is null, returns an empty string.
char *
zstr_str (void *source);
// Free a provided string, and nullify the parent pointer. Safe to call on
// a null pointer.
void
zstr_free (char **string_p);
// Self test of this class.
void
zstr_test (bool verbose);
// CLASS: ztrie
// Creates a new ztrie.
ztrie_t *
ztrie_new (char delimiter);
// Destroy the ztrie.
void
ztrie_destroy (ztrie_t **self_p);
// Inserts a new route into the tree and attaches the data. Returns -1
// if the route already exists, otherwise 0. This method takes ownership of
// the provided data if a destroy_data_fn is provided.
int
ztrie_insert_route (ztrie_t *self, const char *path, void *data, ztrie_destroy_data_fn destroy_data_fn);
// Removes a route from the trie and destroys its data. Returns -1 if the
// route does not exist, otherwise 0.
int
ztrie_remove_route (ztrie_t *self, const char *path);
// Returns true if the path matches a route in the tree, otherwise false.
bool
ztrie_matches (ztrie_t *self, const char *path);
// Returns the data of a matched route from last ztrie_matches. If the path
// did not match, returns NULL. Do not delete the data as it's owned by
// ztrie.
void *
ztrie_hit_data (ztrie_t *self);
// Returns the count of parameters that a matched route has.
size_t
ztrie_hit_parameter_count (ztrie_t *self);
// Returns the parameters of a matched route with named regexes from last
// ztrie_matches. If the path did not match or the route did not contain any
// named regexes, returns NULL.
zhashx_t *
ztrie_hit_parameters (ztrie_t *self);
// Returns the asterisk matched part of a route, if there has been no match
// or no asterisk match, returns NULL.
const char *
ztrie_hit_asterisk_match (ztrie_t *self);
// Print the trie
void
ztrie_print (ztrie_t *self);
// Self test of this class.
void
ztrie_test (bool verbose);
// CLASS: zuuid
// Create a new UUID object.
zuuid_t *
zuuid_new (void);
// Destroy a specified UUID object.
void
zuuid_destroy (zuuid_t **self_p);
// Create UUID object from supplied ZUUID_LEN-octet value.
zuuid_t *
zuuid_new_from (const byte *source);
// Set UUID to new supplied ZUUID_LEN-octet value.
void
zuuid_set (zuuid_t *self, const byte *source);
// Set UUID to new supplied string value skipping '-' and '{' '}'
// optional delimiters. Return 0 if OK, else returns -1.
int
zuuid_set_str (zuuid_t *self, const char *source);
// Return UUID binary data.
const byte *
zuuid_data (zuuid_t *self);
// Return UUID binary size
size_t
zuuid_size (zuuid_t *self);
// Returns UUID as string
const char *
zuuid_str (zuuid_t *self);
// Return UUID in the canonical string format: 8-4-4-4-12, in lower
// case. Caller does not modify or free returned value. See
// http://en.wikipedia.org/wiki/Universally_unique_identifier
const char *
zuuid_str_canonical (zuuid_t *self);
// Store UUID blob in target array
void
zuuid_export (zuuid_t *self, byte *target);
// Check if UUID is same as supplied value
bool
zuuid_eq (zuuid_t *self, const byte *compare);
// Check if UUID is different from supplied value
bool
zuuid_neq (zuuid_t *self, const byte *compare);
// Make copy of UUID object; if uuid is null, or memory was exhausted,
// returns null.
zuuid_t *
zuuid_dup (zuuid_t *self);
// Self test of this class.
void
zuuid_test (bool verbose);
'''
cdefs = re.sub(r';[^;]*\bva_list\b[^;]*;', ';', cdefs, flags=re.S) # we don't support anything with a va_list arg
ffi.cdef(cdefs)
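# Minimal usage sketch (added for illustration; not part of the upstream bindings).
# After the cdef above, the declared functions can be called through a dlopen'd
# handle. The library name "czmq" is an assumption and may need to be a full path
# such as "libczmq.so" or "czmq.dll" on some platforms.
if __name__ == "__main__":
    lib = ffi.dlopen("czmq")  # load the CZMQ shared library (name is an assumption)
    uuid = lib.zuuid_new()  # CLASS: zuuid -- create a new UUID object
    print(ffi.string(lib.zuuid_str_canonical(uuid)).decode())  # canonical 8-4-4-4-12 form
    uuid_p = ffi.new("zuuid_t **", uuid)
    lib.zuuid_destroy(uuid_p)  # destroy and nullify through the zuuid_t ** pointer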
| mpl-2.0 | -4,268,107,658,003,528,700 | 36.159807 | 113 | 0.627411 | false |
TribeMedia/synapse | tests/storage/test_base.py | 2 | 6301 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tests import unittest
from twisted.internet import defer
from mock import Mock
from collections import OrderedDict
from synapse.server import HomeServer
from synapse.storage._base import SQLBaseStore
from synapse.storage.engines import create_engine
class SQLBaseStoreTestCase(unittest.TestCase):
""" Test the "simple" SQL generating methods in SQLBaseStore. """
def setUp(self):
self.db_pool = Mock(spec=["runInteraction"])
self.mock_txn = Mock()
self.mock_conn = Mock(spec_set=["cursor", "rollback", "commit"])
self.mock_conn.cursor.return_value = self.mock_txn
self.mock_conn.rollback.return_value = None
# Our fake runInteraction just runs synchronously inline
def runInteraction(func, *args, **kwargs):
return defer.succeed(func(self.mock_txn, *args, **kwargs))
self.db_pool.runInteraction = runInteraction
def runWithConnection(func, *args, **kwargs):
return defer.succeed(func(self.mock_conn, *args, **kwargs))
self.db_pool.runWithConnection = runWithConnection
config = Mock()
config.event_cache_size = 1
config.database_config = {"name": "sqlite3"}
hs = HomeServer(
"test",
db_pool=self.db_pool,
config=config,
database_engine=create_engine(config.database_config),
)
self.datastore = SQLBaseStore(hs)
@defer.inlineCallbacks
def test_insert_1col(self):
self.mock_txn.rowcount = 1
yield self.datastore._simple_insert(
table="tablename",
values={"columname": "Value"}
)
self.mock_txn.execute.assert_called_with(
"INSERT INTO tablename (columname) VALUES(?)", ("Value",)
)
@defer.inlineCallbacks
def test_insert_3cols(self):
self.mock_txn.rowcount = 1
yield self.datastore._simple_insert(
table="tablename",
# Use OrderedDict() so we can assert on the SQL generated
values=OrderedDict([("colA", 1), ("colB", 2), ("colC", 3)])
)
self.mock_txn.execute.assert_called_with(
"INSERT INTO tablename (colA, colB, colC) VALUES(?, ?, ?)",
(1, 2, 3,)
)
@defer.inlineCallbacks
def test_select_one_1col(self):
self.mock_txn.rowcount = 1
self.mock_txn.fetchall.return_value = [("Value",)]
value = yield self.datastore._simple_select_one_onecol(
table="tablename",
keyvalues={"keycol": "TheKey"},
retcol="retcol"
)
self.assertEquals("Value", value)
self.mock_txn.execute.assert_called_with(
"SELECT retcol FROM tablename WHERE keycol = ?", ["TheKey"]
)
@defer.inlineCallbacks
def test_select_one_3col(self):
self.mock_txn.rowcount = 1
self.mock_txn.fetchone.return_value = (1, 2, 3)
ret = yield self.datastore._simple_select_one(
table="tablename",
keyvalues={"keycol": "TheKey"},
retcols=["colA", "colB", "colC"]
)
self.assertEquals({"colA": 1, "colB": 2, "colC": 3}, ret)
self.mock_txn.execute.assert_called_with(
"SELECT colA, colB, colC FROM tablename WHERE keycol = ?",
["TheKey"]
)
@defer.inlineCallbacks
def test_select_one_missing(self):
self.mock_txn.rowcount = 0
self.mock_txn.fetchone.return_value = None
ret = yield self.datastore._simple_select_one(
table="tablename",
keyvalues={"keycol": "Not here"},
retcols=["colA"],
allow_none=True
)
self.assertFalse(ret)
@defer.inlineCallbacks
def test_select_list(self):
self.mock_txn.rowcount = 3
self.mock_txn.fetchall.return_value = ((1,), (2,), (3,))
self.mock_txn.description = (
("colA", None, None, None, None, None, None),
)
ret = yield self.datastore._simple_select_list(
table="tablename",
keyvalues={"keycol": "A set"},
retcols=["colA"],
)
self.assertEquals([{"colA": 1}, {"colA": 2}, {"colA": 3}], ret)
self.mock_txn.execute.assert_called_with(
"SELECT colA FROM tablename WHERE keycol = ?",
["A set"]
)
@defer.inlineCallbacks
def test_update_one_1col(self):
self.mock_txn.rowcount = 1
yield self.datastore._simple_update_one(
table="tablename",
keyvalues={"keycol": "TheKey"},
updatevalues={"columnname": "New Value"}
)
self.mock_txn.execute.assert_called_with(
"UPDATE tablename SET columnname = ? WHERE keycol = ?",
["New Value", "TheKey"]
)
@defer.inlineCallbacks
def test_update_one_4cols(self):
self.mock_txn.rowcount = 1
yield self.datastore._simple_update_one(
table="tablename",
keyvalues=OrderedDict([("colA", 1), ("colB", 2)]),
updatevalues=OrderedDict([("colC", 3), ("colD", 4)])
)
self.mock_txn.execute.assert_called_with(
"UPDATE tablename SET colC = ?, colD = ? WHERE"
" colA = ? AND colB = ?",
[3, 4, 1, 2]
)
@defer.inlineCallbacks
def test_delete_one(self):
self.mock_txn.rowcount = 1
yield self.datastore._simple_delete_one(
table="tablename",
keyvalues={"keycol": "Go away"},
)
self.mock_txn.execute.assert_called_with(
"DELETE FROM tablename WHERE keycol = ?", ["Go away"]
)
| apache-2.0 | -5,984,975,718,698,454,000 | 30.823232 | 74 | 0.586891 | false |
swistakm/graceful | tests/test_auth.py | 1 | 9116 | # -*- coding: utf-8 -*-
import base64
import pytest
import hashlib
from falcon.testing import TestBase
from falcon import API
from falcon import status_codes
from graceful.resources.base import BaseResource
from graceful import authentication
from graceful import authorization
@authorization.authentication_required
class ExampleResource(BaseResource, with_context=True):
def on_get(self, req, resp, **kwargs):
assert 'user' in req.context
class ExampleKVUserStorage(authentication.KeyValueUserStorage):
class SimpleKVStore(dict):
def set(self, key, value):
self[key] = value
def __init__(self, data=None):
super().__init__(self.SimpleKVStore(data or {}))
def clear(self):
self.kv_store.clear()
@ExampleKVUserStorage.hash_identifier.register(authentication.Basic)
def _(identified_with, identifier):
return ":".join([
identifier[0],
hashlib.sha1(identifier[1].encode()).hexdigest()
])
def test_default_kv_hashes_only_strings():
with pytest.raises(TypeError):
ExampleKVUserStorage.hash_identifier(None, [1, 2, 3, 4])
def test_invalid_basic_auth_realm():
with pytest.raises(ValueError):
authentication.Basic(realm="Impro=per realm%%% &")
@pytest.mark.parametrize(
"auth_class", [
authentication.Basic,
authentication.Token,
authentication.XAPIKey,
]
)
def test_auth_requires_storage(auth_class):
with pytest.raises(ValueError):
auth_class()
class AuthTestsMixin:
""" Test mixin that defines common routine for testing auth classes.
"""
class SkipTest(Exception):
"""Raised when given tests is marked to be skipped
Note: we use this exception instead of self.skipTest() method because
this has slightly different semantics. We simply don't want to report
these tests as skipped.
"""
route = '/foo/'
user = {
"username": "foo",
"details": "bar",
"password": "secretP4ssw0rd",
"allowed_ip": "127.100.100.1",
"allowed_remote": "127.0.0.1",
"token": "s3cr3t70ken",
'allowed_ip_range': ['127.100.100.1'],
}
ident_keys = ['password']
auth_storage = ExampleKVUserStorage()
auth_middleware = [authentication.Anonymous(user)]
def get_authorized_headers(self):
raise NotImplementedError
def get_invalid_headers(self):
raise NotImplementedError
def get_unauthorized_headers(self):
return {}
def setUp(self):
super().setUp()
self.api = API(middleware=self.auth_middleware)
self.api.add_route(self.route, ExampleResource())
self.auth_storage.clear()
identity = [self.user[key] for key in self.ident_keys]
self.auth_storage.register(
self.auth_middleware[0],
identity[0] if len(identity) == 1 else identity,
self.user
)
def test_unauthorized(self):
try:
self.simulate_request(
self.route, decode='utf-8', method='GET',
headers=self.get_unauthorized_headers()
)
assert self.srmock.status == status_codes.HTTP_UNAUTHORIZED
except self.SkipTest:
pass
def test_authorized(self):
try:
self.simulate_request(
self.route, decode='utf-8', method='GET',
headers=self.get_authorized_headers()
)
assert self.srmock.status == status_codes.HTTP_OK
except self.SkipTest:
pass
def test_bad_request(self):
try:
maybe_multiple_headers_sets = self.get_invalid_headers()
if isinstance(maybe_multiple_headers_sets, tuple):
header_sets = maybe_multiple_headers_sets
else:
header_sets = (maybe_multiple_headers_sets,)
for headers in header_sets:
self.simulate_request(
self.route, decode='utf-8', method='GET',
headers=headers
)
assert self.srmock.status == status_codes.HTTP_BAD_REQUEST
except self.SkipTest:
pass
class AnonymousAuthTestCase(AuthTestsMixin, TestBase):
auth_middleware = [authentication.Anonymous(...)]
def get_authorized_headers(self):
return {}
def get_unauthorized_headers(self):
# note: Anonymous always authenticates the user.
raise self.SkipTest
def get_invalid_headers(self):
# note: it is not possible to have invalid header for this auth.
raise self.SkipTest
class BasicAuthTestCase(AuthTestsMixin, TestBase):
auth_middleware = [authentication.Basic(AuthTestsMixin.auth_storage)]
ident_keys = ['username', 'password']
def get_authorized_headers(self):
return {
"Authorization":
"Basic " + base64.b64encode(
":".join(
[self.user['username'], self.user['password']]
).encode()
).decode()
}
def get_invalid_headers(self):
return (
# to many header tokens
{"Authorization": "Basic Basic Basic"},
# non base64 decoded
{"Authorization": "Basic nonbase64decoded"}
)
class TokenAuthTestCase(AuthTestsMixin, TestBase):
auth_middleware = [authentication.Token(AuthTestsMixin.auth_storage)]
ident_keys = ['token']
def get_authorized_headers(self):
return {"Authorization": "Token " + self.user['token']}
def get_invalid_headers(self):
return {"Authorization": "Token Token Token"}
class XAPIKeyAuthTestCase(AuthTestsMixin, TestBase):
auth_middleware = [authentication.XAPIKey(AuthTestsMixin.auth_storage)]
ident_keys = ['token']
def get_authorized_headers(self):
return {"X-Api-Key": self.user['token']}
def get_invalid_headers(self):
# note: it is not possible to have invalid header for this auth.
raise self.SkipTest
class XForwardedForAuthTestCase(AuthTestsMixin, TestBase):
auth_middleware = [
authentication.XForwardedFor(AuthTestsMixin.auth_storage)
]
ident_keys = ['allowed_ip']
def get_authorized_headers(self):
return {"X-Forwarded-For": self.user['allowed_ip']}
def get_invalid_headers(self):
# note: it is not possible to have invalid header for this auth.
raise self.SkipTest
class XForwardedForWithoutStorageAuthTestCase(AuthTestsMixin, TestBase):
auth_middleware = [authentication.XForwardedFor()]
ident_keys = ['allowed_ip']
def get_authorized_headers(self):
return {"X-Forwarded-For": self.user['allowed_ip']}
def get_invalid_headers(self):
# note: it is not possible to have invalid header for this auth.
raise self.SkipTest
class XForwardedForWithFallbackAuthTestCase(AuthTestsMixin, TestBase):
auth_middleware = [
authentication.XForwardedFor(remote_address_fallback=True)
]
ident_keys = ['allowed_remote']
def get_authorized_headers(self):
return {}
def get_unauthorized_headers(self):
raise self.SkipTest
def get_invalid_headers(self):
# note: it is not possible to have invalid header for this auth.
raise self.SkipTest
class IPRangeXForwardedForAuthTestCase(AuthTestsMixin, TestBase):
class IPRangeWhitelistStorage(authentication.IPRangeWhitelistStorage):
"""Test compatible implementation of IPRangeWhitelistStorage.
This implementation simply extends the base class with
tests-compatible ``register()`` and ``clear()`` methods.
"""
def register(self, identified_with, identity, user):
self.ip_range = identity
self.user = user
def clear(self):
self.ip_range = []
self.user = None
auth_storage = IPRangeWhitelistStorage([], None)
auth_middleware = [
authentication.XForwardedFor(auth_storage)
]
ident_keys = ['allowed_ip_range']
def get_authorized_headers(self):
return {'X-Forwarded-For': self.user['allowed_ip_range'][0]}
def get_unauthorized_headers(self):
raise self.SkipTest
def get_invalid_headers(self):
# note: it is not possible to have invalid header for this auth.
raise self.SkipTest
class MultipleAuthTestCase(AuthTestsMixin, TestBase):
auth_middleware = [
authentication.Token(AuthTestsMixin.auth_storage),
authentication.Anonymous(...),
authentication.Basic(AuthTestsMixin.auth_storage),
]
ident_keys = ["token"]
def get_unauthorized_headers(self):
# note: Anonymous will always authenticate the user as a fallback auth
raise self.SkipTest
def get_invalid_headers(self):
# this is invalid header for basic authentication
return {"Authorization": "Token Basic Basic"}
def get_authorized_headers(self):
return {"Authorization": "Token " + self.user['password']}
| bsd-3-clause | -8,304,785,357,824,380,000 | 28.597403 | 78 | 0.633502 | false |
chris-ch/lemvi-risk | scripts/track-drawdowns.py | 1 | 5109 | import argparse
import json
import logging
import os
from datetime import datetime
import tenacity
import gservices
from risklimits import extract_navs, compute_high_watermark, extract_flows
def from_excel_datetime(excel_date):
return datetime.fromordinal(datetime(1900, 1, 1).toordinal() + int(excel_date) - 2)
def from_excel_date(excel_date):
return from_excel_datetime(excel_date).date()
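# Illustrative note (added): Excel serial dates count days from 1900-01-01, and the
# "- 2" above compensates for Excel's 1-based numbering plus its fictitious
# 1900-02-29 leap day, so e.g. from_excel_date(43951) == date(2020, 4, 30).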
@tenacity.retry(wait=tenacity.wait_fixed(100), stop=tenacity.stop_after_attempt(5))
def main(args):
full_config_path = os.path.abspath(args.config)
logging.info('using config file "{}"'.format(full_config_path))
with open(full_config_path, 'r') as config_file:
config = json.load(config_file)
secrets_file_path = os.path.abspath(args.file_secret)
logging.info('using secrets file "{}"'.format(secrets_file_path))
with open(secrets_file_path) as json_data:
secrets_content = json.load(json_data)
google_credential = secrets_content['google.credential']
authorized_http, credentials = gservices.authorize_services(google_credential)
svc_sheet = gservices.create_service_sheets(credentials)
google_sheet_flow_id = config['google.sheet.flows.id']
workbook_flows = svc_sheet.open_by_key(google_sheet_flow_id)
flows = workbook_flows.worksheet_by_title('Flows EUR').get_all_records()
google_sheet_nav_id = config['google.sheet.navs.id']
workbook_navs = svc_sheet.open_by_key(google_sheet_nav_id)
navs = dict()
for tab in workbook_navs.worksheets():
navs[tab.title] = tab.get_all_records()
hwms, drawdowns = compute_high_watermark(extract_flows(flows), extract_navs(navs))
google_sheet_risk_limits_id = config['google.sheet.risk_limits.id']
workbook_risk_limits = svc_sheet.open_by_key(google_sheet_risk_limits_id)
sheet_hwm = workbook_risk_limits.worksheet_by_title('Adjusted High Watermarks')
sheet_drawdowns = workbook_risk_limits.worksheet_by_title('Drawdowns')
header_hwms = sheet_hwm.get_row(1, returnas='matrix')
header_drawdowns = sheet_drawdowns.get_row(1, returnas='matrix')
hwm_update_only = False
hwm_last_date_value = sheet_hwm.cell('A2').value
if hwm_last_date_value == '':
hwm_last_date_value = sheet_hwm.cell('A3').value
last_hwm_update = datetime.strptime(hwm_last_date_value, '%Y-%m-%d').date()
dd_update_only = False
dd_last_date_value = sheet_drawdowns.cell('A2').value
if dd_last_date_value == '':
dd_last_date_value = sheet_drawdowns.cell('A3').value
last_drawdown_update = datetime.strptime(dd_last_date_value, '%Y-%m-%d').date()
last_hwms = hwms[hwms.index > last_hwm_update].sort_index(ascending=False)
for as_of_date, row in last_hwms.iterrows():
row_data = [as_of_date.strftime('%Y-%m-%d')]
for account_id in header_hwms[1:]:
if account_id in row.to_dict():
value = row.to_dict()[account_id]
row_data.append(float(value))
else:
row_data.append(0.)
if hwm_update_only:
sheet_hwm.update_rows(row=1, number=1, values=[row_data])
else:
sheet_hwm.insert_rows(row=1, number=1, values=[row_data])
last_drawdowns = drawdowns[drawdowns.index > last_drawdown_update].sort_index(ascending=False)
for as_of_date, row in last_drawdowns.iterrows():
row_data = [as_of_date.strftime('%Y-%m-%d')]
for account_id in header_drawdowns[1:]:
if account_id in row.to_dict():
value = row.to_dict()[account_id]
row_data.append(float(value))
else:
row_data.append(0.)
if dd_update_only:
sheet_drawdowns.update_rows(row=1, number=1, values=[row_data])
else:
sheet_drawdowns.insert_rows(row=1, number=1, values=[row_data])
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s:%(name)s:%(levelname)s:%(message)s')
logging.getLogger('requests').setLevel(logging.WARNING)
file_handler = logging.FileHandler('update-nav-hist.log', mode='w')
formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s:%(message)s')
file_handler.setFormatter(formatter)
logging.getLogger().addHandler(file_handler)
parser = argparse.ArgumentParser(description='NAV history update.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--file-ibrokers-flex', type=str, help='InteractiveBrokers Flex response')
parser.add_argument('--file-secret', type=str, help='file including secret connection data', default='secrets.json')
parser.add_argument('--config', type=str, help='file including secret connection data', default='config.json')
args = parser.parse_args()
main(args)
| mit | 385,711,334,942,550,340 | 43.043103 | 120 | 0.633001 | false |
aishmittal/Product-Info-Crawler | demo/app/views.py | 1 | 1977 | from app import app
import os
from flask import render_template
from flask import Flask, redirect, url_for, request, send_from_directory
from flask import json
import sys
import csv
curfilePath = os.path.abspath(__file__)
curDir = os.path.abspath(os.path.join(curfilePath, os.pardir))
parDir = os.path.abspath(os.path.join(curDir, os.pardir))
tmpDir = os.path.abspath(os.path.join(curDir,'tmp/'))
resultFile=os.path.abspath(os.path.join(parDir,'results.csv'))
crawlerFile=os.path.abspath(os.path.join(curDir, os.pardir,os.pardir,'run_crawler_demo.py'))
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(MyEncoder, self).default(obj)
@app.route('/')
@app.route('/index')
def index():
return render_template("index.html",title='Home')
@app.route('/search_results',methods = ['POST'])
def search_results():
if request.method == 'POST':
os.system('python '+crawlerFile+' '+ request.json['product'])
print 'Crawling Completed'
title=[]
image=[]
price=[]
url=[]
source=[]
with open(resultFile) as f:
records = csv.DictReader(f)
for row in records:
title.append(row['product_name'])
image.append(row['image_url'])
price.append(row['price'])
url.append(row['product_url'])
source.append(row['source'])
data=dict({'product_name':title,'image_url':image,'price':price,'product_url':url,'source':source})
response = app.response_class(
response=json.dumps(data, cls=MyEncoder),
status=200,
mimetype='application/json'
)
return response | mit | -3,283,697,977,506,034,700 | 30.903226 | 109 | 0.607486 | false |
Rahveiz/PingCheck | main.py | 1 | 6665 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
import time
import socket
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from mainwindow4 import Ui_MainWindow
# Thread that performs the ping operations
class pingThread(QThread):
def __init__(self,hostname, timecheck,GUI, report):
QThread.__init__(self)
        # Initialize the instance attributes
self.hostname = hostname
self.timecheck = timecheck
self.ui = GUI
self.report = open(report + "/PyPingCheck_report.txt",'a')
        # Use the thread start time as the time of the last success
self.successtime= int(time.time())
        # Initialize the ping error flag
self.pingerror = False
def __del__(self):
self.wait()
def _ping_check(self, hostname, timecheck):
        # Update the status text before pinging
self.text_result = "PingCheck : Checking ..."
self.ui.label_status.setText(self.text_result)
self.ui.label_status.setStyleSheet("color: rgba(0,0,0,1);")
        # Ping the host passed as argument
        # Redirect the command output into the ping_var variable
        # Hide the console window when calling subprocess
self.ping_var = str(subprocess.Popen("ping %s" %self.hostname, stdout=subprocess.PIPE, creationflags=8).stdout.read())
        # Check whether the IP replies to the ping
if "TTL" in self.ping_var:
self.text_result = "PingCheck : SUCCESS"
            # If the error flag is set, reset the time of the last success
if self.pingerror == True:
self.successtime = int(time.time())
            # Reset the error flag to False
self.pingerror = False
else:
self.text_result = "PingCheck : FAIL"
            # Set the error flag to True
self.pingerror = True
            # Log to the report file when the IP does not reply
self.report.write(time.strftime("%d-%m-%Y | %X", time.localtime()) + '\t PingCheck failed (Hostname : %s)\n'%self.hostname)
self.report.flush()
        # Update the status text
self.ui.label_status.setText(self.text_result)
self.ui.label_status.setStyleSheet("color: rgba(255,0,0,1);")
        # Log when the IP has been replying continuously for X seconds
if (int(time.time()) >= (self.successtime + self.timecheck)):
self.report.write(time.strftime("%d-%m-%Y | %X", time.localtime()) + '\t %s secs of SUCCESS '%self.timecheck + '(Hostname : %s)\n'%self.hostname)
self.report.flush()
self.successtime = int(time.time())
def run(self):
while True:
self._ping_check(self.hostname, self.timecheck)
self.sleep(3)
#Application
class ShipHolderApplication(QMainWindow):
    # Initialize the GUI and the start button
def __init__(self):
super (self.__class__, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.input_file.setText("Report directory")
self.ui.button_start.clicked.connect(self.start_thread)
self.ui.button_file.clicked.connect(self.get_file)
def get_file(self):
self.report_file_path = QFileDialog.getExistingDirectory(self)
self.ui.input_file.setText(self.report_file_path)
def check_input(self):
        # Initialize the validation list to True
self.check = [True,True,True]
        # Get the report directory path
self.report = str(self.ui.input_file.text())
if os.path.isdir(self.report) != True:
self.ui.input_file.setText("Error : Please select a directory")
self.check[2] = False
        # Get the host input value
self.host = str(self.ui.input_ip.text())
        # Check whether the IP is valid
if valid_ip(self.host) != True:
            # Display an error message if it is not
self.ui.label_iperror.setText("Wrong IP format")
            # Set the corresponding list element to False
self.check[0] = False
else:
self.ui.label_iperror.setText("")
        # Get the time input value
self.period = str(self.ui.input_time.text())
        # Try to convert the string to an integer
try:
int(self.period)
except:
            # Display an error message if needed
self.ui.label_timerror.setText("Wrong time format")
            # Update the validation list
self.check[1] = False
else:
self.ui.label_timerror.setText("")
            # If possible, convert the string to an integer
self.period = int(self.period)
        # Return the validation list
return self.check
def start_thread(self):
        # Only proceed if the inputs are valid
if self.check_input() == [True,True,True]:
            # Create the thread
self.get_thread = pingThread(self.host,self.period,self.ui, self.report)
            # Start it
self.get_thread.start()
            # Enable the stop button
self.ui.button_stop.setEnabled(True)
            # Disable the inputs while the thread is running
self.ui.input_ip.setDisabled(True)
self.ui.input_time.setDisabled(True)
self.ui.input_file.setDisabled(True)
            # Disable the browse button
self.ui.button_file.setEnabled(False)
            # Connect the stop button to the stop function
self.ui.button_stop.clicked.connect(self.end_thread)
            # Disable the start button so another thread cannot be launched at the same time
self.ui.button_start.setEnabled(False)
def end_thread(self):
self.get_thread.terminate()
self.ui.button_start.setEnabled(True)
        self.ui.button_file.setEnabled(True)
self.ui.input_ip.setDisabled(False)
self.ui.input_time.setDisabled(False)
self.ui.input_file.setDisabled(False)
self.ui.button_stop.setEnabled(False)
def valid_ip(address):
try:
socket.inet_aton(address)
return True
except:
return False
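# Note (added for clarity): socket.inet_aton is permissive -- it also accepts
# shorthand dotted forms such as "127.1", so valid_ip("127.1") returns True
# while valid_ip("not-an-ip") returns False.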
def exitapp(app):
app.exec_()
def main():
app = QApplication(sys.argv)
myapp = ShipHolderApplication()
myapp.setWindowTitle("PyPingCheck")
myapp.setWindowIcon(QIcon("Icone/ping_icon.png"))
myapp.show()
sys.exit(exitapp(app))
if __name__ == '__main__':
main()
| gpl-3.0 | -1,618,018,899,325,959,200 | 31.99505 | 157 | 0.613053 | false |
yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/policyinsights/tests/latest/test_policyinsights_scenario.py | 2 | 24331 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure_devtools.scenario_tests import AllowLargeResponse
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer, create_random_name, record_only
class PolicyInsightsTests(ScenarioTest):
# Current recording was recorded against "Azure Governance Policy UX Test" (e78961ba-36fe-4739-9212-e3031b4c8db7)
@record_only()
def test_policy_insights(self):
top_clause = '--top 2'
filter_clause = '--filter "isCompliant eq false"'
apply_clause = '--apply "groupby((policyAssignmentId, resourceId), aggregate($count as numRecords))"'
select_clause = '--select "policyAssignmentId, resourceId, numRecords"'
order_by_clause = '--order-by "numRecords desc"'
from_clause = '--from "2020-04-01T00:00:00Z"'
to_clause = '--to "2020-04-07T01:30:00Z"'
scopes = [
'-m "azgovtest5"',
'',
'-g "defaultresourcegroup-eus"',
'--resource "/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/cheggpolicy/providers/microsoft.keyvault/vaults/cheggtmpkv"',
'--resource "cheggkv" --namespace "microsoft.keyvault" --resource-type "vaults" -g "cheggpolicy"',
'--resource "subnet2" --namespace "microsoft.network" --resource-type "subnets" --parent "virtualnetworks/cheggvnet" -g "cheggpolicy"',
'-s "1f3afdf9-d0c9-4c3d-847f-89da613e70a8"',
'-d "34c877ad-507e-4c82-993e-3452a6e0ad3c"',
'-a "4d31128e32d04a0098fd536e"',
'-a "f32eeddfc32345b585f9a70b" -g "cheggpolicy" '
]
for scope in scopes:
events = self.cmd('az policy event list {} {} {} {} {} {} {} {}'.format(
scope,
from_clause,
to_clause,
filter_clause,
apply_clause,
select_clause,
order_by_clause,
top_clause)).get_output_in_json()
assert len(events) >= 0
states = self.cmd('az policy state list {} {} {} {} {} {} {} {}'.format(
scope,
from_clause,
to_clause,
filter_clause,
apply_clause,
select_clause,
order_by_clause,
top_clause)).get_output_in_json()
assert len(states) >= 0
summary = self.cmd('az policy state summarize {} {} {} {} {}'.format(
scope,
from_clause,
to_clause,
filter_clause,
top_clause)).get_output_in_json()
assert summary["results"] is not None
assert len(summary["policyAssignments"]) >= 0
if len(summary["policyAssignments"]) > 0:
assert summary["policyAssignments"][0]["results"] is not None
assert len(summary["policyAssignments"][0]["policyDefinitions"]) >= 0
if len(summary["policyAssignments"][0]["policyDefinitions"]) > 0:
assert summary["policyAssignments"][0]["policyDefinitions"][0]["results"] is not None
states = self.cmd('az policy state list {} {} {}'.format(
scopes[3],
'--expand PolicyEvaluationDetails',
top_clause
), checks=[
self.check('length([?complianceState==`NonCompliant`].policyEvaluationDetails)', 2)
])
@ResourceGroupPreparer(name_prefix='cli_test_triggerscan')
def test_policy_insights_triggerscan(self):
self.kwargs.update({
'pan': self.create_random_name('azurecli-test-policy-assignment', 40),
'bip': '96670d01-0a4d-4649-9c89-2d3abc0a5025'
})
# create a subscription policy assignment that we can get an updated compliance state for
self.cmd(
'policy assignment create --policy {bip} -n {pan} --resource-group {rg} -p \'{{ "tagName": {{ "value": "notThere" }} }}\'')
# trigger a subscription scan and do not wait for it to complete
self.cmd('policy state trigger-scan --no-wait', checks=[
self.is_empty()
])
# trigger a resource group scan and wait for it to complete
self.cmd('policy state trigger-scan -g {rg}', checks=[
self.is_empty()
])
# ensure the compliance state of the resource group was updated
self.cmd('policy state list -g {rg} -a {pan} --filter \"isCompliant eq false\"', checks=[
self.check("length([])", 1)
])
@ResourceGroupPreparer(name_prefix='cli_test_remediation')
@StorageAccountPreparer(name_prefix='cliremediation')
def test_policy_insights_remediation(self, resource_group_location, storage_account):
self.kwargs.update({
'pan': self.create_random_name('azurecli-test-policy-assignment', 40),
'rn': self.create_random_name('azurecli-test-remediation', 40),
'bip': '06a78e20-9358-41c9-923c-fb736d382a4d',
'location': resource_group_location,
'sa': storage_account
})
# create a subscription policy assignment that we can trigger remediations on
assignment = self.cmd('policy assignment create --policy {bip} -n {pan}').get_output_in_json()
self.kwargs['pid'] = assignment['id'].lower()
try:
# create a remediation at resource group scope
self.cmd('policy remediation create -n {rn} -g {rg} -a {pan}', checks=[
self.check('name', '{rn}'),
self.check('provisioningState', 'Succeeded'),
self.check('resourceGroup', '{rg}'),
self.check('policyAssignmentId', '{pid}'),
self.check('policyDefinitionReferenceId', None),
self.check('filters', None),
self.check('deploymentStatus.totalDeployments', 0),
self.check('resourceDiscoveryMode', 'ExistingNonCompliant')
])
self.cmd('policy remediation show -n {rn} -g {rg}', checks=[
self.check('name', '{rn}'),
self.check('provisioningState', 'Succeeded'),
self.check('resourceGroup', '{rg}'),
self.check('policyAssignmentId', '{pid}'),
self.check('policyDefinitionReferenceId', None),
self.check('filters', None),
self.check('deploymentStatus.totalDeployments', 0),
self.check('resourceDiscoveryMode', 'ExistingNonCompliant')
])
self.cmd('policy remediation list -g {rg}', checks=[
self.check("length([?name == '{rn}'])", 1),
])
self.cmd('policy remediation deployment list -n {rn} -g {rg}', checks=[
self.is_empty()
])
self.cmd('policy remediation delete -n {rn} -g {rg}')
self.cmd('policy remediation list -g {rg}', checks=[
self.check("length([?name == '{rn}'])", 0)
])
# create a remediation at subscription scope with location filters
self.cmd('policy remediation create -n {rn} -a {pan} --location-filters {location}', checks=[
self.check('name', '{rn}'),
self.check('provisioningState', 'Succeeded'),
self.check('resourceGroup', None),
self.check('policyAssignmentId', '{pid}'),
self.check('policyDefinitionReferenceId', None),
self.check('filters.locations[*] | length([])', 1),
self.check('filters.locations[0]', '{location}'),
self.check('deploymentStatus.totalDeployments', 0),
self.check('resourceDiscoveryMode', 'ExistingNonCompliant')
])
self.cmd('policy remediation show -n {rn}', checks=[
self.check('name', '{rn}'),
self.check('provisioningState', 'Succeeded'),
self.check('resourceGroup', None),
self.check('policyAssignmentId', '{pid}'),
self.check('policyDefinitionReferenceId', None),
self.check('filters.locations[*] | length([])', 1),
self.check('filters.locations[0]', '{location}'),
self.check('deploymentStatus.totalDeployments', 0),
self.check('resourceDiscoveryMode', 'ExistingNonCompliant')
])
self.cmd('policy remediation list', checks=[
self.check("length([?name == '{rn}'])", 1),
])
self.cmd('policy remediation deployment list -n {rn}', checks=[
self.is_empty()
])
self.cmd('policy remediation delete -n {rn}')
self.cmd('policy remediation list', checks=[
self.check("length([?name == '{rn}'])", 0)
])
# create a remediation at individual resource scope
self.cmd('policy remediation create -n {rn} -a {pan} -g {rg} --namespace "Microsoft.Storage" --resource-type storageAccounts --resource {sa}', checks=[
self.check('name', '{rn}'),
self.check('provisioningState', 'Succeeded'),
self.check('resourceGroup', '{rg}'),
self.check('policyAssignmentId', '{pid}'),
self.check('policyDefinitionReferenceId', None),
self.check('filters', None),
self.check('deploymentStatus.totalDeployments', 0),
self.check('resourceDiscoveryMode', 'ExistingNonCompliant')
])
self.cmd('policy remediation show -n {rn} -g {rg} --namespace "Microsoft.Storage" --resource-type storageAccounts --resource {sa}', checks=[
self.check('name', '{rn}'),
self.check('provisioningState', 'Succeeded'),
self.check('resourceGroup', '{rg}'),
self.check('policyAssignmentId', '{pid}'),
self.check('policyDefinitionReferenceId', None),
self.check('filters', None),
self.check('deploymentStatus.totalDeployments', 0),
self.check('resourceDiscoveryMode', 'ExistingNonCompliant')
])
self.cmd('policy remediation list -g {rg} --namespace "Microsoft.Storage" --resource-type storageAccounts --resource {sa}', checks=[
self.check("length([?name == '{rn}'])", 1),
])
self.cmd('policy remediation deployment list -n {rn} -g {rg} --namespace "Microsoft.Storage" --resource-type storageAccounts --resource {sa}', checks=[
self.is_empty()
])
self.cmd('policy remediation delete -n {rn} -g {rg} --namespace "Microsoft.Storage" --resource-type storageAccounts --resource {sa}')
self.cmd('policy remediation list -g {rg} --namespace "Microsoft.Storage" --resource-type storageAccounts --resource {sa}', checks=[
self.check("length([?name == '{rn}'])", 0)
])
finally:
self.cmd('policy assignment delete -n {pan}')
def test_policy_insights_remediation_policy_set(self):
self.kwargs.update({
'pan': self.create_random_name('azurecli-test-policy-assignment', 40),
'psn': self.create_random_name('azurecli-test-policy-set', 40),
'rn': self.create_random_name('azurecli-test-remediation', 40),
'drid': self.create_random_name('cli-test-reference-id', 40),
'bip': '/providers/microsoft.authorization/policyDefinitions/06a78e20-9358-41c9-923c-fb736d382a4d'
})
try:
# create a policy set that will be remediated
self.cmd('policy set-definition create -n {psn} --definitions "[{{ \\"policyDefinitionId\\": \\"{bip}\\", \\"policyDefinitionReferenceId\\": \\"{drid}\\" }}]"')
# create a subscription policy assignment that we can trigger remediations on
assignment = self.cmd('policy assignment create --policy-set-definition {psn} -n {pan}').get_output_in_json()
self.kwargs['pid'] = assignment['id'].lower()
            # create a remediation at subscription scope
self.cmd('policy remediation create -n {rn} -a {pan} --definition-reference-id {drid}', checks=[
self.check('name', '{rn}'),
self.check('provisioningState', 'Succeeded'),
self.check('resourceGroup', None),
self.check('policyAssignmentId', '{pid}'),
self.check('policyDefinitionReferenceId', '{drid}'),
self.check('filters', None),
self.check('deploymentStatus.totalDeployments', 0),
self.check('resourceDiscoveryMode', 'ExistingNonCompliant')
])
self.cmd('policy remediation show -n {rn}', checks=[
self.check('name', '{rn}'),
self.check('provisioningState', 'Succeeded'),
self.check('resourceGroup', None),
self.check('policyAssignmentId', '{pid}'),
self.check('policyDefinitionReferenceId', '{drid}'),
self.check('filters', None),
self.check('deploymentStatus.totalDeployments', 0),
self.check('resourceDiscoveryMode', 'ExistingNonCompliant')
])
self.cmd('policy remediation list', checks=[
self.check("length([?name == '{rn}'])", 1),
])
self.cmd('policy remediation deployment list -n {rn}', checks=[
self.is_empty()
])
self.cmd('policy remediation delete -n {rn}')
self.cmd('policy remediation list', checks=[
self.check("length([?name == '{rn}'])", 0)
])
finally:
self.cmd('policy assignment delete -n {pan}')
self.cmd('policy set-definition delete -n {psn}')
# This is record only since MG auth can take a while to propagate and management groups can be disruptive
@record_only()
def test_policy_insights_remediation_management_group(self):
self.kwargs.update({
'pan': self.create_random_name('cli-test-pa', 23),
'rn': self.create_random_name('cli-test-remediation', 30),
'mg': self.create_random_name('cli-test-mg', 30),
'bip': '06a78e20-9358-41c9-923c-fb736d382a4d'
})
# create a management group we can assign policy to
management_group = self.cmd('account management-group create -n {mg}').get_output_in_json()
try:
# create a policy assignment that we can trigger remediations on
self.kwargs['mgid'] = management_group['id']
assignment = self.cmd('policy assignment create --scope {mgid} --policy {bip} -n {pan}').get_output_in_json()
self.kwargs['pid'] = assignment['id'].lower()
# create a remediation at management group scope
self.cmd('policy remediation create -n {rn} -m {mg} -a {pid}', checks=[
self.check('name', '{rn}'),
self.check('provisioningState', 'Succeeded'),
self.check('policyAssignmentId', '{pid}'),
self.check('resourceGroup', None),
self.check('policyDefinitionReferenceId', None),
self.check('filters', None),
self.check('deploymentStatus.totalDeployments', 0),
self.check('resourceDiscoveryMode', 'ExistingNonCompliant')
])
self.cmd('policy remediation show -n {rn} -m {mg}', checks=[
self.check('name', '{rn}'),
self.check('provisioningState', 'Succeeded'),
self.check('policyAssignmentId', '{pid}'),
self.check('resourceGroup', None),
self.check('policyDefinitionReferenceId', None),
self.check('filters', None),
self.check('deploymentStatus.totalDeployments', 0),
self.check('resourceDiscoveryMode', 'ExistingNonCompliant')
])
self.cmd('policy remediation list -m {mg}', checks=[
self.check("length([?name == '{rn}'])", 1),
])
self.cmd('policy remediation deployment list -n {rn} -m {mg}', checks=[
self.is_empty()
])
self.cmd('policy remediation delete -n {rn} -m {mg}')
self.cmd('policy remediation list -m {mg}', checks=[
self.check("length([?name == '{rn}'])", 0)
])
finally:
self.cmd('policy assignment delete -n {pan} --scope {mgid}')
self.cmd('account management-group delete -n {mg}')
# Executing a real remediation requires more time-intensive setup than can be done in a live scenario test.
# This record_only test executes a real remediation against a known non-compliant policy
    # Test setup required for running the test live (an example command sketch for steps 1-5 follows this list):
# 1. Create a resource group by name 'az-cli-policy-insights-test'
# 2. Create 2 Windows 10 Pro VMs in two different regions in above RG
# 3. At above RG scope, create a new policy assignment for built-in definition with name 'e0efc13a-122a-47c5-b817-2ccfe5d12615' and display name 'Deploy requirements to audit Windows VMs that do not have the specified Windows PowerShell execution policy'
# 4. Update the 'pan' key value in test code below with the assignment name created above
# 5. Trigger an on-demand evaluation scan on above RG by calling triggerEvaluation API. Check https://docs.microsoft.com/en-us/azure/governance/policy/how-to/get-compliance-data#on-demand-evaluation-scan
# 6. After step 5 completes, you should see the two VMs listed as non-compliant resources for the above assignment
# 7. Now run the testcase in live mode using command 'azdev test test_policy_insights_remediation_complete --live'
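    # A hedged sketch of corresponding az commands for steps 1-5 (resource names, regions,
    # image URN and credentials below are placeholders, not values taken from this repo):
    #   az group create -n az-cli-policy-insights-test -l eastus
    #   az vm create -g az-cli-policy-insights-test -n policyvm1 --location eastus \
    #       --image <Windows 10 Pro image URN> --admin-username <user> --admin-password <password>
    #   (create a second VM the same way in a different region)
    #   az policy assignment create --policy e0efc13a-122a-47c5-b817-2ccfe5d12615 \
    #       -g az-cli-policy-insights-test -n <assignment-name> --location eastus --assign-identity
    #   az policy state trigger-scan -g az-cli-policy-insights-test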
@record_only()
@AllowLargeResponse()
def test_policy_insights_remediation_complete(self):
self.kwargs.update({
'pan': '98904c39668a4f70804aef09',
'rg': 'az-cli-policy-insights-test',
'rn': self.create_random_name('azurecli-test-remediation', 40)
})
assignment = self.cmd('policy assignment show -g {rg} -n {pan}').get_output_in_json()
self.kwargs['pid'] = assignment['id'].lower()
# create a remediation at resource group scope
self.cmd('policy remediation create -n {rn} -g {rg} -a {pan}', checks=[
self.check('name', '{rn}'),
self.check('provisioningState', 'Accepted'),
self.check('resourceGroup', '{rg}'),
self.check('policyAssignmentId', '{pid}'),
self.check('policyDefinitionReferenceId', None),
self.check('filters', None),
self.check('deploymentStatus.totalDeployments', 2),
self.check('resourceDiscoveryMode', 'ExistingNonCompliant')
])
self.cmd('policy remediation show -n {rn} -g {rg}', checks=[
self.check('name', '{rn}'),
self.check('resourceGroup', '{rg}'),
self.check('policyAssignmentId', '{pid}'),
self.check('deploymentStatus.totalDeployments', 2),
self.check('resourceDiscoveryMode', 'ExistingNonCompliant')
])
self.cmd('policy remediation list -g {rg}', checks=[
self.check("length([?name == '{rn}'])", 1)
])
self.cmd('policy remediation deployment list -n {rn} -g {rg}', checks=[
self.check('length([])', 2),
self.exists('[0].createdOn'),
self.exists('[0].lastUpdatedOn'),
self.exists('[0].resourceLocation'),
self.exists('[0].status'),
self.check("length([?contains(@.remediatedResourceId, '/resourcegroups/{rg}/providers/microsoft.compute/virtualmachines')])", 2)
])
# cancel the remediation
self.cmd('policy remediation cancel -n {rn} -g {rg}', checks=[
self.check('provisioningState', 'Cancelling')
])
# Test a remediation that re-evaluates compliance results before remediating
@ResourceGroupPreparer(name_prefix='cli_test_remediation')
@StorageAccountPreparer(name_prefix='cliremediation')
def test_policy_insights_remediation_reevaluate(self, resource_group_location, storage_account):
self.kwargs.update({
'pan': self.create_random_name('azurecli-test-policy-assignment', 40),
'rn': self.create_random_name('azurecli-test-remediation', 40),
'bip': '5ffd78d9-436d-4b41-a421-5baa819e3008',
'location': resource_group_location,
'sa': storage_account
})
# create a resource group policy assignment that we can trigger remediations on
assignment = self.cmd(
'policy assignment create --policy {bip} -g {rg} -n {pan} --location {location} --assign-identity -p \'{{"tagName": {{ "value": "cliTagKey" }}, "tagValue": {{ "value": "cliTagValue" }} }}\'').get_output_in_json()
self.kwargs['pid'] = assignment['id'].lower()
# create a remediation at resource group scope that will re-evaluate compliance
self.cmd('policy remediation create -n {rn} -g {rg} -a {pan} --resource-discovery-mode ReEvaluateCompliance', checks=[
self.check('name', '{rn}'),
self.check('provisioningState', 'Accepted'),
self.check('resourceGroup', '{rg}'),
self.check('policyAssignmentId', '{pid}'),
self.check('policyDefinitionReferenceId', None),
self.check('filters', None),
self.check('deploymentStatus.totalDeployments', 0),
self.check('resourceDiscoveryMode', 'ReEvaluateCompliance')
])
self.cmd('policy remediation show -n {rn} -g {rg}', checks=[
self.check('name', '{rn}'),
self.check_pattern('provisioningState', '(?:Evaluating|Accepted)'),
self.check('resourceGroup', '{rg}'),
self.check('policyAssignmentId', '{pid}'),
self.check('deploymentStatus.totalDeployments', 0),
self.check('resourceDiscoveryMode', 'ReEvaluateCompliance')
])
self.cmd('policy remediation list -g {rg}', checks=[
self.check("length([?name == '{rn}'])", 1)
])
self.cmd('policy remediation deployment list -n {rn} -g {rg}', checks=[
self.check('length([])', 0)
])
# cancel the remediation
self.cmd('policy remediation cancel -n {rn} -g {rg}', checks=[
self.check('provisioningState', 'Cancelling')
])
@AllowLargeResponse()
def test_policy_insights_metadata(self):
# Get all metadata resources
all_metadata_resources = self.cmd('policy metadata list').get_output_in_json()
assert len(all_metadata_resources) > 1
# Test the --top argument
assert len(self.cmd('policy metadata list --top 0').get_output_in_json()) == 0
top_metadata_resources = self.cmd('policy metadata list --top {}'.format(len(all_metadata_resources) + 1)).get_output_in_json()
assert len(top_metadata_resources) == len(all_metadata_resources)
top_metadata_resources = self.cmd('policy metadata list --top {}'.format(len(all_metadata_resources) - 1)).get_output_in_json()
assert len(top_metadata_resources) == len(all_metadata_resources) - 1
        # Test getting an individual resource
resource_metadata_name = top_metadata_resources[0]['name']
metadata_resource = self.cmd('policy metadata show --name {}'.format(resource_metadata_name)).get_output_in_json()
assert metadata_resource['name'] == resource_metadata_name
metadata_resource = self.cmd('policy metadata show -n {}'.format(resource_metadata_name)).get_output_in_json()
assert metadata_resource['name'] == resource_metadata_name
| mit | -147,293,067,343,778,200 | 49.16701 | 261 | 0.577124 | false |
mallconnectionorg/openerp | rrhh/l10n_cl_hr_payroll/model/hr_family_responsibilities.py | 1 | 2469 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##############################################################################
#
# Pedro Arroyo M <[email protected]>
# Copyright (C) 2015 Mall Connection(<http://www.mallconnection.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv
from osv import fields
class hr_family_responsibilities(osv.osv):
    '''
    Family responsibilities declared for an employee (part of the Chilean
    payroll localization l10n_cl_hr_payroll).
    '''
    _name = 'hr.family.responsibilities'
    _description = 'Family responsibilities'
_columns = {
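        # One record per family member declared by an employee; the member is
        # described by the 'relationship', 'vat' and 'employee_id' fields below.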
'name':fields.char('Name', size=64, required=True, readonly=False),
'type':fields.selection([
('simple','simple responsibility'),
('maternal','maternal responsibility'),
('invalid','invalid responsibility'),
            ], 'Type', select=True),
'relationship':fields.selection([
('father','father'),
('son','son / daughter'),
('spouse','spouse'),
('Father in law','Father in law / mother in law'),
('second','second'),
('Grandfather','Grandfather / Grandmother'),
('grandchild','grandchild / granddaughter'),
('sister','sister / brother'),
('brother in law','brother in law / sister in law'),
], 'Relationship', select=True, readonly=False),
        'vat': fields.char('TIN', size=32, help="Tax Identification Number. Check the box if this contact is subjected to taxes. Used by some of the legal statements."),
'employee_id': fields.many2one('hr.employee', string='Employee'),
}
hr_family_responsibilities() | agpl-3.0 | 8,236,057,115,983,737,000 | 42.333333 | 177 | 0.565411 | false |
airalcorn2/Deep-Semantic-Similarity-Model | deep_semantic_similarity_keras.py | 1 | 8280 | # Michael A. Alcorn ([email protected])
# An implementation of the Deep Semantic Similarity Model (DSSM) found in [1].
# [1] Shen, Y., He, X., Gao, J., Deng, L., and Mesnil, G. 2014. A latent semantic model
# with convolutional-pooling structure for information retrieval. In CIKM, pp. 101-110.
# http://research.microsoft.com/pubs/226585/cikm2014_cdssm_final.pdf
# [2] http://research.microsoft.com/en-us/projects/dssm/
# [3] http://research.microsoft.com/pubs/238873/wsdm2015.v3.pdf
import numpy as np
from keras import backend
from keras.layers import Activation, Input
from keras.layers.core import Dense, Lambda, Reshape
from keras.layers.convolutional import Convolution1D
from keras.layers.merge import concatenate, dot
from keras.models import Model
LETTER_GRAM_SIZE = 3 # See section 3.2.
WINDOW_SIZE = 3 # See section 3.2.
TOTAL_LETTER_GRAMS = int(3 * 1e4) # Determined from data. See section 3.2.
WORD_DEPTH = WINDOW_SIZE * TOTAL_LETTER_GRAMS # See equation (1).
K = 300 # Dimensionality of the max-pooling layer. See section 3.4.
L = 128 # Dimensionality of latent semantic space. See section 3.5.
J = 4 # Number of random unclicked documents serving as negative examples for a query. See section 4.
FILTER_LENGTH = 1 # We only consider one time step for convolutions.
# Input tensors holding the query, positive (clicked) document, and negative (unclicked) documents.
# The first dimension is None because the queries and documents can vary in length.
query = Input(shape = (None, WORD_DEPTH))
pos_doc = Input(shape = (None, WORD_DEPTH))
neg_docs = [Input(shape = (None, WORD_DEPTH)) for j in range(J)]
# Query model. The paper uses separate neural nets for queries and documents (see section 5.2).
# In this step, we transform each word vector with WORD_DEPTH dimensions into its
# convolved representation with K dimensions. K is the number of kernels/filters
# being used in the operation. Essentially, the operation is taking the dot product
# of a single weight matrix (W_c) with each of the word vectors (l_t) from the
# query matrix (l_Q), adding a bias vector (b_c), and then applying the tanh activation.
# That is, h_Q = tanh(W_c • l_Q + b_c). With that being said, that's not actually
# how the operation is being calculated here. To tie the weights of the weight
# matrix (W_c) together, we have to use a one-dimensional convolutional layer.
# Further, we have to transpose our query matrix (l_Q) so that time is the first
# dimension rather than the second (as described in the paper). That is, l_Q[0, :]
# represents our first word vector rather than l_Q[:, 0]. We can think of the weight
# matrix (W_c) as being similarly transposed such that each kernel is a column
# of W_c. Therefore, h_Q = tanh(l_Q • W_c + b_c) with l_Q, W_c, and b_c being
# the transposes of the matrices described in the paper. Note: the paper does not
# include bias units.
query_conv = Convolution1D(K, FILTER_LENGTH, padding = "same", input_shape = (None, WORD_DEPTH), activation = "tanh")(query) # See equation (2).
# Next, we apply a max-pooling layer to the convolved query matrix. Keras provides
# its own max-pooling layers, but they cannot handle variable length input (as
# far as I can tell). As a result, I define my own max-pooling layer here. In the
# paper, the operation selects the maximum value for each row of h_Q, but, because
# we're using the transpose, we're selecting the maximum value for each column.
query_max = Lambda(lambda x: backend.max(x, axis = 1), output_shape = (K, ))(query_conv) # See section 3.4.
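# For intuition (hypothetical numbers): with K = 2 kernels and a three-step query,
# a convolved matrix [[0.1, 0.9], [0.4, 0.2], [0.3, 0.5]] max-pools over the time
# axis to [0.4, 0.9] -- one value per kernel, regardless of the query length.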
# In this step, we generate the semantic vector representation of the query. This
# is a standard neural network dense layer, i.e., y = tanh(W_s • v + b_s). Again,
# the paper does not include bias units.
query_sem = Dense(L, activation = "tanh", input_dim = K)(query_max) # See section 3.5.
# The document equivalent of the above query model.
doc_conv = Convolution1D(K, FILTER_LENGTH, padding = "same", input_shape = (None, WORD_DEPTH), activation = "tanh")
doc_max = Lambda(lambda x: backend.max(x, axis = 1), output_shape = (K, ))
doc_sem = Dense(L, activation = "tanh", input_dim = K)
pos_doc_conv = doc_conv(pos_doc)
neg_doc_convs = [doc_conv(neg_doc) for neg_doc in neg_docs]
pos_doc_max = doc_max(pos_doc_conv)
neg_doc_maxes = [doc_max(neg_doc_conv) for neg_doc_conv in neg_doc_convs]
pos_doc_sem = doc_sem(pos_doc_max)
neg_doc_sems = [doc_sem(neg_doc_max) for neg_doc_max in neg_doc_maxes]
# This layer calculates the cosine similarity between the semantic representations of
# a query and a document.
R_Q_D_p = dot([query_sem, pos_doc_sem], axes = 1, normalize = True) # See equation (4).
R_Q_D_ns = [dot([query_sem, neg_doc_sem], axes = 1, normalize = True) for neg_doc_sem in neg_doc_sems] # See equation (4).
concat_Rs = concatenate([R_Q_D_p] + R_Q_D_ns)
concat_Rs = Reshape((J + 1, 1))(concat_Rs)
# In this step, we multiply each R(Q, D) value by gamma. In the paper, gamma is
# described as a smoothing factor for the softmax function, and it's set empirically
# on a held-out data set. We're going to learn gamma's value by pretending it's
# a single 1 x 1 kernel.
weight = np.array([1]).reshape(1, 1, 1)
with_gamma = Convolution1D(1, 1, padding = "same", input_shape = (J + 1, 1), activation = "linear", use_bias = False, weights = [weight])(concat_Rs) # See equation (5).
with_gamma = Reshape((J + 1, ))(with_gamma)
# Finally, we use the softmax function to calculate P(D+|Q).
prob = Activation("softmax")(with_gamma) # See equation (5).
# We now have everything we need to define our model.
model = Model(inputs = [query, pos_doc] + neg_docs, outputs = prob)
model.compile(optimizer = "adadelta", loss = "categorical_crossentropy")
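# Note on the training signal: the target vectors built below always put the clicked
# document at index 0 (y[:, 0] = 1), so minimizing categorical cross-entropy drives
# P(D+|Q) toward 1 for the positive document relative to the J negatives.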
# Build a random data set.
sample_size = 10
l_Qs = []
pos_l_Ds = []
# Variable length input must be handled differently from padded input.
BATCH = True
(query_len, doc_len) = (5, 100)
for i in range(sample_size):
if BATCH:
l_Q = np.random.rand(query_len, WORD_DEPTH)
l_Qs.append(l_Q)
l_D = np.random.rand(doc_len, WORD_DEPTH)
pos_l_Ds.append(l_D)
else:
query_len = np.random.randint(1, 10)
l_Q = np.random.rand(1, query_len, WORD_DEPTH)
l_Qs.append(l_Q)
doc_len = np.random.randint(50, 500)
l_D = np.random.rand(1, doc_len, WORD_DEPTH)
pos_l_Ds.append(l_D)
neg_l_Ds = [[] for j in range(J)]
for i in range(sample_size):
possibilities = list(range(sample_size))
possibilities.remove(i)
negatives = np.random.choice(possibilities, J, replace = False)
for j in range(J):
negative = negatives[j]
neg_l_Ds[j].append(pos_l_Ds[negative])
if BATCH:
y = np.zeros((sample_size, J + 1))
y[:, 0] = 1
l_Qs = np.array(l_Qs)
pos_l_Ds = np.array(pos_l_Ds)
for j in range(J):
neg_l_Ds[j] = np.array(neg_l_Ds[j])
history = model.fit([l_Qs, pos_l_Ds] + [neg_l_Ds[j] for j in range(J)], y, epochs = 1, verbose = 0)
else:
y = np.zeros(J + 1).reshape(1, J + 1)
y[0, 0] = 1
for i in range(sample_size):
history = model.fit([l_Qs[i], pos_l_Ds[i]] + [neg_l_Ds[j][i] for j in range(J)], y, epochs = 1, verbose = 0)
# Here, I walk through how to define a function for calculating output from the
# computational graph. Let's define a function that calculates R(Q, D+) for a given
# query and clicked document. The function depends on two inputs, query and pos_doc.
# That is, if you start at the point in the graph where R(Q, D+) is calculated
# and then work backwards as far as possible, you'll end up at two different starting
# points: query and pos_doc. As a result, we supply those inputs in a list to the
# function. This particular function only calculates a single output, but multiple
# outputs are possible (see the next example).
get_R_Q_D_p = backend.function([query, pos_doc], [R_Q_D_p])
if BATCH:
get_R_Q_D_p([l_Qs, pos_l_Ds])
else:
get_R_Q_D_p([l_Qs[0], pos_l_Ds[0]])
# A slightly more complex function. Notice that both neg_docs and the output are
# lists.
get_R_Q_D_ns = backend.function([query] + neg_docs, R_Q_D_ns)
if BATCH:
get_R_Q_D_ns([l_Qs] + [neg_l_Ds[j] for j in range(J)])
else:
get_R_Q_D_ns([l_Qs[0]] + neg_l_Ds[0])
| mit | 1,212,973,223,307,917,600 | 46.551724 | 168 | 0.68685 | false |
protonyx/labtronyx-gui | labtronyxgui/applets/Resources/VISA.py | 1 | 2904 | """
.. codeauthor:: Kevin Kennedy <[email protected]>
"""
from Base_Applet import Base_Applet
import Tkinter as Tk
from widgets import *
class VISA(Base_Applet):
info = {
# Description
'description': 'Generic view for VISA Resources',
# List of compatible resource types
'validResourceTypes': ['VISA']
}
def run(self):
self.wm_title("VISA Resource")
self.instr = self.getInstrument()
# Driver info
self.w_info = vw_info.vw_DriverInfo(self, self.instr)
self.w_info.grid(row=0, column=0, columnspan=2)
# Send
self.send_val = Tk.StringVar(self)
self.lbl_send = Tk.Label(self, width=20,
text="Send Command",
anchor=Tk.W, justify=Tk.LEFT)
self.lbl_send.grid(row=1, column=0)
self.txt_send = Tk.Entry(self, width=40,
textvariable=self.send_val)
self.txt_send.grid(row=1, column=1)
# Buttons
self.f_buttons = Tk.Frame(self, padx=5, pady=5)
self.btn_write = Tk.Button(self.f_buttons,
text="Write",
command=self.cb_write,
width=10,
padx=3)
self.btn_write.pack(side=Tk.LEFT)
self.btn_query = Tk.Button(self.f_buttons,
text="Query",
command=self.cb_query,
width=10,
padx=3)
        self.btn_query.pack(side=Tk.LEFT)
self.btn_read = Tk.Button(self.f_buttons,
text="Read",
command=self.cb_read,
width=10,
padx=3)
self.btn_read.pack(side=Tk.LEFT)
self.f_buttons.grid(row=2, column=1)
# Receive
self.lbl_receive = Tk.Label(self, width=20,
text="Received Data",
anchor=Tk.W, justify=Tk.LEFT)
self.lbl_receive.grid(row=3, column=0)
self.txt_receive = Tk.Text(self, state=Tk.DISABLED,
width=20, height=10)
self.txt_receive.grid(row=3, column=1,
sticky=Tk.N+Tk.E+Tk.S+Tk.W)
def cb_write(self):
data = self.send_val.get()
self.instr.write(data)
def cb_query(self):
self.cb_write()
self.cb_read()
def cb_read(self):
data = self.instr.read()
self.txt_receive.configure(state=Tk.NORMAL)
        self.txt_receive.delete(1.0, Tk.END)
self.txt_receive.insert(Tk.END, data)
self.txt_receive.configure(state=Tk.DISABLED)
| mit | -527,448,498,447,606,460 | 32.37931 | 68 | 0.474518 | false |
SetBased/py-kerapu | kerapu/command/TestsetShredderCommand.py | 1 | 11590 | import csv
import datetime
import os
import random
import shutil
import string
import zipfile
from typing import Iterable, List, Dict
from cleo import Command
from lxml import etree
from kerapu.style.KerapuStyle import KerapuStyle
class TestShredderCommand(Command):
"""
    Converts the XML files of the testset to a CSV file
    kerapu:test-shredder
        {testset-zip : ZIP file with the testset}
        {testset-csv : Path where the CSV file with the tests must be stored}
"""
# ------------------------------------------------------------------------------------------------------------------
def __extract_zip_file(self, zip_filename: str, tmp_dir: str):
"""
        Extracts the ZIP file with the testset into a folder.
        :param str zip_filename: The path to the ZIP file with the testset.
        :param str tmp_dir: The path to the folder.
"""
self.output.writeln('Uitpakken van <fso>{}</fso> in <fso>{}</fso>'.format(zip_filename, tmp_dir))
with zipfile.ZipFile(zip_filename, 'r') as zip_ref:
zip_ref.extractall(tmp_dir)
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def ordinal(path: str) -> int:
"""
        Returns the ordinal (sequence number) of a test.
        :param str path: The path to the XML file with the test case.
"""
parts = os.path.basename(path).split('_')
return int(parts[6])
# ------------------------------------------------------------------------------------------------------------------
def __lees_test_cases_lijst(self, folder: str) -> List:
"""
        Returns a list of all files in a folder, sorted by test-case ordinal.
        :param str folder: The path to the folder.
"""
entries = os.listdir(folder)
filenames = list()
for entry in entries:
path = os.path.join(folder, entry)
if os.path.isfile(path):
filenames.append(path)
self.output.writeln('Aantal gevonden test cases: {}'.format(len(filenames)))
return sorted(filenames, key=TestShredderCommand.ordinal)
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def __maak_xpath(parts: Iterable) -> str:
"""
        Builds an xpath string from its parts.
        :param tuple parts: The parts of the xpath.
        :rtype: str
"""
xpath = ''
for part in parts:
if xpath:
xpath += '/'
xpath += 'xmlns:' + part
return xpath
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def __convert_date(date: str) -> str:
"""
        Converts a date in YYYYMMDD format to YYYY-MM-DD format.
        :param str date: The date in YYYYMMDD format.
        :rtype: str
"""
return date[:4] + '-' + date[4:6] + '-' + date[6:8]
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def __leeftijd_geboorte_datum(date: str, leeftijd: int) -> str:
"""
        Returns the birth date given a date and an age, such that the person does not
        have their birthday on that date.
        :param str date: The given date in YYYY-MM-DD format.
        :param int leeftijd: The age in years.
        :rtype: str
"""
date = datetime.date(int(date[:4]) - leeftijd, int(date[5:7]), int(date[8:10]))
date -= datetime.timedelta(days=1)
return date.isoformat()
# ------------------------------------------------------------------------------------------------------------------
def __shred_xml_bestand(self, filename: str) -> Dict:
"""
        Reads the relevant data from an XML file with a test case.
        :param str filename: The filename of the XML file.
        :rtype: dict
"""
doc = etree.parse(filename)
xpath = '/soapenv:Envelope/soapenv:Body/xmlns:FICR_IN900101NL04'
namespaces = {'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/',
'xmlns': 'urn:hl7-org:v3'}
        # Read the declaratiecode (declaration code).
parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'id')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
declaratie_code = elements[0].get('extension')
        # Read the specialismecode (specialism code).
parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'derivedFrom',
'zorgtraject', 'responsibleParty', 'assignedPerson', 'code')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
specialisme_code = elements[0].get('code')
        # Read the diagnosecode (diagnosis code).
parts = (
'ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'pertinentInformation1',
'typerendeDiagnose', 'value')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
diagnose_code = elements[0].get('code')
        # Read the zorgtypecode (care type code).
parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'code')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
zorg_type_code = elements[0].get('code') if elements else None
        # Read the zorgvraagcode (care demand code).
parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'derivedFrom',
'zorgtraject', 'reason', 'zorgvraag', 'value')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
zorg_vraag_code = elements[0].get('code') if elements else None
        # Read the begin date (begindatum).
parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'effectiveTime', 'low')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
begin_datum = self.__convert_date(elements[0].get('value')) if elements else None
        # Read the patient's birth date (derived from the age and the begin date).
parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'subject', 'patient', 'subjectOf', 'leeftijd',
'value')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
leeftijd = int(elements[0].get('value')) if elements else None
geboorte_datum = self.__leeftijd_geboorte_datum(begin_datum, leeftijd)
        # Read the patient's gender.
parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'subject', 'patient', 'patientPerson',
'administrativeGenderCode')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
geslacht_code = elements[0].get('code') if elements else None
        # Read the AGB code of the care institution.
parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'author', 'assignedOrganization', 'id')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
zorg_instelling_code = elements[0].get('extension') if elements else None
        # Read all zorgactiviteiten (care activities).
zorg_activiteiten = list()
parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'debit',
'zorgactiviteit')
elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
for element in elements:
path = 'xmlns:code'
sub_elements = element.xpath(path, namespaces=namespaces)
zorg_activiteit_code = sub_elements[0].get('code') if sub_elements else None
path = 'xmlns:repeatNumber'
sub_elements = element.xpath(path, namespaces=namespaces)
aantal = int(sub_elements[0].get('value')) if sub_elements else None
zorg_activiteiten.append((zorg_activiteit_code, aantal))
return {'subtraject_nummer': os.path.basename(filename),
'declaratie_code': declaratie_code,
'specialisme_code': specialisme_code,
'diagnose_code': diagnose_code,
'zorg_type_code': zorg_type_code,
'zorg_vraag_code': zorg_vraag_code,
'begin_datum': begin_datum,
'geboorte_datum': geboorte_datum,
'geslacht_code': geslacht_code,
'zorg_instelling_code': zorg_instelling_code,
'zorg_activiteiten': zorg_activiteiten}
# ----------------------------------------------------------------------------------------------------------------------
@staticmethod
def __write_subtraject(writer, subtraject: Dict) -> None:
"""
        Writes the subtraject with all its zorgactiviteiten (care activities) to a CSV file.
        :param writer: The handle to the CSV writer.
        :param dict subtraject: The details of the subtraject.
"""
writer.writerow((subtraject['subtraject_nummer'],
subtraject['specialisme_code'],
subtraject['diagnose_code'],
subtraject['zorg_type_code'],
subtraject['zorg_vraag_code'],
subtraject['begin_datum'],
subtraject['geboorte_datum'],
subtraject['geslacht_code'],
subtraject['zorg_instelling_code'],
subtraject['declaratie_code']))
for zorgactiviteit in subtraject['zorg_activiteiten']:
writer.writerow((zorgactiviteit[0], zorgactiviteit[1]))
# ----------------------------------------------------------------------------------------------------------------------
def __extract_files(self, writer, filenames: List) -> None:
"""
        Extracts the data from a list of XML files with test cases and writes this data to a CSV file.
        :param writer: The handle to the CSV writer.
        :param list filenames: The list of filenames of XML files with test cases.
"""
for filename in filenames:
subtraject = self.__shred_xml_bestand(filename)
self.__write_subtraject(writer, subtraject)
# ------------------------------------------------------------------------------------------------------------------
def handle(self) -> int:
"""
Executes the command.
"""
self.output = KerapuStyle(self.input, self.output)
zip_filename = self.argument('testset-zip')
csv_filename = self.argument('testset-csv')
tmp_dir = '.kerapu-' + ''.join(random.choices(string.ascii_lowercase, k=12))
os.mkdir(tmp_dir)
self.__extract_zip_file(zip_filename, tmp_dir)
files = self.__lees_test_cases_lijst(tmp_dir)
with open(csv_filename, 'w', encoding='utf-8') as handle:
csv_writer = csv.writer(handle, dialect=csv.unix_dialect)
self.__extract_files(csv_writer, files)
shutil.rmtree(tmp_dir)
return 0
# ----------------------------------------------------------------------------------------------------------------------
| mit | 6,899,941,541,554,615,000 | 42.246269 | 124 | 0.528645 | false |
effigies/mne-python | examples/export/plot_epochs_to_nitime.py | 2 | 2043 | """
=======================
Export epochs to NiTime
=======================
This script shows how to export Epochs to the NiTime library
for further signal processing and data analysis.
"""
# Author: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
print(__doc__)
import numpy as np
import mne
from mne import io
from mne.datasets import sample
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443', 'EEG 053']
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
# Export to NiTime
epochs_ts = epochs.to_nitime(picks=np.arange(20), collapse=True)
###############################################################################
# Now use nitime's OO-interface to compute coherence between sensors
from nitime.analysis import MTCoherenceAnalyzer
from nitime.viz import drawmatrix_channels
import matplotlib.pyplot as plt
# setup coherency analyzer
C = MTCoherenceAnalyzer(epochs_ts)
# confine analysis to 10 - 30 Hz
freq_idx = np.where((C.frequencies > 10) * (C.frequencies < 30))[0]
# compute average coherence
coh = np.mean(C.coherence[:, :, freq_idx], -1) # Averaging on last dimension
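# coh is now a channel-by-channel matrix of coherence values averaged over the
# selected band, which drawmatrix_channels renders against epochs.ch_names below.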
drawmatrix_channels(coh, epochs.ch_names, color_anchor=0,
title='MEG gradiometer coherence')
plt.show()
| bsd-3-clause | -4,635,458,188,188,562,000 | 30.430769 | 79 | 0.625551 | false |
Knygar/hwios | services/web_ui/models/ws_realm.py | 1 | 7844 | '''
Copyright (c) OS-Networks, http://os-networks.net
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the HWIOS Project nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.'''
import os
import re
import uuid
import random
from twisted.internet import reactor, defer
from twisted.python import failure, log
from django.contrib.sessions.models import Session
from hwios.core.application import HWIOS
from web_ui.models.signal import Signal, SignalPool
from web_ui.models.hwm_queue import HWM_Queue
import web_ui.urls as urls
from web_ui.models.statics import *
from web_ui.models.profiles import Profile
from web_ui.models.client import Client
class WebSocketDispatcher(object):
#each websocket controller now has access to the signal pool
signals = SignalPool()
compiled_ws_patterns = []
valid_routes = {}
def __init__(self):
'''
Initialize all modules that are specified in urls.py
'''
self.pool = WebSocketPool(self.signals)
for pattern in urls.ws_patterns:
p = re.compile(pattern[0])
module = __import__(pattern[1], globals(), locals(), [pattern[2]],-1)
self.compiled_ws_patterns.append((p,module,pattern[2],pattern[3]))
for pattern in self.compiled_ws_patterns:
if pattern[2] not in self.valid_routes:
self.valid_routes[pattern[2]] ={'instance': getattr(pattern[1],pattern[2])(self),'methods':[]}
self.valid_routes[pattern[2]]['methods'].append(pattern[3])
def match(self, url):
'''Compare the url with a list of compiled regex patterns
@return Tuple
'''
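        # A successful match yields (controller_class_name, method_name, url_kwargs);
        # e.g. a hypothetical pattern r'^/profiles/(?P<uuid>[^/]+)/$' registered for
        # ('Profiles', 'get_profile') would return ('Profiles', 'get_profile', {'uuid': '...'}).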
for pattern in self.compiled_ws_patterns:
rp = pattern[0].match(url)
if rp != None:
return (pattern[2],pattern[3], rp.groupdict())
return None
def route(self, target):
'''Routes clientside HWM urls to the apprioate HWIOS function handler'''
cls, method, get_params = self.match(target)
if cls in self.valid_routes:
instance = self.valid_routes[cls]['instance']
if hasattr(instance, method):
return [instance, method, get_params]
else:
return None
return None
class WebSocketPool(object):
clients = []
subscription = {}
def __init__(self, signals):
self.signals = signals
self.userlist = []
#register signals
self.signals.append(Signal('view_changed'))
self.signals.append(Signal('ws_connect'))
self.signals.append(Signal('ws_disconnect'))
def name_taken(self,name):
for _client in self.clients:
print type(_client)
print _client.profile
if _client.profile != None:
if _client.profile.username == name:
return True
return False
def get_userlist(self):
userlist = []
for _client in self.clients:
print 'CLIENT:%s' % _client.profile.username
if hasattr(_client,'transport'):
userlist.append(_client.profile.username)
return userlist
def rm_subscription(self, client):
"""
When a client disconnects, remove all subscription references that may be left
"""
for area in self.subscription:
for cid in self.subscription[area]:
for _client in self.subscription[area][cid]['clients']:
if _client.profile.uuid == client.profile.uuid:
self.subscription[area][cid]['clients'].remove(_client)
def add_client(self, transport):
"""
After bootstrapping the client, the websocket connector needs to be added to the client....
"""
new_client = Client(transport.profile, transport.session, 'nl')
new_client.transport = transport
HWIOS.ws_realm.pool.clients.append(new_client)
log.msg('%s WS/76/HRM' % ('New client added...'),system='%s,IN' % transport.getPeer().host)
#self.clients.append(transport)
self.signals.send('ws_connect', client = new_client)
userlist = self.get_userlist()
for _client in self.clients:
#only send online update notification to already connected clients. New client will make it's own request
if _client.transport != new_client:
_client.remote('/data/modules/messenger/online/update/',{'online':userlist})
return new_client
def rm_client(self, transport):
"""
Remove a client from our clientlist, when the socket connection closes.
"""
self.signals.send('ws_disconnect', client = transport)
try:
for _client in self.clients:
if _client.transport == transport:
#_client.transport = None
self.clients.remove(_client)
self.rm_subscription(_client)
except ValueError: pass
userlist = self.get_userlist()
for _client in self.clients:
_client.remote('/data/modules/messenger/online/update/',{'online':userlist})
def get_clients(self):
return self.clients
def get_client(self, profile_uuid):
for _client in self.clients:
if _client.profile.uuid == profile_uuid:
return _client
return False
def get_anonymous_profile(self, session_id = None, ip = None):
profile = Profile()
profile.is_authenticated = False
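        # Pick a free anonymous_<n> username at random and derive a deterministic UUID
        # (uuid5) from the session id, or from the client's IP when no session is given,
        # so repeated calls for the same visitor presumably yield the same profile uuid.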
while True:
pk = random.randrange(0, 10001, 2)
username = 'anonymous_%s' % pk
if not HWIOS.ws_realm.pool.name_taken(username):
profile.username = username
profile.pk = pk
if session_id != None:
profile.uuid = uuid.uuid5(uuid.NAMESPACE_DNS, str(session_id))
elif ip != None:
profile.uuid = uuid.uuid5(uuid.NAMESPACE_DNS, ip)
break
return profile
class WSRealm(object):
def __init__(self):
self.dispatcher = WebSocketDispatcher()
self.pool = self.dispatcher.pool
self.queue = HWM_Queue()
self._t = ws_table | bsd-3-clause | -7,726,291,159,420,705,000 | 37.455882 | 117 | 0.610785 | false |
swannapa/erpnext | erpnext/accounts/doctype/sales_invoice/sales_invoice.py | 1 | 35544 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
import frappe.defaults
from frappe.utils import cint, flt
from frappe import _, msgprint, throw
from erpnext.accounts.party import get_party_account, get_due_date
from erpnext.controllers.stock_controller import update_gl_entries_after
from frappe.model.mapper import get_mapped_doc
from erpnext.accounts.doctype.sales_invoice.pos import update_multi_mode_option
from erpnext.controllers.selling_controller import SellingController
from erpnext.accounts.utils import get_account_currency
from erpnext.stock.doctype.delivery_note.delivery_note import update_billed_amount_based_on_so
from erpnext.projects.doctype.timesheet.timesheet import get_projectwise_timesheet_data
from erpnext.accounts.doctype.asset.depreciation \
import get_disposal_account_and_cost_center, get_gl_entries_on_asset_disposal
from erpnext.stock.doctype.batch.batch import set_batch_nos
from erpnext.stock.doctype.serial_no.serial_no import get_serial_nos, get_delivery_note_serial_no
from erpnext.setup.doctype.company.company import update_company_current_month_sales
from erpnext.accounts.general_ledger import get_round_off_account_and_cost_center
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class SalesInvoice(SellingController):
print "test"
def __init__(self, *args, **kwargs):
super(SalesInvoice, self).__init__(*args, **kwargs)
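		# Each status_updater entry tells frappe how submitting/cancelling this invoice
		# updates the linked Sales Order: billed_amt on Sales Order Item is recomputed
		# through so_detail, and per_billed / billing_status on the Sales Order follow.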
self.status_updater = [{
'source_dt': 'Sales Invoice Item',
'target_field': 'billed_amt',
'target_ref_field': 'amount',
'target_dt': 'Sales Order Item',
'join_field': 'so_detail',
'target_parent_dt': 'Sales Order',
'target_parent_field': 'per_billed',
'source_field': 'amount',
'join_field': 'so_detail',
'percent_join_field': 'sales_order',
'status_field': 'billing_status',
'keyword': 'Billed',
'overflow_type': 'billing'
}]
def set_indicator(self):
"""Set indicator for portal"""
if self.outstanding_amount > 0:
self.indicator_color = "orange"
self.indicator_title = _("Unpaid")
else:
self.indicator_color = "green"
self.indicator_title = _("Paid")
def validate(self):
super(SalesInvoice, self).validate()
self.validate_auto_set_posting_time()
if not self.is_pos:
self.so_dn_required()
self.validate_proj_cust()
self.validate_with_previous_doc()
self.validate_uom_is_integer("stock_uom", "stock_qty")
self.validate_uom_is_integer("uom", "qty")
self.check_close_sales_order("sales_order")
self.validate_debit_to_acc()
self.clear_unallocated_advances("Sales Invoice Advance", "advances")
self.add_remarks()
self.validate_write_off_account()
self.validate_account_for_change_amount()
self.validate_fixed_asset()
self.set_income_account_for_fixed_assets()
if cint(self.is_pos):
self.validate_pos()
if cint(self.update_stock):
self.validate_dropship_item()
self.validate_item_code()
self.validate_warehouse()
self.update_current_stock()
self.validate_delivery_note()
if not self.is_opening:
self.is_opening = 'No'
if self._action != 'submit' and self.update_stock and not self.is_return:
set_batch_nos(self, 'warehouse', True)
self.set_against_income_account()
self.validate_c_form()
self.validate_time_sheets_are_submitted()
self.validate_multiple_billing("Delivery Note", "dn_detail", "amount", "items")
if not self.is_return:
self.validate_serial_numbers()
self.update_packing_list()
self.set_billing_hours_and_amount()
self.update_timesheet_billing_for_project()
self.set_status()
def before_save(self):
set_account_for_mode_of_payment(self)
def on_submit(self):
self.validate_pos_paid_amount()
if not self.subscription:
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype,
self.company, self.base_grand_total, self)
self.check_prev_docstatus()
if self.is_return:
# NOTE status updating bypassed for is_return
self.status_updater = []
self.update_status_updater_args()
self.update_prevdoc_status()
self.update_billing_status_in_dn()
self.clear_unallocated_mode_of_payments()
# Updating stock ledger should always be called after updating prevdoc status,
# because updating reserved qty in bin depends upon updated delivered qty in SO
if self.update_stock == 1:
self.update_stock_ledger()
# this sequence because outstanding may get -ve
self.make_gl_entries()
if not self.is_return:
self.update_billing_status_for_zero_amount_refdoc("Sales Order")
self.check_credit_limit()
self.update_serial_no()
if not cint(self.is_pos) == 1 and not self.is_return:
self.update_against_document_in_jv()
self.update_time_sheet(self.name)
self.update_current_month_sales()
def validate_pos_paid_amount(self):
if len(self.payments) == 0 and self.is_pos:
frappe.throw(_("At least one mode of payment is required for POS invoice."))
def before_cancel(self):
self.update_time_sheet(None)
def on_cancel(self):
self.check_close_sales_order("sales_order")
from erpnext.accounts.utils import unlink_ref_doc_from_payment_entries
if frappe.db.get_single_value('Accounts Settings', 'unlink_payment_on_cancellation_of_invoice'):
unlink_ref_doc_from_payment_entries(self)
if self.is_return:
# NOTE status updating bypassed for is_return
self.status_updater = []
self.update_status_updater_args()
self.update_prevdoc_status()
self.update_billing_status_in_dn()
if not self.is_return:
self.update_billing_status_for_zero_amount_refdoc("Sales Order")
self.update_serial_no(in_cancel=True)
self.validate_c_form_on_cancel()
# Updating stock ledger should always be called after updating prevdoc status,
# because updating reserved qty in bin depends upon updated delivered qty in SO
if self.update_stock == 1:
self.update_stock_ledger()
self.make_gl_entries_on_cancel()
frappe.db.set(self, 'status', 'Cancelled')
self.update_current_month_sales()
def update_current_month_sales(self):
if frappe.flags.in_test:
update_company_current_month_sales(self.company)
else:
frappe.enqueue('erpnext.setup.doctype.company.company.update_company_current_month_sales',
company=self.company)
def update_status_updater_args(self):
if cint(self.update_stock):
self.status_updater.extend([{
'source_dt':'Sales Invoice Item',
'target_dt':'Sales Order Item',
'target_parent_dt':'Sales Order',
'target_parent_field':'per_delivered',
'target_field':'delivered_qty',
'target_ref_field':'qty',
'source_field':'qty',
'join_field':'so_detail',
'percent_join_field':'sales_order',
'status_field':'delivery_status',
'keyword':'Delivered',
'second_source_dt': 'Delivery Note Item',
'second_source_field': 'qty',
'second_join_field': 'so_detail',
'overflow_type': 'delivery',
'extra_cond': """ and exists(select name from `tabSales Invoice`
where name=`tabSales Invoice Item`.parent and update_stock = 1)"""
},
{
'source_dt': 'Sales Invoice Item',
'target_dt': 'Sales Order Item',
'join_field': 'so_detail',
'target_field': 'returned_qty',
'target_parent_dt': 'Sales Order',
# 'target_parent_field': 'per_delivered',
# 'target_ref_field': 'qty',
'source_field': '-1 * qty',
# 'percent_join_field': 'sales_order',
# 'overflow_type': 'delivery',
'extra_cond': """ and exists (select name from `tabSales Invoice` where name=`tabSales Invoice Item`.parent and update_stock=1 and is_return=1)"""
}
])
def check_credit_limit(self):
from erpnext.selling.doctype.customer.customer import check_credit_limit
validate_against_credit_limit = False
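		# Only re-check the customer's credit limit when at least one row is billed
		# directly, i.e. not linked to a Sales Order or Delivery Note (those documents
		# are expected to have triggered the credit check already).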
for d in self.get("items"):
if not (d.sales_order or d.delivery_note):
validate_against_credit_limit = True
break
if validate_against_credit_limit:
check_credit_limit(self.customer, self.company)
def set_missing_values(self, for_validate=False):
pos = self.set_pos_fields(for_validate)
if not self.debit_to:
self.debit_to = get_party_account("Customer", self.customer, self.company)
if not self.due_date and self.customer:
self.due_date = get_due_date(self.posting_date, "Customer", self.customer, self.company)
super(SalesInvoice, self).set_missing_values(for_validate)
if pos:
return {"print_format": pos.get("print_format") }
def update_time_sheet(self, sales_invoice):
for d in self.timesheets:
if d.time_sheet:
timesheet = frappe.get_doc("Timesheet", d.time_sheet)
self.update_time_sheet_detail(timesheet, d, sales_invoice)
timesheet.calculate_total_amounts()
timesheet.calculate_percentage_billed()
timesheet.flags.ignore_validate_update_after_submit = True
timesheet.set_status()
timesheet.save()
def update_time_sheet_detail(self, timesheet, args, sales_invoice):
for data in timesheet.time_logs:
if (self.project and args.timesheet_detail == data.name) or \
(not self.project and not data.sales_invoice) or \
(not sales_invoice and data.sales_invoice == self.name):
data.sales_invoice = sales_invoice
def on_update(self):
self.set_paid_amount()
def set_paid_amount(self):
paid_amount = 0.0
base_paid_amount = 0.0
for data in self.payments:
data.base_amount = flt(data.amount*self.conversion_rate, self.precision("base_paid_amount"))
paid_amount += data.amount
base_paid_amount += data.base_amount
self.paid_amount = paid_amount
self.base_paid_amount = base_paid_amount
def validate_time_sheets_are_submitted(self):
for data in self.timesheets:
if data.time_sheet:
status = frappe.db.get_value("Timesheet", data.time_sheet, "status")
if status not in ['Submitted', 'Payslip']:
frappe.throw(_("Timesheet {0} is already completed or cancelled").format(data.time_sheet))
def set_pos_fields(self, for_validate=False):
"""Set retail related fields from POS Profiles"""
if cint(self.is_pos) != 1:
return
from erpnext.stock.get_item_details import get_pos_profile_item_details, get_pos_profile
pos = get_pos_profile(self.company)
if not self.get('payments') and not for_validate:
pos_profile = frappe.get_doc('POS Profile', pos.name) if pos else None
update_multi_mode_option(self, pos_profile)
if not self.account_for_change_amount:
self.account_for_change_amount = frappe.db.get_value('Company', self.company, 'default_cash_account')
if pos:
if not for_validate and not self.customer:
self.customer = pos.customer
self.mode_of_payment = pos.mode_of_payment
# self.set_customer_defaults()
if pos.get('account_for_change_amount'):
self.account_for_change_amount = pos.get('account_for_change_amount')
for fieldname in ('territory', 'naming_series', 'currency', 'taxes_and_charges', 'letter_head', 'tc_name',
'selling_price_list', 'company', 'select_print_heading', 'cash_bank_account',
'write_off_account', 'write_off_cost_center', 'apply_discount_on'):
if (not for_validate) or (for_validate and not self.get(fieldname)):
self.set(fieldname, pos.get(fieldname))
if not for_validate:
self.update_stock = cint(pos.get("update_stock"))
# set pos values in items
for item in self.get("items"):
if item.get('item_code'):
for fname, val in get_pos_profile_item_details(pos,
frappe._dict(item.as_dict()), pos).items():
if (not for_validate) or (for_validate and not item.get(fname)):
item.set(fname, val)
# fetch terms
if self.tc_name and not self.terms:
self.terms = frappe.db.get_value("Terms and Conditions", self.tc_name, "terms")
# fetch charges
if self.taxes_and_charges and not len(self.get("taxes")):
self.set_taxes()
return pos
def get_company_abbr(self):
return frappe.db.sql("select abbr from tabCompany where name=%s", self.company)[0][0]
def validate_debit_to_acc(self):
account = frappe.db.get_value("Account", self.debit_to,
["account_type", "report_type", "account_currency"], as_dict=True)
if not account:
frappe.throw(_("Debit To is required"))
if account.report_type != "Balance Sheet":
frappe.throw(_("Debit To account must be a Balance Sheet account"))
if self.customer and account.account_type != "Receivable":
frappe.throw(_("Debit To account must be a Receivable account"))
self.party_account_currency = account.account_currency
def clear_unallocated_mode_of_payments(self):
self.set("payments", self.get("payments", {"amount": ["not in", [0, None, ""]]}))
frappe.db.sql("""delete from `tabSales Invoice Payment` where parent = %s
and amount = 0""", self.name)
def validate_with_previous_doc(self):
super(SalesInvoice, self).validate_with_previous_doc({
"Sales Order": {
"ref_dn_field": "sales_order",
"compare_fields": [["customer", "="], ["company", "="], ["project", "="], ["currency", "="]]
},
"Sales Order Item": {
"ref_dn_field": "so_detail",
"compare_fields": [["item_code", "="], ["uom", "="], ["conversion_factor", "="]],
"is_child_table": True,
"allow_duplicate_prev_row_id": True
},
"Delivery Note": {
"ref_dn_field": "delivery_note",
"compare_fields": [["customer", "="], ["company", "="], ["project", "="], ["currency", "="]]
},
"Delivery Note Item": {
"ref_dn_field": "dn_detail",
"compare_fields": [["item_code", "="], ["uom", "="], ["conversion_factor", "="]],
"is_child_table": True,
"allow_duplicate_prev_row_id": True
},
})
if cint(frappe.db.get_single_value('Selling Settings', 'maintain_same_sales_rate')) and not self.is_return:
self.validate_rate_with_reference_doc([
["Sales Order", "sales_order", "so_detail"],
["Delivery Note", "delivery_note", "dn_detail"]
])
def set_against_income_account(self):
"""Set against account for debit to account"""
against_acc = []
for d in self.get('items'):
if d.income_account not in against_acc:
against_acc.append(d.income_account)
self.against_income_account = ','.join(against_acc)
def add_remarks(self):
if not self.remarks: self.remarks = 'No Remarks'
def validate_auto_set_posting_time(self):
# Don't auto set the posting date and time if invoice is amended
if self.is_new() and self.amended_from:
self.set_posting_time = 1
self.validate_posting_time()
def so_dn_required(self):
"""check in manage account if sales order / delivery note required or not."""
dic = {'Sales Order':['so_required', 'is_pos'],'Delivery Note':['dn_required', 'update_stock']}
for i in dic:
if frappe.db.get_value('Selling Settings', None, dic[i][0]) == 'Yes':
for d in self.get('items'):
if frappe.db.get_value('Item', d.item_code, 'is_stock_item') == 1 \
and not d.get(i.lower().replace(' ','_')) and not self.get(dic[i][1]):
msgprint(_("{0} is mandatory for Item {1}").format(i,d.item_code), raise_exception=1)
def validate_proj_cust(self):
"""check for does customer belong to same project as entered.."""
if self.project and self.customer:
res = frappe.db.sql("""select name from `tabProject`
where name = %s and (customer = %s or customer is null or customer = '')""",
(self.project, self.customer))
if not res:
throw(_("Customer {0} does not belong to project {1}").format(self.customer,self.project))
def validate_pos(self):
if self.is_return:
if flt(self.paid_amount) + flt(self.write_off_amount) - flt(self.grand_total) < \
1/(10**(self.precision("grand_total") + 1)):
frappe.throw(_("Paid amount + Write Off Amount can not be greater than Grand Total"))
def validate_item_code(self):
for d in self.get('items'):
if not d.item_code:
msgprint(_("Item Code required at Row No {0}").format(d.idx), raise_exception=True)
def validate_warehouse(self):
super(SalesInvoice, self).validate_warehouse()
for d in self.get_item_list():
if not d.warehouse and frappe.db.get_value("Item", d.item_code, "is_stock_item"):
frappe.throw(_("Warehouse required for stock Item {0}").format(d.item_code))
def validate_delivery_note(self):
for d in self.get("items"):
if d.delivery_note:
msgprint(_("Stock cannot be updated against Delivery Note {0}").format(d.delivery_note), raise_exception=1)
def validate_write_off_account(self):
if flt(self.write_off_amount) and not self.write_off_account:
self.write_off_account = frappe.db.get_value('Company', self.company, 'write_off_account')
if flt(self.write_off_amount) and not self.write_off_account:
msgprint(_("Please enter Write Off Account"), raise_exception=1)
def validate_account_for_change_amount(self):
if flt(self.change_amount) and not self.account_for_change_amount:
msgprint(_("Please enter Account for Change Amount"), raise_exception=1)
def validate_c_form(self):
""" Blank C-form no if C-form applicable marked as 'No'"""
if self.amended_from and self.c_form_applicable == 'No' and self.c_form_no:
frappe.db.sql("""delete from `tabC-Form Invoice Detail` where invoice_no = %s
and parent = %s""", (self.amended_from, self.c_form_no))
frappe.db.set(self, 'c_form_no', '')
def validate_c_form_on_cancel(self):
""" Display message if C-Form no exists on cancellation of Sales Invoice"""
if self.c_form_applicable == 'Yes' and self.c_form_no:
msgprint(_("Please remove this Invoice {0} from C-Form {1}")
.format(self.name, self.c_form_no), raise_exception = 1)
def validate_dropship_item(self):
for item in self.items:
if item.sales_order:
if frappe.db.get_value("Sales Order Item", item.so_detail, "delivered_by_supplier"):
frappe.throw(_("Could not update stock, invoice contains drop shipping item."))
def update_current_stock(self):
for d in self.get('items'):
if d.item_code and d.warehouse:
bin = frappe.db.sql("select actual_qty from `tabBin` where item_code = %s and warehouse = %s", (d.item_code, d.warehouse), as_dict = 1)
d.actual_qty = bin and flt(bin[0]['actual_qty']) or 0
for d in self.get('packed_items'):
bin = frappe.db.sql("select actual_qty, projected_qty from `tabBin` where item_code = %s and warehouse = %s", (d.item_code, d.warehouse), as_dict = 1)
d.actual_qty = bin and flt(bin[0]['actual_qty']) or 0
d.projected_qty = bin and flt(bin[0]['projected_qty']) or 0
def update_packing_list(self):
if cint(self.update_stock) == 1:
from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
make_packing_list(self)
else:
self.set('packed_items', [])
def set_billing_hours_and_amount(self):
if not self.project:
for timesheet in self.timesheets:
ts_doc = frappe.get_doc('Timesheet', timesheet.time_sheet)
if not timesheet.billing_hours and ts_doc.total_billable_hours:
timesheet.billing_hours = ts_doc.total_billable_hours
if not timesheet.billing_amount and ts_doc.total_billable_amount:
timesheet.billing_amount = ts_doc.total_billable_amount
def update_timesheet_billing_for_project(self):
if not self.timesheets and self.project:
self.add_timesheet_data()
else:
self.calculate_billing_amount_for_timesheet()
def add_timesheet_data(self):
self.set('timesheets', [])
if self.project:
for data in get_projectwise_timesheet_data(self.project):
self.append('timesheets', {
'time_sheet': data.parent,
'billing_hours': data.billing_hours,
'billing_amount': data.billing_amt,
'timesheet_detail': data.name
})
self.calculate_billing_amount_for_timesheet()
def calculate_billing_amount_for_timesheet(self):
total_billing_amount = 0.0
for data in self.timesheets:
if data.billing_amount:
total_billing_amount += data.billing_amount
self.total_billing_amount = total_billing_amount
def get_warehouse(self):
user_pos_profile = frappe.db.sql("""select name, warehouse from `tabPOS Profile`
where ifnull(user,'') = %s and company = %s""", (frappe.session['user'], self.company))
warehouse = user_pos_profile[0][1] if user_pos_profile else None
if not warehouse:
global_pos_profile = frappe.db.sql("""select name, warehouse from `tabPOS Profile`
where (user is null or user = '') and company = %s""", self.company)
if global_pos_profile:
warehouse = global_pos_profile[0][1]
elif not user_pos_profile:
msgprint(_("POS Profile required to make POS Entry"), raise_exception=True)
return warehouse
def set_income_account_for_fixed_assets(self):
disposal_account = depreciation_cost_center = None
for d in self.get("items"):
if d.is_fixed_asset:
if not disposal_account:
disposal_account, depreciation_cost_center = get_disposal_account_and_cost_center(self.company)
d.income_account = disposal_account
if not d.cost_center:
d.cost_center = depreciation_cost_center
def check_prev_docstatus(self):
for d in self.get('items'):
if d.sales_order and frappe.db.get_value("Sales Order", d.sales_order, "docstatus") != 1:
frappe.throw(_("Sales Order {0} is not submitted").format(d.sales_order))
if d.delivery_note and frappe.db.get_value("Delivery Note", d.delivery_note, "docstatus") != 1:
throw(_("Delivery Note {0} is not submitted").format(d.delivery_note))
def make_gl_entries(self, gl_entries=None, repost_future_gle=True, from_repost=False):
auto_accounting_for_stock = erpnext.is_perpetual_inventory_enabled(self.company)
if not self.grand_total:
return
if not gl_entries:
gl_entries = self.get_gl_entries()
if gl_entries:
from erpnext.accounts.general_ledger import make_gl_entries
# if POS and amount is written off, updating outstanding amt after posting all gl entries
update_outstanding = "No" if (cint(self.is_pos) or self.write_off_account) else "Yes"
make_gl_entries(gl_entries, cancel=(self.docstatus == 2),
update_outstanding=update_outstanding, merge_entries=False)
if update_outstanding == "No":
from erpnext.accounts.doctype.gl_entry.gl_entry import update_outstanding_amt
update_outstanding_amt(self.debit_to, "Customer", self.customer,
self.doctype, self.return_against if cint(self.is_return) else self.name)
if repost_future_gle and cint(self.update_stock) \
and cint(auto_accounting_for_stock):
items, warehouses = self.get_items_and_warehouses()
update_gl_entries_after(self.posting_date, self.posting_time, warehouses, items)
elif self.docstatus == 2 and cint(self.update_stock) \
and cint(auto_accounting_for_stock):
from erpnext.accounts.general_ledger import delete_gl_entries
delete_gl_entries(voucher_type=self.doctype, voucher_no=self.name)
def get_gl_entries(self, warehouse_account=None):
from erpnext.accounts.general_ledger import merge_similar_entries
gl_entries = []
self.make_customer_gl_entry(gl_entries)
self.make_tax_gl_entries(gl_entries)
self.make_item_gl_entries(gl_entries)
# merge gl entries before adding pos entries
gl_entries = merge_similar_entries(gl_entries)
self.make_pos_gl_entries(gl_entries)
self.make_gle_for_change_amount(gl_entries)
self.make_write_off_gl_entry(gl_entries)
self.make_gle_for_rounding_adjustment(gl_entries)
return gl_entries
def make_customer_gl_entry(self, gl_entries):
if self.grand_total:
			# Did not use base_grand_total so that the rounding loss gets booked in this GL entry
grand_total_in_company_currency = flt(self.grand_total * self.conversion_rate,
self.precision("grand_total"))
gl_entries.append(
self.get_gl_dict({
"account": self.debit_to,
"party_type": "Customer",
"party": self.customer,
"against": self.against_income_account,
"debit": grand_total_in_company_currency,
"debit_in_account_currency": grand_total_in_company_currency \
if self.party_account_currency==self.company_currency else self.grand_total,
"against_voucher": self.return_against if cint(self.is_return) else self.name,
"against_voucher_type": self.doctype
}, self.party_account_currency)
)
def make_tax_gl_entries(self, gl_entries):
for tax in self.get("taxes"):
if flt(tax.base_tax_amount_after_discount_amount):
account_currency = get_account_currency(tax.account_head)
gl_entries.append(
self.get_gl_dict({
"account": tax.account_head,
"against": self.customer,
"credit": flt(tax.base_tax_amount_after_discount_amount),
"credit_in_account_currency": flt(tax.base_tax_amount_after_discount_amount) \
if account_currency==self.company_currency else flt(tax.tax_amount_after_discount_amount),
"cost_center": tax.cost_center
}, account_currency)
)
def make_item_gl_entries(self, gl_entries):
# income account gl entries
for item in self.get("items"):
if flt(item.base_net_amount):
if item.is_fixed_asset:
asset = frappe.get_doc("Asset", item.asset)
fixed_asset_gl_entries = get_gl_entries_on_asset_disposal(asset, item.base_net_amount)
for gle in fixed_asset_gl_entries:
gle["against"] = self.customer
gl_entries.append(self.get_gl_dict(gle))
asset.db_set("disposal_date", self.posting_date)
asset.set_status("Sold" if self.docstatus==1 else None)
else:
account_currency = get_account_currency(item.income_account)
gl_entries.append(
self.get_gl_dict({
"account": item.income_account,
"against": self.customer,
"credit": item.base_net_amount,
"credit_in_account_currency": item.base_net_amount \
if account_currency==self.company_currency else item.net_amount,
"cost_center": item.cost_center
}, account_currency)
)
# expense account gl entries
if cint(self.update_stock) and \
erpnext.is_perpetual_inventory_enabled(self.company):
gl_entries += super(SalesInvoice, self).get_gl_entries()
def make_pos_gl_entries(self, gl_entries):
if cint(self.is_pos):
for payment_mode in self.payments:
if payment_mode.amount:
# POS, make payment entries
gl_entries.append(
self.get_gl_dict({
"account": self.debit_to,
"party_type": "Customer",
"party": self.customer,
"against": payment_mode.account,
"credit": payment_mode.base_amount,
"credit_in_account_currency": payment_mode.base_amount \
if self.party_account_currency==self.company_currency \
else payment_mode.amount,
"against_voucher": self.return_against if cint(self.is_return) else self.name,
"against_voucher_type": self.doctype,
}, self.party_account_currency)
)
payment_mode_account_currency = get_account_currency(payment_mode.account)
gl_entries.append(
self.get_gl_dict({
"account": payment_mode.account,
"against": self.customer,
"debit": payment_mode.base_amount,
"debit_in_account_currency": payment_mode.base_amount \
if payment_mode_account_currency==self.company_currency \
else payment_mode.amount
}, payment_mode_account_currency)
)
def make_gle_for_change_amount(self, gl_entries):
if cint(self.is_pos) and self.change_amount:
if self.account_for_change_amount:
gl_entries.append(
self.get_gl_dict({
"account": self.debit_to,
"party_type": "Customer",
"party": self.customer,
"against": self.account_for_change_amount,
"debit": flt(self.base_change_amount),
"debit_in_account_currency": flt(self.base_change_amount) \
if self.party_account_currency==self.company_currency else flt(self.change_amount),
"against_voucher": self.return_against if cint(self.is_return) else self.name,
"against_voucher_type": self.doctype
}, self.party_account_currency)
)
gl_entries.append(
self.get_gl_dict({
"account": self.account_for_change_amount,
"against": self.customer,
"credit": self.base_change_amount
})
)
else:
frappe.throw(_("Select change amount account"), title="Mandatory Field")
def make_write_off_gl_entry(self, gl_entries):
# write off entries, applicable if only pos
if self.write_off_account and self.write_off_amount:
write_off_account_currency = get_account_currency(self.write_off_account)
default_cost_center = frappe.db.get_value('Company', self.company, 'cost_center')
gl_entries.append(
self.get_gl_dict({
"account": self.debit_to,
"party_type": "Customer",
"party": self.customer,
"against": self.write_off_account,
"credit": self.base_write_off_amount,
"credit_in_account_currency": self.base_write_off_amount \
if self.party_account_currency==self.company_currency else self.write_off_amount,
"against_voucher": self.return_against if cint(self.is_return) else self.name,
"against_voucher_type": self.doctype
}, self.party_account_currency)
)
gl_entries.append(
self.get_gl_dict({
"account": self.write_off_account,
"against": self.customer,
"debit": self.base_write_off_amount,
"debit_in_account_currency": self.base_write_off_amount \
if write_off_account_currency==self.company_currency else self.write_off_amount,
"cost_center": self.write_off_cost_center or default_cost_center
}, write_off_account_currency)
)
def make_gle_for_rounding_adjustment(self, gl_entries):
if self.rounding_adjustment:
round_off_account, round_off_cost_center = \
get_round_off_account_and_cost_center(self.company)
gl_entries.append(
self.get_gl_dict({
"account": round_off_account,
"against": self.customer,
"credit_in_account_currency": self.rounding_adjustment,
"credit": self.base_rounding_adjustment,
"cost_center": round_off_cost_center,
}
))
def update_billing_status_in_dn(self, update_modified=True):
updated_delivery_notes = []
for d in self.get("items"):
if d.dn_detail:
billed_amt = frappe.db.sql("""select sum(amount) from `tabSales Invoice Item`
where dn_detail=%s and docstatus=1""", d.dn_detail)
billed_amt = billed_amt and billed_amt[0][0] or 0
frappe.db.set_value("Delivery Note Item", d.dn_detail, "billed_amt", billed_amt, update_modified=update_modified)
updated_delivery_notes.append(d.delivery_note)
elif d.so_detail:
updated_delivery_notes += update_billed_amount_based_on_so(d.so_detail, update_modified)
for dn in set(updated_delivery_notes):
frappe.get_doc("Delivery Note", dn).update_billing_percentage(update_modified=update_modified)
def on_recurring(self, reference_doc, subscription_doc):
for fieldname in ("c_form_applicable", "c_form_no", "write_off_amount"):
self.set(fieldname, reference_doc.get(fieldname))
self.due_date = None
def update_serial_no(self, in_cancel=False):
""" update Sales Invoice refrence in Serial No """
invoice = None if (in_cancel or self.is_return) else self.name
if in_cancel and self.is_return:
invoice = self.return_against
for item in self.items:
if not item.serial_no:
continue
for serial_no in item.serial_no.split("\n"):
if serial_no and frappe.db.exists('Serial No', serial_no):
sno = frappe.get_doc('Serial No', serial_no)
sno.sales_invoice = invoice
sno.db_update()
def validate_serial_numbers(self):
"""
validate serial number agains Delivery Note and Sales Invoice
"""
self.set_serial_no_against_delivery_note()
self.validate_serial_against_delivery_note()
self.validate_serial_against_sales_invoice()
def set_serial_no_against_delivery_note(self):
for item in self.items:
if item.serial_no and item.delivery_note and \
item.qty != len(get_serial_nos(item.serial_no)):
item.serial_no = get_delivery_note_serial_no(item.item_code, item.qty, item.delivery_note)
def validate_serial_against_delivery_note(self):
"""
validate if the serial numbers in Sales Invoice Items are same as in
Delivery Note Item
"""
for item in self.items:
if not item.delivery_note or not item.dn_detail:
continue
serial_nos = frappe.db.get_value("Delivery Note Item", item.dn_detail, "serial_no") or ""
dn_serial_nos = set(get_serial_nos(serial_nos))
serial_nos = item.serial_no or ""
si_serial_nos = set(get_serial_nos(serial_nos))
if si_serial_nos - dn_serial_nos:
frappe.throw(_("Serial Numbers in row {0} does not match with Delivery Note".format(item.idx)))
if item.serial_no and cint(item.qty) != len(si_serial_nos):
frappe.throw(_("Row {0}: {1} Serial numbers required for Item {2}. You have provided {3}.".format(
item.idx, item.qty, item.item_code, len(si_serial_nos))))
def validate_serial_against_sales_invoice(self):
""" check if serial number is already used in other sales invoice """
for item in self.items:
if not item.serial_no:
continue
for serial_no in item.serial_no.split("\n"):
sales_invoice = frappe.db.get_value("Serial No", serial_no, "sales_invoice")
if sales_invoice and self.name != sales_invoice:
frappe.throw(_("Serial Number: {0} is already referenced in Sales Invoice: {1}".format(
serial_no, sales_invoice
)))
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context.update({
'show_sidebar': True,
'show_search': True,
'no_breadcrumbs': True,
'title': _('Invoices'),
})
return list_context
@frappe.whitelist()
def get_bank_cash_account(mode_of_payment, company):
account = frappe.db.get_value("Mode of Payment Account",
{"parent": mode_of_payment, "company": company}, "default_account")
if not account:
frappe.throw(_("Please set default Cash or Bank account in Mode of Payment {0}")
.format(mode_of_payment))
return {
"account": account
}
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None):
def set_missing_values(source, target):
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source_doc, target_doc, source_parent):
target_doc.qty = flt(source_doc.qty) - flt(source_doc.delivered_qty)
target_doc.stock_qty = target_doc.qty * flt(source_doc.conversion_factor)
target_doc.base_amount = target_doc.qty * flt(source_doc.base_rate)
target_doc.amount = target_doc.qty * flt(source_doc.rate)
doclist = get_mapped_doc("Sales Invoice", source_name, {
"Sales Invoice": {
"doctype": "Delivery Note",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Invoice Item": {
"doctype": "Delivery Note Item",
"field_map": {
"name": "si_detail",
"parent": "against_sales_invoice",
"serial_no": "serial_no",
"sales_order": "against_sales_order",
"so_detail": "so_detail",
"cost_center": "cost_center"
},
"postprocess": update_item,
"condition": lambda doc: doc.delivered_by_supplier!=1
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"field_map": {
"incentives": "incentives"
},
"add_if_empty": True
}
}, target_doc, set_missing_values)
return doclist
@frappe.whitelist()
def make_sales_return(source_name, target_doc=None):
from erpnext.controllers.sales_and_purchase_return import make_return_doc
return make_return_doc("Sales Invoice", source_name, target_doc)
def set_account_for_mode_of_payment(self):
for data in self.payments:
if not data.account:
data.account = get_bank_cash_account(data.mode_of_payment, self.company).get("account")
| gpl-3.0 | -4,458,028,978,484,492,300 | 35.605561 | 153 | 0.692044 | false |
tavisrudd/eventlet | eventlet/convenience.py | 1 | 4364 | import sys
from eventlet import greenio
from eventlet import greenthread
from eventlet import greenpool
from eventlet.green import socket
from eventlet.support import greenlets as greenlet
def connect(addr, family=socket.AF_INET, bind=None):
"""Convenience function for opening client sockets.
:param addr: Address of the server to connect to. For TCP sockets, this is a (host, port) tuple.
:param family: Socket family, optional. See :mod:`socket` documentation for available families.
:param bind: Local address to bind to, optional.
:return: The connected green socket object.
"""
sock = socket.socket(family, socket.SOCK_STREAM)
if bind is not None:
sock.bind(bind)
sock.connect(addr)
return sock
def listen(addr, family=socket.AF_INET, backlog=50):
"""Convenience function for opening server sockets. This
socket can be used in :func:`~eventlet.serve` or a custom ``accept()`` loop.
Sets SO_REUSEADDR on the socket to save on annoyance.
:param addr: Address to listen on. For TCP sockets, this is a (host, port) tuple.
:param family: Socket family, optional. See :mod:`socket` documentation for available families.
:param backlog: The maximum number of queued connections. Should be at least 1; the maximum value is system-dependent.
:return: The listening green socket object.
"""
sock = socket.socket(family, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(addr)
sock.listen(backlog)
return sock
class StopServe(Exception):
"""Exception class used for quitting :func:`~eventlet.serve` gracefully."""
pass
def _stop_checker(t, server_gt, conn):
try:
try:
t.wait()
finally:
conn.close()
except greenlet.GreenletExit:
pass
except Exception:
greenthread.kill(server_gt, *sys.exc_info())
def serve(sock, handle, concurrency=1000):
"""Runs a server on the supplied socket. Calls the function *handle* in a
separate greenthread for every incoming client connection. *handle* takes
two arguments: the client socket object, and the client address::
def myhandle(client_sock, client_addr):
print "client connected", client_addr
eventlet.serve(eventlet.listen(('127.0.0.1', 9999)), myhandle)
Returning from *handle* closes the client socket.
:func:`serve` blocks the calling greenthread; it won't return until
the server completes. If you desire an immediate return,
spawn a new greenthread for :func:`serve`.
Any uncaught exceptions raised in *handle* are raised as exceptions
from :func:`serve`, terminating the server, so be sure to be aware of the
exceptions your application can raise. The return value of *handle* is
ignored.
Raise a :class:`~eventlet.StopServe` exception to gracefully terminate the
server -- that's the only way to get the server() function to return rather
than raise.
The value in *concurrency* controls the maximum number of
greenthreads that will be open at any time handling requests. When
the server hits the concurrency limit, it stops accepting new
connections until the existing ones complete.
"""
pool = greenpool.GreenPool(concurrency)
server_gt = greenthread.getcurrent()
while True:
try:
conn, addr = sock.accept()
gt = pool.spawn(handle, conn, addr)
gt.link(_stop_checker, server_gt, conn)
conn, addr, gt = None, None, None
except StopServe:
return
def wrap_ssl(sock, keyfile=None, certfile=None, server_side=False,
cert_reqs=None, ssl_version=None, ca_certs=None,
do_handshake_on_connect=True, suppress_ragged_eofs=True):
"""Convenience function for converting a regular socket into an SSL
socket. Has the same interface as :func:`ssl.wrap_socket`, but
works on 2.5 or earlier, using PyOpenSSL.
The preferred idiom is to call wrap_ssl directly on the creation
method, e.g., ``wrap_ssl(connect(addr))`` or
``wrap_ssl(listen(addr), server_side=True)``. This way there is
no "naked" socket sitting around to accidentally corrupt the SSL
session.
:return Green SSL object.
"""
pass
| mit | 2,061,297,895,791,826,400 | 37.280702 | 122 | 0.683089 | false |
zstars/weblabdeusto | server/src/weblab/core/coordinator/redis/priority_queue_scheduler.py | 1 | 33851 | #!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <[email protected]>
#
import time
import datetime
import random
import json
from voodoo.log import logged
import voodoo.log as log
from voodoo.typechecker import typecheck
from voodoo.gen import CoordAddress
import voodoo.sessions.session_id as SessionId
from voodoo.override import Override
from weblab.core.coordinator.exc import ExpiredSessionError
from weblab.core.coordinator.scheduler_transactions_synchronizer import SchedulerTransactionsSynchronizer
from weblab.core.coordinator.scheduler import Scheduler
import weblab.core.coordinator.status as WSS
from weblab.core.coordinator.resource import Resource
from weblab.data.experiments import ExperimentInstanceId, ExperimentId
from weblab.core.coordinator.redis.constants import (
WEBLAB_RESOURCE_RESERVATION_PQUEUE,
WEBLAB_RESOURCE_SLOTS,
WEBLAB_RESOURCE_RESERVATIONS,
WEBLAB_RESOURCE_PQUEUE_RESERVATIONS,
WEBLAB_RESOURCE_PQUEUE_POSITIONS,
WEBLAB_RESOURCE_PQUEUE_MAP,
WEBLAB_RESOURCE_PQUEUE_SORTED,
WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS,
LAB_COORD,
CLIENT_INITIAL_DATA,
REQUEST_INFO,
EXPERIMENT_TYPE,
EXPERIMENT_INSTANCE,
START_TIME,
TIME,
INITIALIZATION_IN_ACCOUNTING,
PRIORITY,
TIMESTAMP_BEFORE,
TIMESTAMP_AFTER,
LAB_SESSION_ID,
EXP_INFO,
INITIAL_CONFIGURATION,
RESOURCE_INSTANCE,
ACTIVE_STATUS,
STATUS_RESERVED,
STATUS_WAITING_CONFIRMATION,
)
EXPIRATION_TIME = 3600 # seconds
DEBUG = False
###########################################################
#
# TODO write some documentation
#
def exc_checker(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
if DEBUG:
import traceback
traceback.print_exc()
log.log(
PriorityQueueScheduler, log.level.Error,
"Unexpected exception while running %s" % func.__name__ )
log.log_exc(PriorityQueueScheduler, log.level.Warning)
raise
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
return wrapper
TIME_ANTI_RACE_CONDITIONS = 0.1
class PriorityQueueScheduler(Scheduler):
def __init__(self, generic_scheduler_arguments, randomize_instances = True, **kwargs):
super(PriorityQueueScheduler, self).__init__(generic_scheduler_arguments, **kwargs)
self.randomize_instances = randomize_instances
self._synchronizer = SchedulerTransactionsSynchronizer(self)
self._synchronizer.start()
@Override(Scheduler)
def stop(self):
self._synchronizer.stop()
@Override(Scheduler)
def is_remote(self):
return False
@exc_checker
@logged()
@Override(Scheduler)
@typecheck(typecheck.ANY, typecheck.ANY, Resource)
def removing_current_resource_slot(self, client, resource):
weblab_resource_instance_reservations = WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS % (resource.resource_type, resource.resource_instance)
current_reservation_ids = client.smembers(weblab_resource_instance_reservations)
if len(current_reservation_ids) > 0:
current_reservation_id = list(current_reservation_ids)[0]
if client.srem(weblab_resource_instance_reservations, current_reservation_id):
self.reservations_manager.downgrade_confirmation(current_reservation_id)
self.resources_manager.release_resource(resource)
# Remove data that was added when confirmed
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, current_reservation_id)
reservation_data_str = client.get(weblab_reservation_pqueue)
reservation_data = json.loads(reservation_data_str)
reservation_data.pop(ACTIVE_STATUS, None)
reservation_data.pop(TIMESTAMP_BEFORE, None)
reservation_data.pop(TIMESTAMP_AFTER, None)
reservation_data.pop(LAB_SESSION_ID, None)
reservation_data.pop(EXP_INFO, None)
reservation_data_str = json.dumps(reservation_data)
reservation_data = client.set(weblab_reservation_pqueue, reservation_data_str)
# Add back to the queue
weblab_resource_pqueue_map = WEBLAB_RESOURCE_PQUEUE_MAP % self.resource_type_name
weblab_resource_pqueue_sorted = WEBLAB_RESOURCE_PQUEUE_SORTED % self.resource_type_name
filled_reservation_id = client.hget(weblab_resource_pqueue_map, current_reservation_id)
client.zadd(weblab_resource_pqueue_sorted, filled_reservation_id, -1)
return True
return False
@exc_checker
@logged()
@Override(Scheduler)
def reserve_experiment(self, reservation_id, experiment_id, time, priority, initialization_in_accounting, client_initial_data, request_info):
"""
priority: the less, the more priority
"""
client = self.redis_maker()
# For indexing purposes
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
weblab_resource_reservations = WEBLAB_RESOURCE_RESERVATIONS % self.resource_type_name
# Queue management
weblab_resource_pqueue_reservations = WEBLAB_RESOURCE_PQUEUE_RESERVATIONS % self.resource_type_name
weblab_resource_pqueue_positions = WEBLAB_RESOURCE_PQUEUE_POSITIONS % self.resource_type_name
weblab_resource_pqueue_map = WEBLAB_RESOURCE_PQUEUE_MAP % self.resource_type_name
weblab_resource_pqueue_sorted = WEBLAB_RESOURCE_PQUEUE_SORTED % self.resource_type_name
# Within the same priority, we want all to sort all the requests by the order they came.
# In order to support this, we increase a long enough value and put it before the reservaiton_id
current_position = client.incr(weblab_resource_pqueue_positions)
filled_reservation_id = "%s_%s" % (str(current_position).zfill(100), reservation_id)
pipeline = client.pipeline()
pipeline.hset(weblab_resource_pqueue_map, reservation_id, filled_reservation_id)
pipeline.zadd(weblab_resource_pqueue_sorted, filled_reservation_id, priority)
pipeline.sadd(weblab_resource_reservations, reservation_id)
pipeline.sadd(weblab_resource_pqueue_reservations, reservation_id)
generic_data = {
TIME : time,
INITIALIZATION_IN_ACCOUNTING : initialization_in_accounting,
PRIORITY : priority,
}
pipeline.set(weblab_reservation_pqueue, json.dumps(generic_data))
pipeline.execute()
return self.get_reservation_status(reservation_id)
#######################################################################
#
# Given a reservation_id, it returns in which state the reservation is
#
@exc_checker
@logged()
@Override(Scheduler)
def get_reservation_status(self, reservation_id):
self._remove_expired_reservations()
expired = self.reservations_manager.update(reservation_id)
if expired:
self._delete_reservation(reservation_id)
raise ExpiredSessionError("Expired reservation")
self._synchronizer.request_and_wait()
reservation_id_with_route = '%s;%s.%s' % (reservation_id, reservation_id, self.core_server_route)
client = self.redis_maker()
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
reservation_data_str = client.get(weblab_reservation_pqueue)
if reservation_data_str is None:
log.log(
PriorityQueueScheduler, log.level.Error,
"get_reservation_status called with a reservation_id that is not registered (not found on weblab_reservation_pqueue). Returning a WaitingInstanceStatus")
return WSS.WaitingInstancesQueueStatus(reservation_id_with_route, 50)
reservation_data = json.loads(reservation_data_str)
if ACTIVE_STATUS in reservation_data:
# Reserved or Waiting reservation
status = reservation_data[ACTIVE_STATUS]
# It may be just waiting for the experiment server to respond
if status == STATUS_WAITING_CONFIRMATION:
return WSS.WaitingConfirmationQueueStatus(reservation_id_with_route, self.core_server_url)
# Or the experiment server already responded and therefore we have all this data
str_lab_coord_address = reservation_data[LAB_COORD]
obtained_time = reservation_data[TIME]
initialization_in_accounting = reservation_data[INITIALIZATION_IN_ACCOUNTING]
lab_session_id = reservation_data[LAB_SESSION_ID]
initial_configuration = reservation_data[INITIAL_CONFIGURATION]
timestamp_before_tstamp = reservation_data[TIMESTAMP_BEFORE]
timestamp_after_tstamp = reservation_data[TIMESTAMP_AFTER]
if EXP_INFO in reservation_data and reservation_data[EXP_INFO]:
exp_info = json.loads(reservation_data[EXP_INFO])
else:
exp_info = {}
timestamp_before = datetime.datetime.fromtimestamp(timestamp_before_tstamp)
timestamp_after = datetime.datetime.fromtimestamp(timestamp_after_tstamp)
lab_coord_address = CoordAddress.translate(str_lab_coord_address)
if initialization_in_accounting:
before = timestamp_before_tstamp
else:
before = timestamp_after_tstamp
if before is not None:
remaining = (before + obtained_time) - self.time_provider.get_time()
else:
remaining = obtained_time
return WSS.LocalReservedStatus(reservation_id_with_route, lab_coord_address, SessionId.SessionId(lab_session_id), exp_info, obtained_time, initial_configuration, timestamp_before, timestamp_after, initialization_in_accounting, remaining, self.core_server_url)
# else it's waiting
weblab_resource_pqueue_map = WEBLAB_RESOURCE_PQUEUE_MAP % self.resource_type_name
weblab_resource_pqueue_sorted = WEBLAB_RESOURCE_PQUEUE_SORTED % self.resource_type_name
filled_reservation_id = client.hget(weblab_resource_pqueue_map, reservation_id)
if filled_reservation_id is None:
log.log(
PriorityQueueScheduler, log.level.Error,
"get_reservation_status called with a reservation_id that is not registered (not found on the reservations map). Returning a WaitingInstanceStatus")
return WSS.WaitingInstancesQueueStatus(reservation_id_with_route, 50)
position = client.zrank(weblab_resource_pqueue_sorted, filled_reservation_id)
if position is None: # It's not in the queue now
time.sleep(TIME_ANTI_RACE_CONDITIONS * random.random())
return self.get_reservation_status(reservation_id)
if self.resources_manager.are_resource_instances_working(self.resource_type_name):
return WSS.WaitingQueueStatus(reservation_id_with_route, position)
else:
return WSS.WaitingInstancesQueueStatus(reservation_id_with_route, position)
################################################################
#
# Called when it is confirmed by the Laboratory Server.
#
@exc_checker
@logged()
@Override(Scheduler)
def confirm_experiment(self, reservation_id, lab_session_id, initial_configuration, exp_info):
self._remove_expired_reservations()
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
client = self.redis_maker()
pqueue_reservation_data_str = client.get(weblab_reservation_pqueue)
if pqueue_reservation_data_str is None:
return
pqueue_reservation_data = json.loads(pqueue_reservation_data_str)
resource_instance_str = pqueue_reservation_data.get(RESOURCE_INSTANCE)
if resource_instance_str is not None:
resource_instance = Resource.parse(resource_instance_str)
if not self.resources_manager.check_working(resource_instance):
# TODO: if the experiment is broken and the student is ACTIVE_STATUS, something should be done
#
return
pqueue_reservation_data[LAB_SESSION_ID] = lab_session_id.id
pqueue_reservation_data[INITIAL_CONFIGURATION] = initial_configuration
pqueue_reservation_data[TIMESTAMP_AFTER] = self.time_provider.get_time()
pqueue_reservation_data[ACTIVE_STATUS] = STATUS_RESERVED
pqueue_reservation_data[EXP_INFO] = json.dumps(exp_info)
pqueue_reservation_data_str = json.dumps(pqueue_reservation_data)
client.set(weblab_reservation_pqueue, pqueue_reservation_data_str)
################################################################
#
# Called when the user disconnects or finishes the resource.
#
@exc_checker
@logged()
@Override(Scheduler)
def finish_reservation(self, reservation_id):
self._remove_expired_reservations()
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
client = self.redis_maker()
pqueue_reservation_data_str = client.get(weblab_reservation_pqueue)
if pqueue_reservation_data_str is None:
return
pqueue_reservation_data = json.loads(pqueue_reservation_data_str)
if ACTIVE_STATUS in pqueue_reservation_data:
enqueue_free_experiment_args = self._clean_current_reservation(reservation_id)
else:
enqueue_free_experiment_args = None
self._delete_reservation(reservation_id)
if enqueue_free_experiment_args is not None:
self.confirmer.enqueue_free_experiment(*enqueue_free_experiment_args)
def _clean_current_reservation(self, reservation_id):
client = self.redis_maker()
enqueue_free_experiment_args = None
if reservation_id is not None:
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
reservation_data_str = client.get(weblab_reservation_pqueue)
if reservation_data_str is not None:
downgraded = self.reservations_manager.downgrade_confirmation(reservation_id)
if downgraded:
reservation_data = json.loads(reservation_data_str)
resource_instance_str = reservation_data.get(RESOURCE_INSTANCE)
if resource_instance_str is not None:
resource_instance = Resource.parse(resource_instance_str)
weblab_resource_pqueue_instance_reservations = WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS % (resource_instance.resource_type, resource_instance.resource_instance)
client.srem(weblab_resource_pqueue_instance_reservations, reservation_id)
# print "RELEASING AT _clean_current_reservation. SHOULD NEVER HAPPEN."
# self.resources_manager.release_resource(resource_instance)
lab_session_id = reservation_data.get(LAB_SESSION_ID)
experiment_instance_str = reservation_data.get(EXPERIMENT_INSTANCE)
experiment_instance_id = ExperimentInstanceId.parse(experiment_instance_str)
if experiment_instance_id is not None:
# If the experiment instance doesn't exist, there is no need to call the free_experiment method
lab_coord_address = reservation_data.get(LAB_COORD)
enqueue_free_experiment_args = (lab_coord_address, reservation_id, lab_session_id, experiment_instance_id)
# otherwise the student has been removed
return enqueue_free_experiment_args
def update(self):
self._update_queues()
#############################################################
#
# Take the queue of a given Resource Type and update it
#
@exc_checker
def _update_queues(self):
###########################################################
# There are reasons why a waiting reservation may not be
# able to be promoted while the next one is. For instance,
# if a user is waiting for "pld boards", but only for
# instances of "pld boards" which have a "ud-binary@Binary
# experiments" server running. If only a "ud-pld@PLD
# Experiments" is available, then this user will not be
# promoted and the another user which is waiting for a
# "ud-pld@PLD Experiments" can be promoted.
#
# Therefore, we have a list of the IDs of the waiting
# reservations we previously thought that they couldn't be
# promoted in this iteration. They will have another
# chance in the next run of _update_queues.
#
previously_waiting_reservation_ids = []
weblab_resource_pqueue_map = WEBLAB_RESOURCE_PQUEUE_MAP % self.resource_type_name
weblab_resource_pqueue_sorted = WEBLAB_RESOURCE_PQUEUE_SORTED % self.resource_type_name
weblab_resource_slots = WEBLAB_RESOURCE_SLOTS % self.resource_type_name
###########################################################
# While there are free instances and waiting reservations,
# take the first waiting reservation and set it to current
# reservation. Make this repeatedly because we want to
# commit each change
#
while True:
client = self.redis_maker()
filled_waiting_reservation_ids = client.zrangebyscore(weblab_resource_pqueue_sorted, -10000, +10000, start=0, num=len(previously_waiting_reservation_ids) + 1)
first_waiting_reservation_id = None
for filled_waiting_reservation_id in filled_waiting_reservation_ids:
waiting_reservation_id = filled_waiting_reservation_id[filled_waiting_reservation_id.find('_')+1:]
if waiting_reservation_id not in previously_waiting_reservation_ids:
first_waiting_reservation_id = waiting_reservation_id
break
if first_waiting_reservation_id is None:
return # There is no waiting reservation for this resource that we haven't already tried
previously_waiting_reservation_ids.append(first_waiting_reservation_id)
#
# For the current resource_type, let's ask for
# all the resource instances available (i.e. those
# who are a member on weblab:resource:%s:slots )
#
free_instances = [ Resource(self.resource_type_name, resource_instance)
for resource_instance in client.smembers(weblab_resource_slots) ]
if len(free_instances) == 0:
# If there is no free instance, just return
return
#
# Select the correct free_instance for the current student among
# all the free_instances
#
if self.randomize_instances:
randomized_free_instances = [ free_instance for free_instance in free_instances ]
random.shuffle(randomized_free_instances)
else:
randomized_free_instances = sorted(free_instances, cmp=lambda r1, r2: cmp(r1.resource_type, r2.resource_type) or cmp(r1.resource_instance, r2.resource_instance))
for free_instance in randomized_free_instances:
#
# IMPORTANT: from here on every "continue" should first revoke the
# reservations_manager and resources_manager confirmations
#
working = self.resources_manager.check_working(free_instance)
if not working:
# The instance is not working
continue
confirmed = self.reservations_manager.confirm(first_waiting_reservation_id)
if not confirmed:
# student has already been confirmed somewhere else, so don't try with other
# instances, but rather with other student
break
acquired = self.resources_manager.acquire_resource(free_instance)
# print "ACQUIRED", free_instance, acquired, time.time()
if not acquired:
# the instance has been acquired by someone else. unconfirm student and
# try again with other free_instance
self.reservations_manager.downgrade_confirmation(first_waiting_reservation_id)
continue
weblab_resource_pqueue_instance_reservations = WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS % (self.resource_type_name, free_instance.resource_instance)
client.sadd(weblab_resource_pqueue_instance_reservations, first_waiting_reservation_id)
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, first_waiting_reservation_id)
pqueue_reservation_data_str = client.get(weblab_reservation_pqueue)
reservation_data = self.reservations_manager.get_reservation_data(first_waiting_reservation_id)
if pqueue_reservation_data_str is None or reservation_data is None:
# the student is not here anymore; downgrading confirmation is not required
# but releasing the resource is; and skip the rest of the free instances
self.resources_manager.release_resource(free_instance)
client.srem(weblab_resource_pqueue_instance_reservations, first_waiting_reservation_id)
break
pqueue_reservation_data = json.loads(pqueue_reservation_data_str)
start_time = self.time_provider.get_time()
total_time = pqueue_reservation_data[TIME]
pqueue_reservation_data[START_TIME] = start_time
pqueue_reservation_data[TIMESTAMP_BEFORE] = start_time
pqueue_reservation_data[ACTIVE_STATUS] = STATUS_WAITING_CONFIRMATION
pqueue_reservation_data[RESOURCE_INSTANCE] = free_instance.to_weblab_str()
initialization_in_accounting = pqueue_reservation_data[INITIALIZATION_IN_ACCOUNTING]
client_initial_data = reservation_data[CLIENT_INITIAL_DATA]
request_info = json.loads(reservation_data[REQUEST_INFO])
username = request_info.get('username')
locale = request_info.get('locale')
requested_experiment_type = ExperimentId.parse(reservation_data[EXPERIMENT_TYPE])
selected_experiment_instance = None
experiment_instances = self.resources_manager.list_experiment_instance_ids_by_resource(free_instance)
for experiment_instance in experiment_instances:
if experiment_instance.to_experiment_id() == requested_experiment_type:
selected_experiment_instance = experiment_instance
if selected_experiment_instance is None:
# This resource is not valid for this user, other free_instance should be
# selected. Try with other, but first clean the acquired resources
self.reservations_manager.downgrade_confirmation(first_waiting_reservation_id)
self.resources_manager.release_resource(free_instance)
client.srem(weblab_resource_pqueue_instance_reservations, first_waiting_reservation_id)
continue
pqueue_reservation_data[EXPERIMENT_INSTANCE] = selected_experiment_instance.to_weblab_str()
laboratory_coord_address = self.resources_manager.get_laboratory_coordaddress_by_experiment_instance_id(selected_experiment_instance)
pqueue_reservation_data[LAB_COORD] = laboratory_coord_address
client.set(weblab_reservation_pqueue, json.dumps(pqueue_reservation_data))
filled_reservation_id = client.hget(weblab_resource_pqueue_map, first_waiting_reservation_id)
client.zrem(weblab_resource_pqueue_sorted, filled_reservation_id)
#
# Enqueue the confirmation, since it might take a long time
# (for instance, if the laboratory server does not reply because
# of any network problem, or it just takes too much in replying),
# so this method might take too long. That's why we enqueue these
# petitions and run them in other threads.
#
deserialized_server_initial_data = {
'priority.queue.slot.length' : '%s' % total_time,
'priority.queue.slot.start' : '%s' % datetime.datetime.fromtimestamp(start_time),
'priority.queue.slot.initialization_in_accounting' : initialization_in_accounting,
'request.experiment_id.experiment_name' : selected_experiment_instance.exp_name,
'request.experiment_id.category_name' : selected_experiment_instance.cat_name,
'request.username' : username,
'request.full_name' : username,
'request.locale' : locale,
# TODO: add the username and user full name here
}
server_initial_data = json.dumps(deserialized_server_initial_data)
# server_initial_data will contain information such as "what was the last experiment used?".
# If a single resource was used by a binary experiment, then the next time may not require reprogramming the device
self.confirmer.enqueue_confirmation(laboratory_coord_address, first_waiting_reservation_id, selected_experiment_instance, client_initial_data, server_initial_data, self.resource_type_name)
#
# After it, keep in the while True in order to add the next
# reservation
#
break
################################################
#
# Remove all reservations whose session has expired
#
@exc_checker
def _remove_expired_reservations(self):
now = self.time_provider.get_time()
enqueue_free_experiment_args_retrieved = []
client = self.redis_maker()
weblab_resource_pqueue_reservations = WEBLAB_RESOURCE_PQUEUE_RESERVATIONS % self.resource_type_name
reservations = [ reservation_id for reservation_id in client.smembers(weblab_resource_pqueue_reservations) ]
# Since there might be a lot of reservations, create a pipeline and retrieve
# every reservation data in a row
pipeline = client.pipeline()
for reservation_id in reservations:
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
pipeline.get(weblab_reservation_pqueue)
results = pipeline.execute()
for reservation_id, reservation_data in zip(reservations, results):
if reservation_data is not None:
data = json.loads(reservation_data)
if ACTIVE_STATUS in data:
total_time = data[TIME]
timestamp_before = data[TIMESTAMP_BEFORE]
timestamp_after = data.get(TIMESTAMP_AFTER)
initialization_in_accounting = data[INITIALIZATION_IN_ACCOUNTING]
# if timestamp_after is None and initialization should not be considered,
# then we can not calculate if the time has expired, so we skip it (it will
# be considered as expired for lack of LATEST_ACCESS
if timestamp_after is not None or initialization_in_accounting:
timestamp = timestamp_before if initialization_in_accounting else timestamp_after
if now >= timestamp + total_time: # Expired
enqueue_free_experiment_args = self._clean_current_reservation(reservation_id)
enqueue_free_experiment_args_retrieved.append(enqueue_free_experiment_args)
self._delete_reservation(reservation_id)
self.reservations_manager.delete(reservation_id)
# Anybody with latest_access later than this point is expired
current_expiration_time = datetime.datetime.utcfromtimestamp(now - EXPIRATION_TIME)
for expired_reservation_id in self.reservations_manager.list_expired_reservations(current_expiration_time):
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, expired_reservation_id)
pqueue_reservation_data_str = client.get(weblab_reservation_pqueue)
if pqueue_reservation_data_str is None:
continue
pqueue_reservation_data = json.loads(pqueue_reservation_data_str)
if ACTIVE_STATUS in pqueue_reservation_data:
enqueue_free_experiment_args = self._clean_current_reservation(expired_reservation_id)
enqueue_free_experiment_args_retrieved.append(enqueue_free_experiment_args)
self._delete_reservation(expired_reservation_id)
self.reservations_manager.delete(expired_reservation_id)
for enqueue_free_experiment_args in enqueue_free_experiment_args_retrieved:
if enqueue_free_experiment_args is not None:
self.confirmer.enqueue_free_experiment(*enqueue_free_experiment_args)
def _delete_reservation(self, reservation_id):
weblab_resource_pqueue_reservations = WEBLAB_RESOURCE_PQUEUE_RESERVATIONS % self.resource_type_name
weblab_resource_pqueue_map = WEBLAB_RESOURCE_PQUEUE_MAP % self.resource_type_name
weblab_resource_pqueue_sorted = WEBLAB_RESOURCE_PQUEUE_SORTED % self.resource_type_name
weblab_reservation_pqueue = WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id)
resource_instances = self.resources_manager.list_resource_instances_by_type(self.resource_type_name)
client = self.redis_maker()
pipeline = client.pipeline()
for resource_instance in resource_instances:
weblab_resource_pqueue_instance_reservations = WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS % (self.resource_type_name, resource_instance.resource_instance)
pipeline.srem(weblab_resource_pqueue_instance_reservations, reservation_id)
pipeline.srem(weblab_resource_pqueue_reservations, reservation_id)
pipeline.delete(weblab_reservation_pqueue)
pipeline.execute()
filled_reservation_id = client.hget(weblab_resource_pqueue_map, reservation_id)
client.hdel(weblab_resource_pqueue_map, reservation_id)
client.zrem(weblab_resource_pqueue_sorted, filled_reservation_id)
##############################################################
#
# ONLY FOR TESTING: It completely removes the whole database
#
@Override(Scheduler)
def _clean(self):
client = self.redis_maker()
for reservation_id in self.reservations_manager.list_all_reservations():
client.delete(WEBLAB_RESOURCE_RESERVATION_PQUEUE % (self.resource_type_name, reservation_id))
client.delete(WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS % (self.resource_type_name, '*'))
for resource_instance in self.resources_manager.list_resource_instances_by_type(self.resource_type_name):
client.delete(WEBLAB_RESOURCE_PQUEUE_INSTANCE_RESERVATIONS % (self.resource_type_name, resource_instance.resource_instance))
client.delete(WEBLAB_RESOURCE_PQUEUE_RESERVATIONS % self.resource_type_name)
client.delete(WEBLAB_RESOURCE_PQUEUE_POSITIONS % self.resource_type_name)
client.delete(WEBLAB_RESOURCE_PQUEUE_MAP % self.resource_type_name)
client.delete(WEBLAB_RESOURCE_PQUEUE_SORTED % self.resource_type_name)
| bsd-2-clause | 1,431,921,179,492,637,200 | 49.902256 | 271 | 0.629483 | false |
cloudify-cosmo/softlayer-python | SoftLayer/CLI/mq/queue_add.py | 1 | 1390 | """Create a queue."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import mq
import click
@click.command()
@click.argument('account-id')
@click.argument('queue-name')
@click.option('--datacenter', help="Datacenter, E.G.: dal05")
@click.option('--network',
type=click.Choice(['public', 'private']),
help="Network type")
@click.option('--visibility-interval',
type=click.INT,
default=30,
help="Time in seconds that messages will re-appear after being "
"popped")
@click.option('--expiration',
type=click.INT,
default=604800,
help="Time in seconds that messages will live")
@click.option('--tag', '-g', multiple=True, help="Tags to add to the queue")
@environment.pass_env
def cli(env, account_id, queue_name, datacenter, network, visibility_interval,
expiration, tag):
"""Create a queue."""
manager = SoftLayer.MessagingManager(env.client)
mq_client = manager.get_connection(account_id,
datacenter=datacenter, network=network)
queue = mq_client.create_queue(
queue_name,
visibility_interval=visibility_interval,
expiration=expiration,
tags=tag,
)
return mq.queue_table(queue)
| mit | 7,730,772,881,855,012,000 | 31.325581 | 78 | 0.621583 | false |
cloudendpoints/endpoints-tools | auth/generate-jwt.py | 1 | 2375 | #!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python script generates a signed JWT token based on the input payload"""
import argparse
import time
import oauth2client.crypt
from oauth2client.service_account import ServiceAccountCredentials
def main(args):
"""Generates a signed JSON Web Token using a Google API Service Account."""
credentials = ServiceAccountCredentials.from_json_keyfile_name(
args.service_account_file)
now = int(time.time())
payload = {
"exp": now + credentials.MAX_TOKEN_LIFETIME_SECS,
"iat": now,
"aud": args.aud,
}
if args.email:
payload["email"] = args.email
if args.groupId:
payload["groupId"] = args.groupId
if args.issuer:
payload["iss"] = args.issuer
payload["sub"] = args.issuer
else:
payload["iss"] = credentials.service_account_email
payload["sub"] = credentials.service_account_email
signed_jwt = oauth2client.crypt.make_signed_jwt(
credentials._signer, payload, key_id=credentials._private_key_id)
return signed_jwt
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# positional arguments
parser.add_argument(
"aud",
help="Audience . This must match 'audience' in the security configuration"
" in the swagger spec. It can be any string")
parser.add_argument(
'service_account_file',
help='The path to your service account json file.')
#optional arguments
parser.add_argument("-e", "--email", help="Email claim in JWT")
parser.add_argument("-g", "--groupId", help="GroupId claim in JWT")
parser.add_argument("-iss", "--issuer", help="Issuer claim. This will also be used for sub claim")
print main(parser.parse_args())
| apache-2.0 | 7,548,041,424,393,917,000 | 32.450704 | 100 | 0.703579 | false |
xjw1001001/IGCexpansion | test/Ancestral_reconstruction/IGCcluster_analysis.py | 1 | 46187 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 22:00:31 2017
@author: xjw1001001
"""
import numpy as np
from IGCexpansion.CodonGeneconFunc import *
llpath = '/Users/xjw1001001/Documents/GitHub/IGCexpansion2/test/Ancestral_reconstruction/'
path = llpath + 'matrix/sitewise_IGC_statmatrix/'
paralog_list = [['YLR406C', 'YDL075W'],  # all paralog pairs analysed below; TODO: other data
['YER131W', 'YGL189C'], ['YML026C', 'YDR450W'], ['YNL301C', 'YOL120C'], ['YNL069C', 'YIL133C'],
['YMR143W', 'YDL083C'], ['YJL177W', 'YKL180W'], ['YBR191W', 'YPL079W'], ['YER074W', 'YIL069C'],
['YDR418W', 'YEL054C'], ['YBL087C', 'YER117W'], ['YLR333C', 'YGR027C'], ['YMR142C', 'YDL082W'],
['YER102W', 'YBL072C'], ['EDN', 'ECP'],['ERa', 'ERb'],['ARa', 'ERa'],['AR', 'MR'],['AR', 'GR'],['AR', 'PR'],
['MR', 'GR'],['MR', 'PR'],['PR', 'GR'] ]
ARMRGRPR_list = [['AR', 'MR'],['AR', 'GR'],['AR', 'PR'],['MR', 'GR'],['MR', 'PR'],['PR', 'GR']]
Yeast_list = [['YLR406C', 'YDL075W'], ['YER131W', 'YGL189C'],['YML026C', 'YDR450W'], ['YNL301C', 'YOL120C'], ['YNL069C', 'YIL133C'],
['YMR143W', 'YDL083C'], ['YJL177W', 'YKL180W'], ['YBR191W', 'YPL079W'], ['YER074W', 'YIL069C'], ['YDR418W', 'YEL054C'], ['YBL087C', 'YER117W'],
['YLR333C', 'YGR027C'],['YMR142C', 'YDL082W'], ['YER102W', 'YBL072C']]
EDNECP_newicktree ='/Users/xjw1001001/Documents/GitHub/IGCexpansion2/reconstruction_data/Zhang2002_data ECPEDN/from gene bank/primate_EDN_ECP.newick'
Yeast_newicktree = '/Users/xjw1001001/Documents/GitHub/IGCexpansion2/YeastTree.newick'
ERa_ERb_newicktree = '/Users/xjw1001001/Documents/GitHub/IGCexpansion2/reconstruction_data/SR_Thornton/ER/species.newick'
ARa_ERa_newicktree = '/Users/xjw1001001/Documents/GitHub/IGCexpansion2/reconstruction_data/SR_Thornton/ARa_ERa/ERa_ARa_species.newick'
ARMRGRPR_newicktree = '/Users/xjw1001001/Documents/GitHub/IGCexpansion2/reconstruction_data/SR_Thornton/AR_MR_GR_PR/species_common/species_common.newick'
bases = 'tcag'.upper()
codons = [a+b+c for a in bases for b in bases for c in bases]
amino_acids = 'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'
codon_table = dict(zip(codons, amino_acids))
codon_nonstop = [a for a in codon_table.keys() if not codon_table[a]=='*']
codon_to_state = {a.upper() : i for (i, a) in enumerate(codon_nonstop)}
state_to_codon = {i : a.upper() for (i, a) in enumerate(codon_nonstop)}
state_to_codon[61] = 'xxx'
pair_to_state = {pair:i for i, pair in enumerate(product(codon_nonstop, repeat = 2))}
Yeast_node = 13
EDNECP_node = 25
Model_list = ['IGC', 'tau=0']  # models compared: full IGC model vs. the tau = 0 (no IGC) null model
# reconstruction file naming pattern: 'arg_' + '_'.join(pair) + '_MG94_' + model + '.npy'
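# Composite ancestral states stored in the reconstruction .npy files encode a codon pair as
# state = state_1 * 62 + state_2, where indices 0-60 are the 61 sense codons and index 61
# ('xxx') presumably acts as a gap / unobserved-codon placeholder; the helper below inverts
# this encoding, i.e. a composite state s decodes as (state_to_codon[s // 62], state_to_codon[s % 62]).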
def state_to_compositecodons(state):
    """Decode a composite ancestral state into the (paralog 1, paralog 2) codon pair."""
    state_1, state_2 = divmod(state, 62)
    state_1 = int(state_1)
    state_2 = int(state_2)
    return (state_to_codon[state_1], state_to_codon[state_2])
# read per-pair model likelihoods and sitewise posterior summaries (expected tau, IGC counts, point mutations, posteriors)
Expected_tau = {}  # keyed by paralog pair, one sitewise array per pair
ExpectedIGC = {}
ExpectedIGC['num'] = {}
ExpectedIGC['1to2'] = {}
ExpectedIGC['2to1'] = {}
model = {}
posterior = {}
posterior['1to2'] = {}
posterior['2to1'] = {}
posterior['IGC'] = {}
ExpectedIGC['point'] = {}
ExpectedIGC['proportion'] = {}
#'_'.join(pair)
for pair in paralog_list:
model['_'.join(pair)] = {}
model['_'.join(pair)]['IGC'] = np.loadtxt(open(llpath + 'model_likelihood/ancestral_reconstruction_' + '_'.join(pair) + '_MG94_IGCBFGS.txt','r'))
model['_'.join(pair)]['tau=0'] = np.loadtxt(open(llpath + 'model_likelihood/ancestral_reconstruction_' + '_'.join(pair) + '_MG94_tau=0BFGS.txt','r'))
model['_'.join(pair)]['PAML'] = np.loadtxt(open(llpath + 'PAML/output/summary/' + '_'.join(pair) + '.txt','r'))
Expected_tau['_'.join(pair)] = np.loadtxt(open(path + 'Expected_tau/' + '_'.join(pair) + '_MG94_IGC.txt','r'))
ExpectedIGC['num']['_'.join(pair)] = np.loadtxt(open(path + 'ExpectedIGCnum/' + '_'.join(pair) + '_MG94_IGC.txt','r'))
ExpectedIGC['1to2']['_'.join(pair)] = np.loadtxt(open(path + 'ExpectedIGCnum1_2/' + '_'.join(pair) + '_MG94_IGC.txt','r'))
ExpectedIGC['2to1']['_'.join(pair)] = np.loadtxt(open(path + 'ExpectedIGCnum2_1/' + '_'.join(pair) + '_MG94_IGC.txt','r'))
ExpectedIGC['point']['_'.join(pair)] = np.loadtxt(open(path + 'SitewiseExpectedpointMutation/' + '_'.join(pair) + '_MG94_IGC.txt','r'))
ExpectedIGC['proportion']['_'.join(pair)] = np.loadtxt(open(path + 'Sitewiseporpotion/' + '_'.join(pair) + '_MG94_IGC.txt','r'))
posterior['1to2']['_'.join(pair)] = np.loadtxt(open(path + 'posterior/' + '_'.join(pair) + '_MG94_IGC_1to2.txt','r'))
posterior['2to1']['_'.join(pair)] = np.loadtxt(open(path + 'posterior/' + '_'.join(pair) + '_MG94_IGC_2to1.txt','r'))
posterior['IGC']['_'.join(pair)] = np.loadtxt(open(path + 'posterior/' + '_'.join(pair) + '_MG94_IGC_IGC.txt','r'))
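# Each sitewise matrix loaded above is indexed as [site][branch], with the branch columns
# ordered as in the edge_list returned by read_newick (see edge_to_num below).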
# generate, for every pair and branch, a per-site string over the codes '0', '1', 'X', 'x' (see below)
reconstruct_path = 'matrix/reconstruction_likelihood/npy/'
dict_all = {}
difference_threshold_begin = 0.5   # sites are scored for IGC only if P(paralogs differ at the parent node) >= this value
difference_threshold_end = 0.5     # with the point-mutation threshold, defines uninformative sites ('x'): also identical at the child node
point_mutation_threshold = 0.2     # ... and fewer than this many expected point mutations on the branch
IGC_high_threshold = 0.5           # posterior IGC probability above which a site is scored '1'
IGC_low_threshold = 0.1            # posterior IGC probability above which a site is scored 'X' (ambiguous)
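# Per (branch, site) classification codes built below:
#   'x' : paralogs (almost) identical at both ends of the branch and few expected point mutations -> uninformative
#   '0' : no appreciable IGC signal
#   '1' : posterior probability of IGC above IGC_high_threshold
#   'X' : ambiguous, posterior between IGC_low_threshold and IGC_high_threshold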
for pair in paralog_list:
# read data
dict_all['_'.join(pair)]={}
    for model_name in Model_list:  # keep the loop variable distinct from the `model` dict loaded above
        dict_all['_'.join(pair)][model_name] = {}
        dict_all['_'.join(pair)][model_name]['arg'] = np.load(llpath+reconstruct_path+'arg_' +'_'.join(pair) + '_MG94_' + model_name + '.npy')
        dict_all['_'.join(pair)][model_name]['likelihood'] = np.load(llpath+reconstruct_path+'likelihood_' +'_'.join(pair) + '_MG94_' + model_name + '.npy')
branchwise_information = {}  # TODO: how should sites with begin difference near the 0.5 threshold be treated?
branchwise_assign_1to2 = {}
branchwise_assign_2to1 = {}
branchwise_assign_IGC = {}
branchwise_display = {}
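# branchwise_information : raw per-site statistics for every branch
# branchwise_assign_*    : strings of classification codes, one character per codon site
# branchwise_display     : 0/1 flags marking the sites that fall inside a detected IGC cluster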
##Yeast
plist = Yeast_list
tree = Yeast_newicktree
outgroup = 'kluyveri'
ktree, edge_list, node_to_num = read_newick(tree, 'N1')
num_to_node = {node_to_num[i]:i for i in node_to_num}
edge_to_num = {edge_list[i]:i for i in range(len(edge_list))}
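# num_to_node / edge_to_num translate between the tree returned by read_newick and the row /
# column indices of the reconstruction arrays and sitewise matrices; the branch leading to the
# outgroup is excluded from the scan below.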
for pair in plist:
branchwise_information['_'.join(pair)] = {}
branchwise_assign_1to2['_'.join(pair)] = {}
branchwise_assign_2to1['_'.join(pair)] = {}
branchwise_assign_IGC['_'.join(pair)] = {}
branchwise_display['_'.join(pair)] = {}
filename = open(llpath+ 'cluster_result/' + '_'.join(pair) + '.txt' ,'w')
for branch in edge_list:
        if branch[1] == outgroup:  # skip the branch leading to the outgroup
continue
        printflag = 0  # becomes 1 if this branch contains at least one flagged cluster
branchwise_display['_'.join(pair)][branch] = [0 for site in range(len(posterior['1to2']['_'.join(pair)]))]
branchwise_information['_'.join(pair)][branch] = []
branchwise_assign_1to2['_'.join(pair)][branch] = ''
branchwise_assign_2to1['_'.join(pair)][branch] = ''
branchwise_assign_IGC['_'.join(pair)][branch] = ''
for site in range(len(posterior['1to2']['_'.join(pair)])):
begin_difference = 0
end_difference = 0
            for i in range(10):  # over the 10 most likely reconstructions, accumulate P(paralogs differ) at the parent and child nodes
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][i])
if state1 != state2:
begin_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[0]]][i]
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][i])
if state1 != state2:
end_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[1]]][i]
branchwise_information['_'.join(pair)][branch].append({})
branchwise_information['_'.join(pair)][branch][site]['begin_difference'] = begin_difference
branchwise_information['_'.join(pair)][branch][site]['end_difference'] = end_difference
branchwise_information['_'.join(pair)][branch][site]['point_mutation'] = ExpectedIGC['point']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] = posterior['1to2']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] = posterior['2to1']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC'] = posterior['IGC']['_'.join(pair)][site][edge_to_num[branch]]
if branchwise_information['_'.join(pair)][branch][site]['begin_difference'] < difference_threshold_begin:
if branchwise_information['_'.join(pair)][branch][site]['end_difference'] < difference_threshold_end and branchwise_information['_'.join(pair)][branch][site]['point_mutation'] < point_mutation_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='x'
branchwise_assign_2to1['_'.join(pair)][branch]+='x'
branchwise_assign_IGC['_'.join(pair)][branch]+='x'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
else:
if branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_high_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_low_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='X'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_high_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_low_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='X'
else:
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_high_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_low_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='X'
else:
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
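        # Cluster detection: slide a 5-codon window along the alignment; if at least 2 of the
        # 5 sites are scored '1' for IGC, flag all 5 sites for display.
        # (With range(len - 5), the last possible window start, len - 5, is not examined.)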
for site in range(len(posterior['1to2']['_'.join(pair)])-5):
flag = 0
for i in range(5):
if branchwise_assign_IGC['_'.join(pair)][branch][site+i] == '1':
flag += 1
if flag >= 2:
for i in range(5):
branchwise_display['_'.join(pair)][branch][site+i] = 1
printflag = 1
if printflag == 0:
continue
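        # Output for each flagged branch: the branch tuple, the flagged site indices, then for
        # the parent node (branch[0]) and the child node (branch[1]) the most likely codon and
        # its amino acid for paralog 1 and paralog 2, each written as a tab-separated row.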
filename.write(str(branch)+ '\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(str(site) + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1]] + '\t')
filename.write('\n')
filename.close()
##EDN
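# The block below repeats the same per-branch classification, cluster detection and output for
# the EDN/ECP pair, using the primate tree and Saguinus_oedipus as the outgroup.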
plist = [['EDN','ECP']]
tree = EDNECP_newicktree
outgroup = 'Saguinus_oedipus'
ktree, edge_list, node_to_num = read_newick(tree, 'N1')
num_to_node = {node_to_num[i]:i for i in node_to_num}
edge_to_num = {edge_list[i]:i for i in range(len(edge_list))}
for pair in plist:
branchwise_information['_'.join(pair)] = {}
branchwise_assign_1to2['_'.join(pair)] = {}
branchwise_assign_2to1['_'.join(pair)] = {}
branchwise_assign_IGC['_'.join(pair)] = {}
branchwise_display['_'.join(pair)] = {}
filename = open(llpath+ 'cluster_result/' + '_'.join(pair) + '.txt' ,'w')
for branch in edge_list:
        if branch[1] == outgroup:  # skip the branch leading to the outgroup
continue
        printflag = 0  # becomes 1 if this branch contains at least one flagged cluster
branchwise_display['_'.join(pair)][branch] = [0 for site in range(len(posterior['1to2']['_'.join(pair)]))]
branchwise_information['_'.join(pair)][branch] = []
branchwise_assign_1to2['_'.join(pair)][branch] = ''
branchwise_assign_2to1['_'.join(pair)][branch] = ''
branchwise_assign_IGC['_'.join(pair)][branch] = ''
for site in range(len(posterior['1to2']['_'.join(pair)])):
begin_difference = 0
end_difference = 0
            for i in range(10):  # over the 10 most likely reconstructions, accumulate P(paralogs differ) at the parent and child nodes
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][i])
if state1 != state2:
begin_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[0]]][i]
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][i])
if state1 != state2:
end_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[1]]][i]
branchwise_information['_'.join(pair)][branch].append({})
branchwise_information['_'.join(pair)][branch][site]['begin_difference'] = begin_difference
branchwise_information['_'.join(pair)][branch][site]['end_difference'] = end_difference
branchwise_information['_'.join(pair)][branch][site]['point_mutation'] = ExpectedIGC['point']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] = posterior['1to2']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] = posterior['2to1']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC'] = posterior['IGC']['_'.join(pair)][site][edge_to_num[branch]]
if branchwise_information['_'.join(pair)][branch][site]['begin_difference'] < difference_threshold_begin:
if branchwise_information['_'.join(pair)][branch][site]['end_difference'] < difference_threshold_end and branchwise_information['_'.join(pair)][branch][site]['point_mutation'] < point_mutation_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='x'
branchwise_assign_2to1['_'.join(pair)][branch]+='x'
branchwise_assign_IGC['_'.join(pair)][branch]+='x'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
else:
if branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_high_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_low_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='X'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_high_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_low_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='X'
else:
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_high_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_low_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='X'
else:
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
for site in range(len(posterior['1to2']['_'.join(pair)])-5):
flag = 0
for i in range(5):
if branchwise_assign_IGC['_'.join(pair)][branch][site+i] == '1':
flag += 1
if flag >= 2:
for i in range(5):
branchwise_display['_'.join(pair)][branch][site+i] = 1
printflag = 1
if printflag == 0:
continue
filename.write(str(branch)+ '\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(str(site) + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1]] + '\t')
filename.write('\n')
filename.close()
##ERaERb
plist = [['ERa', 'ERb']]
tree = ERa_ERb_newicktree
outgroup = 'Branchiostoma_floridae'
ktree, edge_list, node_to_num = read_newick(tree, 'N1')
num_to_node = {node_to_num[i]:i for i in node_to_num}
edge_to_num = {edge_list[i]:i for i in range(len(edge_list))}
for pair in plist:
branchwise_information['_'.join(pair)] = {}
branchwise_assign_1to2['_'.join(pair)] = {}
branchwise_assign_2to1['_'.join(pair)] = {}
branchwise_assign_IGC['_'.join(pair)] = {}
branchwise_display['_'.join(pair)] = {}
filename = open(llpath+ 'cluster_result/' + '_'.join(pair) + '.txt' ,'w')
for branch in edge_list:
if branch[1] == outgroup:
continue
printflag = 0
branchwise_display['_'.join(pair)][branch] = [0 for site in range(len(posterior['1to2']['_'.join(pair)]))]
branchwise_information['_'.join(pair)][branch] = []
branchwise_assign_1to2['_'.join(pair)][branch] = ''
branchwise_assign_2to1['_'.join(pair)][branch] = ''
branchwise_assign_IGC['_'.join(pair)][branch] = ''
for site in range(len(posterior['1to2']['_'.join(pair)])):
begin_difference = 0
end_difference = 0
for i in range(10):#probability of first state difference
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][i])
if state1 != state2:
begin_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[0]]][i]
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][i])
if state1 != state2:
end_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[1]]][i]
branchwise_information['_'.join(pair)][branch].append({})
branchwise_information['_'.join(pair)][branch][site]['begin_difference'] = begin_difference
branchwise_information['_'.join(pair)][branch][site]['end_difference'] = end_difference
branchwise_information['_'.join(pair)][branch][site]['point_mutation'] = ExpectedIGC['point']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] = posterior['1to2']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] = posterior['2to1']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC'] = posterior['IGC']['_'.join(pair)][site][edge_to_num[branch]]
if branchwise_information['_'.join(pair)][branch][site]['begin_difference'] < difference_threshold_begin:
if branchwise_information['_'.join(pair)][branch][site]['end_difference'] < difference_threshold_end and branchwise_information['_'.join(pair)][branch][site]['point_mutation'] < point_mutation_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='x'
branchwise_assign_2to1['_'.join(pair)][branch]+='x'
branchwise_assign_IGC['_'.join(pair)][branch]+='x'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
else:
if branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_high_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_low_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='X'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_high_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_low_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='X'
else:
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_high_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_low_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='X'
else:
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
for site in range(len(posterior['1to2']['_'.join(pair)])-5):
flag = 0
for i in range(5):
if branchwise_assign_IGC['_'.join(pair)][branch][site+i] == '1':
flag += 1
if flag >= 2:
for i in range(5):
branchwise_display['_'.join(pair)][branch][site+i] = 1
printflag = 1
if printflag == 0:
continue
filename.write(str(branch)+ '\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(str(site) + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1]] + '\t')
filename.write('\n')
filename.close()
##ARaERa
plist = [['ARa', 'ERa']]
tree = ARa_ERa_newicktree
outgroup = 'Mus_musculus'
ktree, edge_list, node_to_num = read_newick(tree, 'N1')
num_to_node = {node_to_num[i]:i for i in node_to_num}
edge_to_num = {edge_list[i]:i for i in range(len(edge_list))}
for pair in plist:
branchwise_information['_'.join(pair)] = {}
branchwise_assign_1to2['_'.join(pair)] = {}
branchwise_assign_2to1['_'.join(pair)] = {}
branchwise_assign_IGC['_'.join(pair)] = {}
branchwise_display['_'.join(pair)] = {}
filename = open(llpath+ 'cluster_result/' + '_'.join(pair) + '.txt' ,'w')
for branch in edge_list:
if branch[1] == outgroup:
continue
printflag = 0
branchwise_display['_'.join(pair)][branch] = [0 for site in range(len(posterior['1to2']['_'.join(pair)]))]
branchwise_information['_'.join(pair)][branch] = []
branchwise_assign_1to2['_'.join(pair)][branch] = ''
branchwise_assign_2to1['_'.join(pair)][branch] = ''
branchwise_assign_IGC['_'.join(pair)][branch] = ''
for site in range(len(posterior['1to2']['_'.join(pair)])):
begin_difference = 0
end_difference = 0
for i in range(10):#probability of first state difference
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][i])
if state1 != state2:
begin_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[0]]][i]
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][i])
if state1 != state2:
end_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[1]]][i]
branchwise_information['_'.join(pair)][branch].append({})
branchwise_information['_'.join(pair)][branch][site]['begin_difference'] = begin_difference
branchwise_information['_'.join(pair)][branch][site]['end_difference'] = end_difference
branchwise_information['_'.join(pair)][branch][site]['point_mutation'] = ExpectedIGC['point']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] = posterior['1to2']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] = posterior['2to1']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC'] = posterior['IGC']['_'.join(pair)][site][edge_to_num[branch]]
if branchwise_information['_'.join(pair)][branch][site]['begin_difference'] < difference_threshold_begin:
if branchwise_information['_'.join(pair)][branch][site]['end_difference'] < difference_threshold_end and branchwise_information['_'.join(pair)][branch][site]['point_mutation'] < point_mutation_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='x'
branchwise_assign_2to1['_'.join(pair)][branch]+='x'
branchwise_assign_IGC['_'.join(pair)][branch]+='x'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
else:
if branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_high_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_low_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='X'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_high_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_low_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='X'
else:
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_high_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_low_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='X'
else:
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
for site in range(len(posterior['1to2']['_'.join(pair)])-5):
flag = 0
for i in range(5):
if branchwise_assign_IGC['_'.join(pair)][branch][site+i] == '1':
flag += 1
if flag >= 2:
for i in range(5):
branchwise_display['_'.join(pair)][branch][site+i] = 1
printflag = 1
if printflag == 0:
continue
filename.write(str(branch)+ '\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(str(site) + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1]] + '\t')
filename.write('\n')
filename.close()
##ARMRGRPR
plist = ARMRGRPR_list
tree = ARMRGRPR_newicktree
outgroup = 'Aplysia_californica'
ktree, edge_list, node_to_num = read_newick(tree, 'N1')
num_to_node = {node_to_num[i]:i for i in node_to_num}
edge_to_num = {edge_list[i]:i for i in range(len(edge_list))}
for pair in plist:
branchwise_information['_'.join(pair)] = {}
branchwise_assign_1to2['_'.join(pair)] = {}
branchwise_assign_2to1['_'.join(pair)] = {}
branchwise_assign_IGC['_'.join(pair)] = {}
branchwise_display['_'.join(pair)] = {}
filename = open(llpath+ 'cluster_result/' + '_'.join(pair) + '.txt' ,'w')
for branch in edge_list:
if branch[1] == outgroup:
continue
printflag = 0
branchwise_display['_'.join(pair)][branch] = [0 for site in range(len(posterior['1to2']['_'.join(pair)]))]
branchwise_information['_'.join(pair)][branch] = []
branchwise_assign_1to2['_'.join(pair)][branch] = ''
branchwise_assign_2to1['_'.join(pair)][branch] = ''
branchwise_assign_IGC['_'.join(pair)][branch] = ''
for site in range(len(posterior['1to2']['_'.join(pair)])):
begin_difference = 0
end_difference = 0
for i in range(10):#probability of first state difference
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][i])
if state1 != state2:
begin_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[0]]][i]
state1, state2 = state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][i])
if state1 != state2:
end_difference += dict_all['_'.join(pair)]['IGC']['likelihood'][site][node_to_num[branch[1]]][i]
branchwise_information['_'.join(pair)][branch].append({})
branchwise_information['_'.join(pair)][branch][site]['begin_difference'] = begin_difference
branchwise_information['_'.join(pair)][branch][site]['end_difference'] = end_difference
branchwise_information['_'.join(pair)][branch][site]['point_mutation'] = ExpectedIGC['point']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] = posterior['1to2']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] = posterior['2to1']['_'.join(pair)][site][edge_to_num[branch]]
branchwise_information['_'.join(pair)][branch][site]['IGC'] = posterior['IGC']['_'.join(pair)][site][edge_to_num[branch]]
if branchwise_information['_'.join(pair)][branch][site]['begin_difference'] < difference_threshold_begin:
if branchwise_information['_'.join(pair)][branch][site]['end_difference'] < difference_threshold_end and branchwise_information['_'.join(pair)][branch][site]['point_mutation'] < point_mutation_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='x'
branchwise_assign_2to1['_'.join(pair)][branch]+='x'
branchwise_assign_IGC['_'.join(pair)][branch]+='x'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
else:
if branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_high_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC1to2'] > IGC_low_threshold:
branchwise_assign_1to2['_'.join(pair)][branch]+='X'
else:
branchwise_assign_1to2['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_high_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC2to1'] > IGC_low_threshold:
branchwise_assign_2to1['_'.join(pair)][branch]+='X'
else:
branchwise_assign_2to1['_'.join(pair)][branch]+='0'
if branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_high_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='1'
elif branchwise_information['_'.join(pair)][branch][site]['IGC'] > IGC_low_threshold:
branchwise_assign_IGC['_'.join(pair)][branch]+='X'
else:
branchwise_assign_IGC['_'.join(pair)][branch]+='0'
for site in range(len(posterior['1to2']['_'.join(pair)])-5):
flag = 0
for i in range(5):
if branchwise_assign_IGC['_'.join(pair)][branch][site+i] == '1':
flag += 1
if flag >= 2:
for i in range(5):
branchwise_display['_'.join(pair)][branch][site+i] = 1
printflag = 1
if printflag == 0:
continue
filename.write(str(branch)+ '\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(str(site) + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[0]]][0])[1]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[0]] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1] + '\t')
filename.write('\n')
for site in range(len(posterior['1to2']['_'.join(pair)])):
if branchwise_display['_'.join(pair)][branch][site] == 1:
filename.write(codon_table[state_to_compositecodons(dict_all['_'.join(pair)]['IGC']['arg'][site][node_to_num[branch[1]]][0])[1]] + '\t')
filename.write('\n')
filename.close() | gpl-3.0 | 2,687,871,542,146,271,000 | 60.666222 | 219 | 0.559486 | false |
docusign/docusign-python-client | docusign_esign/models/workspace_settings.py | 1 | 3403 | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class WorkspaceSettings(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'comments_allowed': 'str'
}
attribute_map = {
'comments_allowed': 'commentsAllowed'
}
def __init__(self, comments_allowed=None): # noqa: E501
"""WorkspaceSettings - a model defined in Swagger""" # noqa: E501
self._comments_allowed = None
self.discriminator = None
if comments_allowed is not None:
self.comments_allowed = comments_allowed
@property
def comments_allowed(self):
"""Gets the comments_allowed of this WorkspaceSettings. # noqa: E501
# noqa: E501
:return: The comments_allowed of this WorkspaceSettings. # noqa: E501
:rtype: str
"""
return self._comments_allowed
@comments_allowed.setter
def comments_allowed(self, comments_allowed):
"""Sets the comments_allowed of this WorkspaceSettings.
# noqa: E501
:param comments_allowed: The comments_allowed of this WorkspaceSettings. # noqa: E501
:type: str
"""
self._comments_allowed = comments_allowed
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(WorkspaceSettings, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WorkspaceSettings):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| mit | -6,446,043,402,609,773,000 | 28.08547 | 140 | 0.566559 | false |
civisanalytics/muffnn | muffnn/fm/tests/test_fm_regressor.py | 1 | 7693 | """
Tests for the FM Regressor
based in part on sklearn's logistic tests:
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/linear_model/tests/test_logistic.py
"""
from io import BytesIO
import pickle
import sys
from unittest import mock
import numpy as np
import pytest
import scipy.sparse as sp
from scipy.stats import pearsonr
from sklearn.datasets import load_diabetes, make_regression
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.testing import assert_array_almost_equal, assert_equal
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_predict, KFold
from muffnn import FMRegressor
from muffnn.fm.tests.util import assert_sample_weights_work
# toy dataset where Y = x[0] -2 * x[1] + 2 + err
diabetes = load_diabetes()
X = np.array([[-1, 0], [-2, 1], [1, 1], [2, 0], [-2, 0], [0, 2]],
dtype=np.float32)
Xsp = sp.csr_matrix(X)
Y = X[:, 0] - 2 * X[:, 1] + 2 + \
np.random.RandomState(42).randn(X.shape[0]) * 0.01
# The default kwargs don't work for tiny datasets like those in these tests.
KWARGS = {"random_state": 2, "solver": 'L-BFGS-B', "rank": 1}
SGD_KWARGS = {"random_state": 2,
"rank": 1,
"solver": 'L-BFGS-B',
"n_epochs": 1000}
def check_predictions(est, X, y):
"""Check that the model is able to fit the regression training data.
based on
https://github.com/scikit-learn/scikit-learn/blob/af171b84bd3fb82eed4569aa0d1f976264ffae84/sklearn/linear_model/tests/test_logistic.py#L38
"""
n_samples = len(y)
preds = est.fit(X, y).predict(X)
assert_equal(preds.shape, (n_samples,))
assert_array_almost_equal(preds, y, decimal=1)
class FMRegressorLBFGSB(FMRegressor):
def __init__(self):
super(FMRegressorLBFGSB, self).__init__(
rank=1, solver='L-BFGS-B', random_state=2)
def test_make_feed_dict():
"""Test that the feed dictionary works ok."""
reg = FMRegressor()
reg.is_sparse_ = False
reg._y = 0
reg._x = 1
reg._sample_weight = 'sample_weight'
output_size = 1
reg._output_size = output_size
fd = reg._make_feed_dict(np.array(X), np.array(Y))
expected_keys = {0, 1, 'sample_weight'}
assert set(fd.keys()) == expected_keys
assert_array_almost_equal(fd[reg._y], Y)
assert fd[reg._y].dtype == np.float32, (
"Wrong data type for y w/ output_size = 1 in feed dict!")
assert_array_almost_equal(fd[reg._x], X)
assert fd[reg._x].dtype == np.float32, (
"Wrong data dtype for X in feed dict!")
def test_make_feed_dict_sparse():
"""Test that the feed dictionary works ok for sparse inputs."""
reg = FMRegressor()
reg.is_sparse_ = True
reg._y = 0
reg._x_inds = 1
reg._x_vals = 2
reg._x_shape = 3
reg._sample_weight = 'sample_weight'
# changing this so test catches indexing errors
X = [[-1, 0], [0, 1], [2, 3]]
output_size = 1
reg._output_size = output_size
fd = reg._make_feed_dict(np.array(X), np.array(Y))
assert_array_almost_equal(fd[reg._y], Y)
if output_size == 1:
assert fd[reg._y].dtype == np.float32, (
"Wrong data type for y w/ output_size = 1 in feed dict!")
else:
assert fd[reg._y].dtype == np.int32, (
"Wrong data type for y w/ output_size > 1 in feed dict!")
# Sparse arrays for TF are in row-major sorted order.
assert_array_almost_equal(
fd[reg._x_inds], [[0, 0], [1, 1], [2, 0], [2, 1]])
assert fd[reg._x_inds].dtype == np.int64, (
"Wrong data type for sparse inds in feed dict!")
assert_array_almost_equal(fd[reg._x_vals], [-1, 1, 2, 3])
assert fd[reg._x_vals].dtype == np.float32, (
"Wrong data type for sparse vals in feed dict!")
assert_array_almost_equal(fd[reg._x_shape], [3, 2])
assert fd[reg._x_shape].dtype == np.int64, (
"Wrong data type for sparse shape in feed dict!")
def test_check_estimator():
"""Check adherence to Estimator API."""
if sys.version_info.major == 3 and sys.version_info.minor == 7:
# Starting in Tensorflow 1.14 and Python 3.7, there's one module
# with a `0` in the __warningregistry__. Scikit-learn tries to clear
# this dictionary in its tests.
name = 'tensorboard.compat.tensorflow_stub.pywrap_tensorflow'
with mock.patch.object(sys.modules[name], '__warningregistry__', {}):
check_estimator(FMRegressorLBFGSB)
else:
check_estimator(FMRegressorLBFGSB)
def test_predict_lbfgsb():
"""Test regression w/ L-BFGS-B."""
check_predictions(FMRegressor(**KWARGS), X, Y)
def test_predict_sgd():
"""Test regression w/ SGD."""
check_predictions(FMRegressor(**SGD_KWARGS), X, Y)
def test_sparse_lbfgsb():
"""Test sparse matrix handling w/ L-BFGS-B."""
check_predictions(FMRegressor(**KWARGS), Xsp, Y)
def test_sparse_sgd():
"""Test sparse matrix handling w/ SGD."""
check_predictions(FMRegressor(**SGD_KWARGS), Xsp, Y)
def test_persistence():
reg = FMRegressor(random_state=42)
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
pred1 = reg.fit(X_diabetes, y_diabetes).predict(X_diabetes)
b = BytesIO()
pickle.dump(reg, b)
reg2 = pickle.loads(b.getvalue())
pred2 = reg2.predict(X_diabetes)
assert_array_almost_equal(pred1, pred2)
def test_replicability():
"""Test that models can be pickled and reloaded."""
reg = FMRegressor(random_state=42)
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
# Just predict on the training set, for simplicity.
pred1 = reg.fit(X_diabetes, y_diabetes).predict(X_diabetes)
pred2 = reg.fit(X_diabetes, y_diabetes).predict(X_diabetes)
assert_array_almost_equal(pred1, pred2)
def test_partial_fit():
# FMClassifier tests don't need **KWARGS but otherwise we get garbage
reg = FMRegressor(**KWARGS)
X, y = diabetes.data, diabetes.target
for _ in range(30):
reg.partial_fit(X, y)
y_pred = reg.predict(X)
assert pearsonr(y_pred, y)[0] > 0.5
def test_multioutput():
"""Check that right sized array is return when doing one prediction."""
reg = FMRegressor(random_state=2)
X, y = diabetes.data, diabetes.target
y = np.concatenate([y.reshape(-1, 1), y.reshape(-1, 1)], axis=1)
with pytest.raises(ValueError):
# not implemented!
reg.fit(X, y)
def test_predict_one():
"""Check that right sized array is return when doing one prediction."""
reg = FMRegressor(random_state=2)
X, y = diabetes.data, diabetes.target
reg.fit(X, y)
p = reg.predict(X[0:1, :])
assert p.shape == (1, )
def test_cross_val_predict():
"""Make sure it works in cross_val_predict."""
Xt = StandardScaler().fit_transform(X)
reg = FMRegressor(rank=2, solver='L-BFGS-B', random_state=4567).fit(Xt, Y)
cv = KFold(n_splits=2, random_state=457, shuffle=True)
y_oos = cross_val_predict(reg, Xt, Y, cv=cv, method='predict')
p_r = pearsonr(Y, y_oos)[0]
assert p_r >= 0.90, "Pearson R too low for fake data in cross_val_predict!"
def test_sample_weight():
assert_sample_weights_work(
make_regression,
{'n_samples': 3000},
# TF SGD does not work so well....
lambda: FMRegressor(rank=2, solver='L-BFGS-B', random_state=4567)
)
| bsd-3-clause | 8,741,935,526,478,151,000 | 31.459916 | 142 | 0.640972 | false |
trevor/calendarserver | txdav/caldav/datastore/scheduling/ischedule/test/test_delivery.py | 1 | 2853 | ##
# Copyright (c) 2005-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twisted.internet.defer import inlineCallbacks
from twisted.names import client
from twisted.python.modules import getModule
from twisted.trial import unittest
from twistedcaldav.stdconfig import config
from txdav.caldav.datastore.scheduling.ischedule import utils
from txdav.caldav.datastore.scheduling.ischedule.delivery import ScheduleViaISchedule
class CalDAV (unittest.TestCase):
"""
txdav.caldav.datastore.scheduling.caldav tests
"""
def tearDown(self):
"""
By setting the resolver to None, it will be recreated next time a name
lookup is done.
"""
client.theResolver = None
utils.DebugResolver = None
@inlineCallbacks
def test_matchCalendarUserAddress(self):
"""
Make sure we do an exact comparison on EmailDomain
"""
self.patch(config.Scheduling.iSchedule, "Enabled", True)
self.patch(config.Scheduling.iSchedule, "RemoteServers", "")
# Only mailtos:
result = yield ScheduleViaISchedule.matchCalendarUserAddress("http://example.com/principal/user")
self.assertFalse(result)
# Need to setup a fake resolver
module = getModule(__name__)
dataPath = module.filePath.sibling("data")
bindPath = dataPath.child("db.example.com")
self.patch(config.Scheduling.iSchedule, "DNSDebug", bindPath.path)
utils.DebugResolver = None
utils._initResolver()
result = yield ScheduleViaISchedule.matchCalendarUserAddress("mailto:[email protected]")
self.assertTrue(result)
result = yield ScheduleViaISchedule.matchCalendarUserAddress("mailto:[email protected]")
self.assertFalse(result)
result = yield ScheduleViaISchedule.matchCalendarUserAddress("mailto:[email protected]?subject=foobar")
self.assertFalse(result)
result = yield ScheduleViaISchedule.matchCalendarUserAddress("mailto:user")
self.assertFalse(result)
# Test when not enabled
ScheduleViaISchedule.domainServerMap = {}
self.patch(config.Scheduling.iSchedule, "Enabled", False)
result = yield ScheduleViaISchedule.matchCalendarUserAddress("mailto:[email protected]")
self.assertFalse(result)
| apache-2.0 | 8,254,567,666,733,260,000 | 38.082192 | 110 | 0.716789 | false |
FEniCS/fiat | FIAT/orthopoly.py | 1 | 10895 | """
orthopoly.py - A suite of functions for generating orthogonal polynomials
and quadrature rules.
Copyright (c) 2014 Greg von Winckel
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Last updated on Wed Jan 1 14:29:25 MST 2014
Modified by David A. Ham ([email protected]), 2016
"""
import numpy as np
from functools import reduce
from math import gamma
def gauss(alpha, beta):
"""
Compute the Gauss nodes and weights from the recursion
coefficients associated with a set of orthogonal polynomials
Inputs:
alpha - recursion coefficients
beta - recursion coefficients
Outputs:
x - quadrature nodes
w - quadrature weights
Adapted from the MATLAB code by Walter Gautschi
http://www.cs.purdue.edu/archives/2002/wxg/codes/gauss.m
"""
from numpy.linalg import eigh
A = np.diag(np.sqrt(beta)[1:], 1) + np.diag(alpha)
x, V = eigh(A, "U")
w = beta[0] * np.real(np.power(V[0, :], 2))
return x, w
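# Illustrative note (not from the original source): feeding gauss() the
# coefficients produced by rec_jacobi(n, 0, 0) below yields the n-point
# Gauss-Legendre rule on [-1, 1]; the returned weights always sum to
# beta[0], the zeroth moment of the weight function (2 for Legendre).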
def lobatto(alpha, beta, xl1, xl2):
"""
Compute the Lobatto nodes and weights with the preassigned
nodea xl1,xl2
Inputs:
alpha - recursion coefficients
beta - recursion coefficients
xl1 - assigned node location
xl2 - assigned node location
Outputs:
x - quadrature nodes
w - quadrature weights
    Based on section 7 of the paper
"Some modified matrix eigenvalue problems"
by Gene Golub, SIAM Review Vol 15, No. 2, April 1973, pp.318--334
"""
from numpy.linalg import solve
n = len(alpha) - 1
en = np.zeros(n)
en[-1] = 1
A1 = np.vstack((np.sqrt(beta), alpha - xl1))
J1 = np.diag(A1[0, 1:-1], 1) + np.diag(A1[1, 1:]) + np.diag(A1[0, 1:-1], -1)
A2 = np.vstack((np.sqrt(beta), alpha - xl2))
J2 = np.diag(A2[0, 1:-1], 1) + np.diag(A2[1, 1:]) + np.diag(A2[0, 1:-1], -1)
g1 = solve(J1, en)
g2 = solve(J2, en)
C = np.array(((1, -g1[-1]), (1, -g2[-1])))
xl = np.array((xl1, xl2))
ab = solve(C, xl)
alphal = alpha
alphal[-1] = ab[0]
betal = beta
betal[-1] = ab[1]
x, w = gauss(alphal, betal)
return x, w
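# Illustrative note (not from the original source): with Legendre
# coefficients from rec_jacobi(n, 0, 0) and the preassigned endpoints
# lobatto(alpha, beta, -1.0, 1.0), this returns a Gauss-Lobatto-Legendre
# rule whose first and last nodes are exactly -1 and +1.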
def rec_jacobi(N, a, b):
"""
Generate the recursion coefficients alpha_k, beta_k
P_{k+1}(x) = (x-alpha_k)*P_{k}(x) - beta_k P_{k-1}(x)
for the Jacobi polynomials which are orthogonal on [-1,1]
with respect to the weight w(x)=[(1-x)^a]*[(1+x)^b]
Inputs:
N - polynomial order
a - weight parameter
b - weight parameter
Outputs:
alpha - recursion coefficients
beta - recursion coefficients
Adapted from the MATLAB code by Dirk Laurie and Walter Gautschi
http://www.cs.purdue.edu/archives/2002/wxg/codes/r_jacobi.m
"""
nu = (b - a) / float(a + b + 2)
mu = 2 ** (a + b + 1) * gamma(a + 1) * gamma(b + 1) / gamma(a + b + 2)
if N == 1:
alpha = nu
beta = mu
else:
n = np.arange(1.0, N)
nab = 2 * n + a + b
alpha = np.hstack((nu, (b ** 2 - a ** 2) / (nab * (nab + 2))))
n = n[1:]
nab = nab[1:]
B1 = 4 * (a + 1) * (b + 1) / float((a + b + 2) ** 2 * (a + b + 3))
B = 4 * (n + a) * (n + b) * n * (n + a + b) / \
(nab ** 2 * (nab + 1) * (nab - 1))
beta = np.hstack((mu, B1, B))
return alpha, beta
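# Worked special case (illustrative, easy to check by hand): for the
# Legendre weight a = b = 0 the recursion above reduces to alpha_k = 0 with
# beta_0 = 2 and beta_k = k**2 / (4*k**2 - 1), i.e. 1/3, 4/15, 9/35, ...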
def rec_jacobi01(N, a, b):
"""
Generate the recursion coefficients alpha_k, beta_k
for the Jacobi polynomials which are orthogonal on [0,1]
See rec_jacobi for the recursion coefficients on [-1,1]
Inputs:
N - polynomial order
a - weight parameter
b - weight parameter
Outputs:
alpha - recursion coefficients
beta - recursion coefficients
Adapted from the MATLAB implementation:
https://www.cs.purdue.edu/archives/2002/wxg/codes/r_jacobi01.m
"""
if a <= -1 or b <= -1:
raise ValueError('''Jacobi coefficients are defined only
for alpha,beta > -1''')
if not isinstance(N, int):
raise TypeError('N must be an integer')
if N < 1:
raise ValueError('N must be at least 1')
c, d = rec_jacobi(N, a, b)
alpha = (1 + c) / 2
beta = d / 4
beta[0] = d[0] / 2 ** (a + b + 1)
return alpha, beta
def polyval(alpha, beta, x):
"""
Evaluate polynomials on x given the recursion coefficients alpha and beta
"""
N = len(alpha)
m = len(x)
P = np.zeros((m, N + 1))
P[:, 0] = 1
P[:, 1] = (x - alpha[0]) * P[:, 0]
for k in range(1, N):
P[:, k + 1] = (x - alpha[k]) * P[:, k] - beta[k] * P[:, k - 1]
return P
def jacobi(N, a, b, x, NOPT=1):
"""
JACOBI computes the Jacobi polynomials which are orthogonal on [-1,1]
with respect to the weight w(x)=[(1-x)^a]*[(1+x)^b] and evaluate them
on the given grid up to P_N(x). Setting NOPT=2 returns the
L2-normalized polynomials
"""
m = len(x)
P = np.zeros((m, N + 1))
apb = a + b
a1 = a - 1
b1 = b - 1
c = apb * (a - b)
P[:, 0] = 1
if N > 0:
P[:, 1] = 0.5 * (a - b + (apb + 2) * x)
if N > 1:
for k in range(2, N + 1):
k2 = 2 * k
g = k2 + apb
g1 = g - 1
g2 = g - 2
d = 2.0 * (k + a1) * (k + b1) * g
P[:, k] = (g1 * (c + g2 * g * x) * P[:, k - 1] -
d * P[:, k - 2]) / (k2 * (k + apb) * g2)
if NOPT == 2:
k = np.arange(N + 1)
pnorm = 2 ** (apb + 1) * gamma(k + a + 1) * gamma(k + b + 1) / \
((2 * k + a + b + 1) * (gamma(k + 1) * gamma(k + a + b + 1)))
P *= 1 / np.sqrt(pnorm)
return P
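# Illustrative check (not from the original source): jacobi(2, 0, 0, x)
# returns the columns [1, x, (3*x**2 - 1)/2], i.e. the first three Legendre
# polynomials, which is a quick way to sanity-check the recursion above.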
def jacobiD(N, a, b, x, NOPT=1):
"""
JACOBID computes the first derivatives of the normalized Jacobi
polynomials which are orthogonal on [-1,1] with respect
    to the weight w(x)=[(1-x)^a]*[(1+x)^b] and evaluates them
on the given grid up to P_N(x). Setting NOPT=2 returns
the derivatives of the L2-normalized polynomials
"""
z = np.zeros((len(x), 1))
if N == 0:
Px = z
else:
Px = 0.5 * np.hstack((z, jacobi(N - 1, a + 1, b + 1, x, NOPT) *
((a + b + 2 + np.arange(N)))))
return Px
def mm_log(N, a):
"""
MM_LOG Modified moments for a logarithmic weight function.
The call mm=MM_LOG(n,a) computes the first n modified moments of the
logarithmic weight function w(t)=t^a log(1/t) on [0,1] relative to
shifted Legendre polynomials.
REFERENCE: Walter Gautschi,``On the preceding paper `A Legendre
polynomial integral' by James L. Blue'',
Math. Comp. 33 (1979), 742-743.
Adapted from the MATLAB implementation:
https://www.cs.purdue.edu/archives/2002/wxg/codes/mm_log.m
"""
if a <= -1:
raise ValueError('Parameter a must be greater than -1')
prod = lambda z: reduce(lambda x, y: x * y, z, 1)
mm = np.zeros(N)
c = 1
for n in range(N):
if isinstance(a, int) and a < n:
p = range(n - a, n + a + 2)
mm[n] = (-1) ** (n - a) / prod(p)
mm[n] *= gamma(a + 1) ** 2
else:
if n == 0:
mm[0] = 1 / (a + 1) ** 2
else:
k = np.arange(1, n + 1)
s = 1 / (a + 1 + k) - 1 / (a + 1 - k)
p = (a + 1 - k) / (a + 1 + k)
mm[n] = (1 / (a + 1) + sum(s)) * prod(p) / (a + 1)
mm[n] *= c
c *= 0.5 * (n + 1) / (2 * n + 1)
return mm
def mod_chebyshev(N, mom, alpham, betam):
"""
    Calculate the recursion coefficients for the orthogonal polynomials
    which are orthogonal with respect to a weight function which is
    represented in terms of its modified moments which are obtained by
integrating the monic polynomials against the weight function.
References
----------
John C. Wheeler, "Modified moments and Gaussian quadratures"
Rocky Mountain Journal of Mathematics, Vol. 4, Num. 2 (1974), 287--296
Walter Gautschi, "Orthogonal Polynomials (in Matlab)
Journal of Computational and Applied Mathematics, Vol. 178 (2005) 215--234
Adapted from the MATLAB implementation:
https://www.cs.purdue.edu/archives/2002/wxg/codes/chebyshev.m
"""
if not isinstance(N, int):
raise TypeError('N must be an integer')
if N < 1:
raise ValueError('N must be at least 1')
N = min(N, int(len(mom) / 2))
alpha = np.zeros(N)
beta = np.zeros(N)
normsq = np.zeros(N)
sig = np.zeros((N + 1, 2 * N))
alpha[0] = alpham[0] + mom[1] / mom[0]
beta[0] = mom[0]
sig[1, :] = mom
for n in range(2, N + 1):
for m in range(n - 1, 2 * N - n + 1):
sig[n, m] = sig[n - 1, m + 1] - (alpha[n - 2] - alpham[m]) * sig[n - 1, m] - \
beta[n - 2] * sig[n - 2, m] + betam[m] * sig[n - 1, m - 1]
alpha[n - 1] = alpham[n - 1] + sig[n, n] / sig[n, n - 1] - sig[n - 1, n - 1] / \
sig[n - 1, n - 2]
beta[n - 1] = sig[n, n - 1] / sig[n - 1, n - 2]
normsq = np.diagonal(sig, -1)
return alpha, beta, normsq
def rec_jaclog(N, a):
"""
Generate the recursion coefficients alpha_k, beta_k
P_{k+1}(x) = (x-alpha_k)*P_{k}(x) - beta_k P_{k-1}(x)
for the monic polynomials which are orthogonal on [0,1]
with respect to the weight w(x)=x^a*log(1/x)
Inputs:
N - polynomial order
a - weight parameter
Outputs:
alpha - recursion coefficients
beta - recursion coefficients
    Adapted from the MATLAB code:
https://www.cs.purdue.edu/archives/2002/wxg/codes/r_jaclog.m
"""
alphaj, betaj = rec_jacobi01(2 * N, 0, 0)
mom = mm_log(2 * N, a)
alpha, beta, _ = mod_chebyshev(N, mom, alphaj, betaj)
return alpha, beta
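# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the original
# module).  It wires rec_jacobi() into gauss() to build a 5-point
# Gauss-Legendre rule and checks two integrals the rule reproduces exactly.
# Nothing here runs on import.
if __name__ == "__main__":
    al, be = rec_jacobi(5, 0, 0)          # Legendre recursion coefficients
    xg, wg = gauss(al, be)                # 5-point Gauss-Legendre rule
    print("sum of weights (should be 2):", np.sum(wg))
    print("integral of x^4 on [-1,1] (should be 0.4):", np.dot(wg, xg ** 4))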
| lgpl-3.0 | -129,732,180,367,428,380 | 27.372396 | 90 | 0.553281 | false |
slremy/testingpubsub | myBallPlate/remote.py | 1 | 7502 | '''
Copyright (c) 2013 Sekou Remy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
'''
This program takes nine parameters as command line arguments:
the duration of the test,
the step size,
the 3 PID constants for the position of the ball
the 3 PID constants for the angle of the beam
It produces a "fitness value" (higher is better), and provides this response on stdout.
The value is derived from the step response error for the closed-loop system.
python evaluatePID.py http://IPADDRESS:PORT/ duration STEPSIZE KPball KIball KDball KPbeam KIbeam KDbeam
'''
import web
import timeit
from numpy import sign, power, cos, sin
from collections import deque
import signal
from sys import exit, exc_info, argv
from time import sleep
ref0=[]
ref1=[]
try:
client = argv[1]
network = argv[2]
host = argv[3]
port = argv[4]
suffix = argv[5]
clientport = 0;
duration= float(argv[6]) if len(argv) > 6 else 200;
h= float(argv[7]) if len(argv) > 7 else .02;
KpR= float(argv[8]) if len(argv) > 8 else 6;
KiR= float(argv[9]) if len(argv) > 9 else 0; #0;
KdR= float(argv[10]) if len(argv) > 10 else -5.18;
KpM= float(argv[11]) if len(argv) > 11 else -12.08;
KiM= float(argv[12]) if len(argv) > 12 else 0;# 0;
KdM= float(argv[13]) if len(argv) > 13 else -0.4;
localport = argv[14] if len(argv) > 14 else str(int(port)+1000);
except:
print exc_info()[0]
print "syntax is " + argv[0] + " [client] [network] [host] [port] [suffix] duration STEPSIZE KPball KIball KDball KPbeam KIbeam KDbeam"
exit(0)
#Select process method from the correct client
if client == 'tcp':
from tcpclient import *
print "Importing process from tcpclient"
elif client == 'pycurl':
from pycurlclient import *
print "Importing process from pycurlclient"
elif client == 'httplib':
from httplibclient import *
print "Importing process from httplibclient"
elif client == 'urllib':
from urllibclient import *
print "Importing process from urllibclient"
elif client == 'udp':
from udpclient import *
print "Importing process from udpclient"
print "Host: %s:%s/" % (host,port)
#strip off trailing slash and http, if present.
host = host.strip('http://').strip('/');
#set up the best clock that can be accessed on this machine
clock = timeit.default_timer;
#get the current time (time the remote was started).
t0 = clock();
t=0
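# --- Assumed initial controller state (illustrative; not in the original file) ---
# The signal handler below reads these names before anything else assigns them,
# so the script would raise NameError on the first timer tick without them.
# The concrete values, the deque length, and the mapping of AR/BR and AM/BM onto
# the command-line PD constants are assumptions only (the handler has no
# integral term, so the Ki gains are left unused here).
X = deque([0.0, 0.0], maxlen=10)
Y = deque([0.0, 0.0], maxlen=10)
THETA = deque([0.0, 0.0], maxlen=10)
PHI = deque([0.0, 0.0], maxlen=10)
u_x = u_y = 0.0
StateTime = 0.0
iteration = 0
mse_x = mse_y = 0.0
tcrash = 0.0
crashed = False
angle_max = 0.5                # assumed beam-angle saturation limit
AR, BR = KpR, KdR              # assumed ball-position P and D gains
AM, BM = KpM, KdM              # assumed beam-angle P and D gains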
def closeprocess():
#process(host, port, "/stop?", clientport);
process(host, port, "/init?", clientport);
def catcher(signum, _):
#Sekou, you or someone, should convert this to a PID controller (11/8/2014)
global X, THETA, Y, PHI, t, StateTime, u_x, u_y
global tcrash, crashed, iteration, mse_x, mse_y
if ref0==[]:return
# Update the time and iteration number
iteration += 1
t1 = clock()-t0
url = "/u?&value0=%.4f&value1=%.4f&time=%.6f&stime=%.6f&access=8783392" % (u_x,u_y,t,StateTime);
response=process(host,port,url,clientport);
tr = clock() - t0;
if response != "" and ref0 != []:
X.appendleft( float(response.split()[0]));
THETA.appendleft( float(response.split()[2]));
Y.appendleft( float(response.split()[1]));
PHI.appendleft( float(response.split()[3]));
StateTime = float(response.split()[4])
e_x = ref0 - X[0];
angle_d = AR * (e_x) + BR * (X[0]-X[1]);
if angle_d > angle_max: angle_d=angle_max;
elif angle_d < -angle_max: angle_d=-angle_max;
u_x = AM*(angle_d*16 - THETA[0]) + BM * (THETA[0] - THETA[1])
e_y = ref1 - Y[0];
angle_d1 = AR * (e_y) + BR * (Y[0]-Y[1]);
if angle_d1 > angle_max: angle_d1=angle_max;
elif angle_d1 < -angle_max: angle_d1=-angle_max;
u_y = AM*(angle_d1*16 - PHI[0]) + BM * (PHI[0] - PHI[1])
#Update the performance parameters
mse_x = (mse_x * iteration + e_x**2)/(iteration + 1)
mse_y = (mse_y * iteration + e_y**2)/(iteration + 1)
else:
print "Communication timed out! ", clock() - t0
print "(",ref0, ref1,")", X[-1], Y[-1]
web.config.debug = False;
urls = (
'/a','remotecontroller',
'/reset','reset',
'/stop','closecontroller'
)
app = web.application(urls, globals())
wsgifunc = app.wsgifunc()
wsgifunc = web.httpserver.StaticMiddleware(wsgifunc)
server = web.httpserver.WSGIServer(("0.0.0.0", int(localport)),wsgifunc)
print "http://%s:%s/" % ("0.0.0.0", localport)
class remotecontroller:
def GET(self):
return self.process();
def POST(self):
return self.process();
def process(self):
global ref0, ref1
i = web.input();#print i
(ref0, ref1) = (( float((i.ref0).replace(" ","+")) if hasattr(i, 'ref0') else 0 ), ( float((i.ref1).replace(" ","+")) if hasattr(i, 'ref1') else 0 ))
#print ref0, ref1 , "<<=== desired"
f = "%.4f %.4f %.4f %.4f %s" % (X[-1], Y[-1], THETA[-1], PHI[-1], repr(clock()));
web.header("Content-Type", "text/plain") # Set the Header
web.header("Access-Control-Allow-Origin", "*") # Set the Header
web.header("Access-Control-Allow-Credentials", "true") # Set the Header
return f
class reset:
def GET(self):
return self.process();
def POST(self):
return self.process();
def process(self):
global ref0, ref1
i = web.input();#print i
(ref0, ref1) = ([],[])
print ref0, ref1 , "<<=== desired"
f = "%.4f %.4f %.4f %.4f %s" % (X[-1], Y[-1], THETA[-1], PHI[-1], repr(clock()));
web.header("Content-Type", "text/plain") # Set the Header
web.header("Access-Control-Allow-Origin", "*") # Set the Header
web.header("Access-Control-Allow-Credentials", "true") # Set the Header
return f
def stopper():
server.stop()
exit(0);
if __name__ == "__main__":
(mysignal,myitimer)=(signal.SIGALRM,signal.ITIMER_REAL)
'''
(mysignal,myitimer)=(signal.SIGPROF,signal.ITIMER_PROF)
(mysignal,myitimer)=(signal.SIGVTALRM,signal.ITIMER_VIRTUAL)
'''
if h < duration/3.0 and h > 0.001:
signal.signal(mysignal, catcher)
signal.setitimer(myitimer, h, h)
try:
server.start()
except (KeyboardInterrupt, SystemExit):
server.stop()
print exc_info()[0],"Shutting down service"
#closeprocess();
#return value
| mit | 1,301,263,015,616,686,600 | 34.220657 | 157 | 0.624367 | false |
eriksore/sdn | Old/addflow.py | 1 | 5988 | import json
import networkx as nx
from networkx.readwrite import json_graph
import httplib2
baseUrl = 'http://192.168.231.246:8080/controller/nb/v2'
containerName = 'default'
h = httplib2.Http(".cache")
h.add_credentials('admin', 'admin')
def find_edge(edges, headNode, tailNode):
for edge in odlEdges:
if edge['edge']['headNodeConnector']['node']['id'] == headNode and edge['edge']['tailNodeConnector']['node']['id'] == tailNode:
return edge
return None
def find_ports(edges, headNode, tailNode):
for edge in odlEdges:
if edge['edge']['headNodeConnector']['node']['id'] == headNode and edge['edge']['tailNodeConnector']['node']['id'] == tailNode:
portId = edge['properties']['name']['value']
return portId
return None
def put_path(path, odlEdges, srcIP, dstIP, baseUrl):
for i, node in enumerate(path[1:-1]):
flowName = "fromIP" + srcIP[-1:] + "Po" + str(i)
        ingressEdge = find_edge(odlEdges, path[i], node)
        egressEdge = find_edge(odlEdges, node, path[i+2])
newFlow = build_flow_entry(flowName, ingressEdge, egressEdge, node, srcIP, dstIP)
switchType = newFlow['node']['type']
putUrl = build_flow_url(baseUrl, 'default', switchType, node, flowName)
# PUT the flow to the controller
resp, content = put_dict(h, putUrl, newFlow)
def build_flow_entry(flowName, ingressEdge, egressEdge, node, srcIP, dstIP):
defaultPriority = "500"
newFlow = {"installInHw":"false"}
ingressPort = ingressEdge['edge']['tailNodeConnector']['id']
egressPort = egressEdge['edge']['headNodeConnector']['id']
switchType = egressEdge['edge']['headNodeConnector']['node']['type']
newFlow.update({"name":flowName})
newFlow.update({"node":ingressEdge['edge']['tailNodeConnector']['node']})
newFlow.update({"ingressPort":ingressPort, "priority":defaultPriority})
newFlow.update({"actions":"OUTPUT=" + egressPort})
return newFlow
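# Illustrative shape of the dict returned above (values are hypothetical;
# only the keys come from the code):
#   {"installInHw": "false", "name": "fromIP1Po0", "priority": "500",
#    "node": {"type": "OF", "id": "00:00:00:00:00:00:00:02"},
#    "ingressPort": "1", "actions": "OUTPUT=2"}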
#Second level URL build
def build_url(baseUrl, service, containerName):
putUrl = '/'.join([baseUrl, service, containerName])
return putUrl
#Build URL to work with flows on nodes
def build_flow_url(baseUrl, containerName, switchType, switchId, flowName):
putUrl = build_url(baseUrl, 'flowprogrammer', containerName) +'/node'+ '/'.join(['', switchType, switchId,'staticFlow', flowName])
return putUrl
def put_dict(h, url, d):
resp, content = h.request(
uri = url,
method = 'PUT',
headers={'Content-Type' : 'application/json'},
body=json.dumps(d),
)
return resp, content
def build_flow_rule_for_node():
return None
# Get all the edges/links
resp, content = h.request(build_url(baseUrl, 'topology', containerName), "GET")
edgeProperties = json.loads(content)
odlEdges = edgeProperties['edgeProperties']
#print json.dumps(odlEdges, indent = 2)
# Get all the nodes/switches
resp, content = h.request(build_url(baseUrl, 'switchmanager', containerName) + '/nodes/', "GET")
nodeProperties = json.loads(content)
odlNodes = nodeProperties['nodeProperties']
#print json.dumps(odlNodes, indent = 2)
#Print information about one specific node
resp, content = h.request(build_url(baseUrl, 'switchmanager',containerName) + '/node/OF/00:00:00:00:00:00:00:03', "GET")
nodeParam = json.loads(content)
nodeParameters = nodeParam['nodeConnectorProperties']
#print json.dumps(nodeParameters, indent = 2)
# Put nodes and edges into a graph
graph = nx.Graph()
for node in odlNodes:
graph.add_node(node['node']['id'])
for edge in odlEdges:
e = (edge['edge']['headNodeConnector']['node']['id'], edge['edge']['tailNodeConnector']['node']['id'])
graph.add_edge(*e)
#print "graph.edges()"
print graph.edges()
# Print out graph info as a sanity check
#print "shortest path from 3 to 7"
shortest_path = nx.shortest_path(graph, "00:00:00:00:00:00:00:03", "00:00:00:00:00:00:00:07")
#print shortest_path
srcIP = "10.0.0.1" #raw_input('What is the source IP?> ')
dstIP = "10.0.0.8" #raw_input('What is the destination IP?> ')
put_path(shortest_path, odlEdges, srcIP, dstIP, baseUrl)
put_path(shortest_path, odlEdges, dstIP, srcIP, baseUrl)
#print h.request(build_url(baseUrl, 'topology', containerName), "GET")
#Test to GET out the flows from a node
resp, content = h.request(build_url(baseUrl, 'flowprogrammer', containerName) + '/node/OF/00:00:00:00:00:00:00:03', "GET")
flowConfig = json.loads(content)
flowConf = flowConfig['flowConfig']
#print json.dumps(flowConf, indent = 2)
#Print out the topology
resp, content = h.request(build_url(baseUrl,'topology',containerName),"GET")
allTopology = json.loads(content)
allTopo = allTopology['edgeProperties']
#print json.dumps(allTopo, indent = 2)
#headNode = "00:00:00:00:00:00:00:03"
#tailNode = "00:00:00:00:00:00:00:02"
def add_sp_flows(shortest_path):
for i in range(len(shortest_path)-1):
headNode = shortest_path[i]
tailNode = shortest_path[i+1]
#Forward flow
flowName = headNode[21:23] + 'to' + tailNode[21:23] + 'IPto' + dstIP
        outPutPort = find_ports(odlEdges, shortest_path[i], shortest_path[i+1])
flowRule = {"node":{"type":"OF", "id":headNode},"installInHw":"true","name":flowName,"etherType":"0x800", "actions":["OUTPUT="+outPutPort[-1]],"priority":"500","nwDst":dstIP}
putUrl = build_flow_url(baseUrl, 'default',"OF", headNode, flowName)
resp, content = put_dict(h, putUrl, flowRule)
#Backward flow
flowName = tailNode[21:23] + 'to' + headNode[21:23] + 'IPto' + srcIP
        outPutPort = find_ports(odlEdges, shortest_path[i+1], shortest_path[i])
flowRule = {"node":{"type":"OF", "id":tailNode},"installInHw":"true","name":flowName,"etherType":"0x800", "actions":["OUTPUT="+outPutPort[-1]],"priority":"500","nwDst":srcIP}
putUrl = build_flow_url(baseUrl, 'default',"OF", tailNode, flowName)
resp, content = put_dict(h, putUrl, flowRule)
print flowRule
print "Flows have been added!"
add_sp_flows(shortest_path)
| mit | -1,614,679,354,172,069,600 | 37.140127 | 182 | 0.679192 | false |
RyanWHowe/SunStat | SunStat/SunStat.py | 1 | 9538 | from __future__ import division
import datetime
import math
__author__ = 'Ryan W. Howe'
class SunStat:
def __init__(self, latitude, longitude, year, month, day, utcoffset=0):
"""SunStat class is able to provide the sunset, sunrise, or noon time for the Sun at a provided date.
        DISCLAIMER: These calculations are theoretically accurate to the minute for locations between +/- 72 degrees
        latitude, and within 10 minutes outside those latitudes. However, due to variations in atmospheric composition,
        temperature, pressure and conditions, observed values may vary from the calculations.
The calculations (other than the Julian Day) are all taken from the NOAA, which came from Astronomical
Algorithms written by Jean Meeus.
:param latitude: Observation Latitude (+ North)
:param longitude: Observation Longitude (+ East)
:param year: Year of the Date for Observation
:param month: Month of the Date for Observation
:param day: Day of the Date for Observation
:param utcoffset: current UTC offset (else time objects are in UTC)
:return:
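        Example usage (illustrative coordinates only; a UTC offset of -5 is assumed):
            SunStat(40.71, -74.01, 2015, 6, 21, -5).sunrise()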
"""
try:
self.__date = datetime.date(year, month, day)
except:
raise SunStatException(self, "An invalid calendar date was entered")
if (latitude >= 90) | (latitude <= -90):
raise SunStatException(self, "Latitude range is 89.9999 to -89.9999")
else:
self.__latitude = latitude
if (longitude >= 180) | (longitude <= -180):
raise SunStatException(self, "Longitude range is 179.9999 to -179.9999")
else:
self.__longitude = longitude
if (utcoffset > 14) | (utcoffset < -14):
raise SunStatException(self, "UTC offsets are only valid between 14 and -14")
else:
self.__utcoffset = utcoffset
def julian_day(self):
""" This converts the current Gregorian calendar date into a Julian Day Number.
        which means how many days have passed since noon, November 24th, 4714 BC (Gregorian calendar) at Greenwich.
        Once you understand that, the math makes more sense.
all math taken from http://en.wikipedia.org/wiki/Julian_day
a is calculated so that it will be a 1 for January and February and 0 for all other months
y is the number of years since March 1st, -4800 (with a correction if we are in either Jan or Feb)
m is the current month since March (ie April = 4 so a = 0 and m = 4 + 12*0 - 3 ==> 1)
for the return calculation the first term is simply the day of the month
The second is some math magic that comes from integer division, using the m calculated above and removing the
        remainder; this will give you the appropriate number of whole days since March 1. So for April (m=1),
        (153 * 1 + 2) // 5 = 31 (there are 31 days in March and that is how many have passed by April 1); for May 1
        (m = 2), (153 * 2 + 2) // 5 = 61 (31 days for March and 30 for April). Give it a try, it is kinda cool!
Third term is simply the days in a standard calendar year (given the years as computed above)
Fourth, fifth, sixth terms all correct for leap years
so you add a day every year that is divisible by 4, subtract 1 back for the ones that are also divisible
        by 100, then add back in the ones that are also divisible by 400; you can check here
        http://en.wikipedia.org/wiki/Leap_year (I had to). In theory (365 * y) could have just been replaced by
        (365.2425 * y), but there is enough going on that I stuck with the wiki calculations.
        Lastly, there is the number of days that are over-counted by going back to 4800 BC; since we are only supposed
        to go to 4714 BC (the length of the year was calculated differently before the Gregorian calendar existed),
        this factor gets us to the correct whole date from March 1st, 4801 BC (or March 1st, -4800).
:return: float Julian Day of the passed Gregorian calendar date
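        As a quick sanity check, 2000/01/01 gives a = 1, y = 6799, m = 10, so the return value is
        1 + 306 + 2481635 + 1699 - 67 + 16 - 32045 = 2451545, the Julian Day of the J2000.0 epoch.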
"""
a = (14 - self.__date.month) // 12
y = self.__date.year + 4800 - a
m = self.__date.month + 12 * a - 3
return self.__date.day + ((153 * m + 2) // 5) + (365 * y) + (y // 4) - (y // 100) + (y // 400) - 32045
def julian_century(self):
"""Compute the current Julian Century
        The Julian Century count starts from 2000/01/01 at 12:00 UTC, which is Julian Day 2451545.0.
        A Julian year is exactly 365.25 days long, therefore a Julian century is 36525 days long.
        Starting with the current Julian Day, the calculation is straightforward.
:return:float Julian Century
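        For example, a Julian Day of 2451545 (2000/01/01 12:00 UTC) gives century 0.0, and each
        additional 36525 days adds exactly 1.0.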
"""
return (self.julian_day() - 2451545) / 36525
def geom_mean_long_sun(self):
"""Calculate the mean longitude Solar Coordinate
:return:float degrees
"""
return 280.46646 + self.julian_century() * (36000.76983 + self.julian_century() * 0.0003032) % 360
def geom_mean_anom_sun(self):
"""Calculate the anomaly Solar Coordinate
:return:float degrees
"""
return 357.52911 + self.julian_century() * (35999.05029 - 0.0001537 * self.julian_century())
def sun_eq_of_cent(self):
"""Calculate the off center Solar Coordinate
:return:float degree
"""
return math.sin(math.radians(self.geom_mean_anom_sun())) * (
1.914602 - self.julian_century() * (0.004817 + 0.000014 * self.julian_century())) + math.sin(
math.radians(2 * self.geom_mean_anom_sun())) * (0.019993 - 0.000101 * self.julian_century()) + math.sin(
math.radians(3 * self.geom_mean_anom_sun())) * 0.000289
def sun_true_long(self):
return self.geom_mean_long_sun() + self.sun_eq_of_cent()
def sun_app_long(self):
return self.sun_true_long() - 0.00569 - 0.00478 * math.sin(
math.radians(125.04 - 1934.136 * self.julian_century()))
def mean_obliq_ecliptic(self):
"""
        Calculate the mean obliquity of the ecliptic, which is used to convert ecliptic
        longitude into right ascension and declination
        :return: float degrees
"""
return 23 + (26 + ((21.448 - self.julian_century() * (
46.815 + self.julian_century() * (0.00059 - self.julian_century() * 0.001813)))) / 60) / 60
def obliq_corr(self):
return self.mean_obliq_ecliptic() + 0.00256 * math.cos(math.radians(125.04 - 1934.136 * self.julian_century()))
def sun_declin(self):
return math.degrees(
math.asin(math.sin(math.radians(self.obliq_corr())) * math.sin(math.radians(self.sun_app_long()))))
def ha_sunrise(self):
return math.degrees(math.acos(math.cos(math.radians(90.833)) / (
math.cos(math.radians(self.__latitude)) * math.cos(math.radians(self.sun_declin()))) - math.tan(
math.radians(self.__latitude)) * math.tan(math.radians(self.sun_declin()))))
def eccent_earth_orbit(self):
return 0.016708634 - self.julian_century() * (0.000042037 + 0.0000001267 * self.julian_century())
def var_y(self):
return math.tan(math.radians(self.obliq_corr() / 2)) * math.tan(math.radians(self.obliq_corr() / 2))
def eq_of_time(self):
return 4 * math.degrees(
self.var_y() * math.sin(2 * math.radians(self.geom_mean_long_sun()))
- 2 * self.eccent_earth_orbit() * math.sin(math.radians(self.geom_mean_anom_sun()))
+ 4 * self.eccent_earth_orbit() * self.var_y() * math.sin(
math.radians(self.geom_mean_anom_sun())) * math.cos(2 * math.radians(self.geom_mean_long_sun()))
- 0.5 * self.var_y() * self.var_y() * math.sin(4 * math.radians(self.geom_mean_long_sun()))
- 1.25 * self.eccent_earth_orbit() * self.eccent_earth_orbit() * math.sin(
2 * math.radians(self.geom_mean_anom_sun()))
)
def __solar_noon_since_midnight(self):
return 720 - 4 * self.__longitude - self.eq_of_time() + self.__utcoffset * 60
def __sun_rise_since_midnight(self):
return self.__solar_noon_since_midnight() - self.ha_sunrise() * 4
def __sun_set_since_midnight(self):
return self.__solar_noon_since_midnight() + self.ha_sunrise() * 4
def sunrise(self):
_hour = self.__sun_rise_since_midnight() // 60
_minute = self.__sun_rise_since_midnight() - (60 * _hour)
_second, _minute = math.modf(_minute)
_second *= 60
_millisecond, _second = math.modf(_second)
return datetime.time(int(_hour), int(_minute), int(_second), int(_millisecond * 1000000))
def sunset(self):
_hour = self.__sun_set_since_midnight() // 60
_minute = self.__sun_set_since_midnight() - (60 * _hour)
_second, _minute = math.modf(_minute)
_second *= 60
_millisecond, _second = math.modf(_second)
return datetime.time(int(_hour), int(_minute), int(_second), int(_millisecond * 1000000))
def noon(self):
_hour = self.__solar_noon_since_midnight() // 60
_minute = self.__solar_noon_since_midnight() - (60 * _hour)
_second, _minute = math.modf(_minute)
_second *= 60
_millisecond, _second = math.modf(_second)
return datetime.time(int(_hour), int(_minute), int(_second), int(_millisecond * 1000000))
class SunStatException(Exception):
def __init__(self, parent, msg):
self.message = msg
def __str__(self):
return self.message | mit | 2,214,814,354,007,430,400 | 49.739362 | 120 | 0.622458 | false |
tuzhaopeng/NMT-Coverage | build/lib/groundhog/mainLoop.py | 1 | 13881 | """
Main loop (early stopping).
TODO: write more documentation
"""
__docformat__ = 'restructedtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
class Unbuffered:
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
import sys
import traceback
sys.stdout = Unbuffered(sys.stdout)
# Generic imports
import numpy
import cPickle
import gzip
import time
import signal
from groundhog.utils import print_mem, print_time
class MainLoop(object):
def __init__(self,
train_data,
valid_data,
test_data,
model,
algo,
state,
channel,
hooks=None,
reset=-1,
train_cost=False,
validate_postprocess=None,
l2_params=False):
"""
:type train_data: groundhog dataset object
:param train_data: data iterator used for training
:type valid_data: groundhog dataset object
:param valid_data: data iterator used for validation
:type test_data: groundhog dataset object
:param test_data: data iterator used for testing
:type model: groundhog model object
:param model: the model that is supposed to be trained
:type algo: groundhog trainer object
        :param algo: optimization algorithm used to optimize the model
:type state: dictionary (or jobman dictionary)
:param state: dictionary containing various hyper-param choices,
but also the current state of the job (the dictionary is used by
jobman to fill in a psql table)
:type channel: jobman handler
:param channel: jobman handler used to communicate with a psql
server
:type hooks: function or list of functions
:param hooks: list of functions that are called every `hookFreq`
steps to carry on various diagnostics
:type reset: int
:param reset: if larger than 0, the train_data iterator position is
            reset to 0 every `reset` number of updates
:type train_cost: bool
:param train_cost: flag saying if the training error (over the
entire training set) should be computed every time the validation
error is computed
:type validate_postprocess: None or function
:param validate_postprocess: function called on the validation cost
every time before applying the logic of the early stopper
:type l2_params: bool
:param l2_params: save parameter norms at each step
"""
###################
# Step 0. Set parameters
###################
self.train_data = train_data
self.valid_data = valid_data
self.test_data = test_data
self.state = state
self.channel = channel
self.model = model
self.algo = algo
self.valid_id = 0
self.old_cost = 1e21
self.validate_postprocess = validate_postprocess
self.patience = state['patience']
self.l2_params = l2_params
self.train_cost = train_cost
if hooks and not isinstance(hooks, (list, tuple)):
hooks = [hooks]
if self.state['validFreq'] < 0:
self.state['validFreq'] = self.train_data.get_length()
print 'Validation computed every', self.state['validFreq']
elif self.state['validFreq'] > 0:
print 'Validation computed every', self.state['validFreq']
if self.state['trainFreq'] < 0:
self.state['trainFreq'] = self.train_data.get_length()
print 'Train frequency set to ', self.state['trainFreq']
state['bvalidcost'] = 1e21
for (pname, _) in model.properties:
self.state[pname] = 1e20
n_elems = state['loopIters'] // state['trainFreq'] + 1
self.timings = {'step' : 0, 'next_offset' : -1}
for name in self.algo.return_names:
self.timings[name] = numpy.zeros((n_elems,), dtype='float32')
if self.l2_params:
for param in model.params:
self.timings["l2_" + param.name] = numpy.zeros(n_elems, dtype="float32")
n_elems = state['loopIters'] // state['validFreq'] + 1
for pname in model.valid_costs:
self.state['valid'+pname] = 1e20
self.state['test'+pname] = 1e20
self.timings['fulltrain'+pname] = numpy.zeros((n_elems,),
dtype='float32')
self.timings['valid'+pname] = numpy.zeros((n_elems,),
dtype='float32')
self.timings['test'+pname] = numpy.zeros((n_elems,),
dtype='float32')
if self.channel is not None:
self.channel.save()
self.hooks = hooks
self.reset = reset
self.start_time = time.time()
self.batch_start_time = time.time()
def validate(self):
rvals = self.model.validate(self.valid_data)
msg = '** %d validation:' % self.valid_id
self.valid_id += 1
self.batch_start_time = time.time()
pos = self.step // self.state['validFreq']
for k, v in rvals:
msg = msg + ' ' + k + ':%f ' % float(v)
self.timings['valid'+k][pos] = float(v)
self.state['valid'+k] = float(v)
msg += 'whole time %s' % print_time(time.time() - self.start_time)
msg += ' patience %d' % self.patience
print msg
if self.train_cost:
valid_rvals = rvals
rvals = self.model.validate(self.train_data, True)
msg = '** %d train:' % (self.valid_id - 1)
for k, v in rvals:
msg = msg + ' ' + k + ':%6.3f ' % float(v)
self.timings['fulltrain' + k] = float(v)
self.state['fulltrain' + k] = float(v)
print msg
rvals = valid_rvals
self.state['validtime'] = float(time.time() - self.start_time)/60.
# Just pick the first thing that the cost returns
cost = rvals[0][1]
if self.state['bvalidcost'] > cost:
self.state['bvalidcost'] = float(cost)
for k, v in rvals:
self.state['bvalid'+k] = float(v)
self.state['bstep'] = int(self.step)
self.state['btime'] = int(time.time() - self.start_time)
self.test()
else:
print 'No testing', cost, '>', self.state['bvalidcost']
for k, v in self.state.items():
if 'test' in k:
print k, v
print_mem('validate')
if self.validate_postprocess:
return self.validate_postprocess(cost)
return cost
def test(self):
self.model.best_params = [(x.name, x.get_value()) for x in
self.model.params]
numpy.savez(self.state['prefix'] + '_best_params',
**dict(self.model.best_params))
self.state['best_params_pos'] = self.step
if self.test_data is not None:
rvals = self.model.validate(self.test_data)
else:
rvals = []
msg = '>>> Test'
pos = self.step // self.state['validFreq']
for k, v in rvals:
msg = msg + ' ' + k + ':%6.3f ' % v
self.timings['test' + k][pos] = float(v)
self.state['test' + k] = float(v)
print msg
self.state['testtime'] = float(time.time()-self.start_time)/60.
def save(self):
start = time.time()
print "Saving the model..."
# ignore keyboard interrupt while saving
s = signal.signal(signal.SIGINT, signal.SIG_IGN)
numpy.savez(self.state['prefix']+'timing.npz',
**self.timings)
if self.state['overwrite']:
self.model.save(self.state['prefix']+'model.npz')
else:
self.model.save(self.state['prefix'] +
'model%d.npz' % self.save_iter)
cPickle.dump(self.state, open(self.state['prefix']+'state.pkl', 'w'))
self.save_iter += 1
signal.signal(signal.SIGINT, s)
print "Model saved, took {}".format(time.time() - start)
# FIXME
def load(self, model_path=None, timings_path=None, skip_timing=False):
if model_path is None:
model_path = self.state['prefix'] + 'model.npz'
if timings_path is None:
timings_path = self.state['prefix'] + 'timing.npz'
try:
self.model.load(model_path)
except Exception:
print 'mainLoop: Corrupted model file'
traceback.print_exc()
if not skip_timing:
try:
self.timings = dict(numpy.load(timings_path).iteritems())
print self.timings
except Exception:
print 'mainLoop: Corrupted timings file'
traceback.print_exc()
def main(self):
assert self.reset == -1
print_mem('start')
self.state['gotNaN'] = 0
start_time = time.time()
self.start_time = start_time
self.batch_start_time = time.time()
self.step = int(self.timings['step'])
self.algo.step = self.step
self.save_iter = 0
self.save()
if self.channel is not None:
self.channel.save()
self.save_time = time.time()
last_cost = 1.
self.state['clr'] = self.state['lr']
self.train_data.start(self.timings['next_offset']
if 'next_offset' in self.timings
else -1)
while (self.step < self.state['loopIters'] and
last_cost > .1*self.state['minerr'] and
(time.time() - start_time)/60. < self.state['timeStop'] and
self.state['lr'] > self.state['minlr']):
if self.step > 0 and (time.time() - self.save_time)/60. >= self.state['saveFreq']:
self.save()
if self.channel is not None:
self.channel.save()
self.save_time = time.time()
st = time.time()
try:
rvals = self.algo()
self.state['traincost'] = float(rvals['cost'])
self.state['step'] = self.step
last_cost = rvals['cost']
for name in rvals.keys():
self.timings[name][self.step] = float(numpy.array(rvals[name]))
if self.l2_params:
for param in self.model.params:
self.timings["l2_" + param.name][self.step] =\
numpy.mean(param.get_value() ** 2) ** 0.5
if (numpy.isinf(rvals['cost']) or
numpy.isnan(rvals['cost'])) and\
self.state['on_nan'] == 'raise':
self.state['gotNaN'] = 1
self.save()
if self.channel:
self.channel.save()
print 'Got NaN while training'
last_cost = 0
if self.valid_data is not None and\
self.step % self.state['validFreq'] == 0 and\
self.step > 1:
valcost = self.validate()
if valcost > self.old_cost * self.state['cost_threshold']:
self.patience -= 1
if 'lr_start' in self.state and\
self.state['lr_start'] == 'on_error':
self.state['lr_start'] = self.step
elif valcost < self.old_cost:
self.patience = self.state['patience']
self.old_cost = valcost
if self.state['divide_lr'] and \
self.patience < 1:
# Divide lr by 2
self.algo.lr = self.algo.lr / self.state['divide_lr']
bparams = dict(self.model.best_params)
self.patience = self.state['patience']
for p in self.model.params:
p.set_value(bparams[p.name])
if self.state['hookFreq'] > 0 and \
self.step % self.state['hookFreq'] == 0 and \
self.hooks:
[fn() for fn in self.hooks]
if self.reset > 0 and self.step > 1 and \
self.step % self.reset == 0:
print 'Resetting the data iterator'
self.train_data.reset()
self.step += 1
self.timings['step'] = self.step
self.timings['next_offset'] = self.train_data.next_offset
except KeyboardInterrupt:
break
self.state['wholetime'] = float(time.time() - start_time)
if self.valid_data is not None:
self.validate()
self.save()
if self.channel:
self.channel.save()
print 'Took', (time.time() - start_time)/60., 'min'
avg_step = self.timings['time_step'][:self.step].mean()
avg_cost2expl = self.timings['log2_p_expl'][:self.step].mean()
print "Average step took {}".format(avg_step)
print "That amounts to {} sentences in a day".format(1 / avg_step * 86400 * self.state['bs'])
print "Average log2 per example is {}".format(avg_cost2expl)
| bsd-3-clause | 8,178,595,542,425,499,000 | 37.239669 | 101 | 0.517758 | false |
igemsoftware/SYSU-Software-2015 | server/models/preload/equations/Tangliang/0801(1)donglihui Mosaicoli2.py | 1 | 1405 | ["LuxR-lux-AHL",["lux-AHL 3OC6HSL","LuxR-lux-AHL"],[("k_{Rlux}",0.1),("d",0.0231)],"{{k_{Rlux}}}*lux-AHL 3OC6HSL-{{k_{Rlux}}}*LuxR-lux-AHL-{{d}}*LuxR-lux-AHL"]
#all
["LuxR-lux-AHL",["LuxR-lux-AHL"],[("k_{Rlux}",0.1),("d",0.0231)],"-{{k_{Rlux}}}*LuxR-lux-AHL-{{d}}*LuxR-lux-AHL"]
#no lux-AHL 3OC6HSL
["LuxR-lux-AHL",["lux-AHL 3OC6HSL"],[("k_{Rlux}",0.1),("d",0.0231)],"{{k_{Rlux}}}*lux-AHL 3OC6HSL"]
#no LuxR-lux-AHL
["LuxR-lux-AHL",["LuxR","LuxR-lux-AHL"],[("k_{Rlux}",0.1),("d",0.0231)],"{{k_{Rlux}}}*LuxR-{{k_{Rlux}}}*LuxR-lux-AHL-{{d}}*LuxR-lux-AHL"]
#all
["LuxR-lux-AHL",["LuxR-lux-AHL"],[("k_{Rlux}",0.1),("d",0.0231)],"-{{k_{Rlux}}}*LuxR-lux-AHL-{{d}}*LuxR-lux-AHL"]
# no LuxR
["LuxR-lux-AHL",["LuxR"],[("k_{Rlux}",0.1),("d",0.0231)],"{{k_{Rlux}}}*LuxR"]
#no LuxR-lux-AHL
["Bxb1",["LuxR-lux-AHL","Plux"],[("k_{Rlux}",0.1),("d",0.0231),("alpha_{Bxb1}",0.588),("n",2),("Bxb1",150)],"{{alpha_{Bxb1}}}*{{Bxb1}}*LuxR-lux-AHL**{{n}}/({{k_{Rlux}}}**{{n}}+LuxR-lux-AHL**{{n}})-{{d}}*Bxb1"]
#all
["Bxb1",["Plux"],[("k_{Rlux}",0.1),("d",0.0231),("alpha_{Bxb1}",0.588),("n",2),("Bxb1",150)],"-{{d}}*Bxb1"]
# no LuxR-lux-AHL
["Bxb1",["LuxR-lux-AHL"],[("k_{Rlux}",0.1),("d",0.0231),("alpha_{Bxb1}",0.588),("n",2),("Bxb1",150)],"-{{d}}*Bxb1"]
# no Plux
["Bxb1",["LuxR-lux-AHL"],[("k_{Rlux}",0.1),("d",0.0231),("alpha_{Bxb1}",0.588),("n",2),("Bxb1",150)],"-{{d}}*Bxb1"]
#no LuxR-lux-AHL,Plux | lgpl-3.0 | -5,630,302,305,126,499,000 | 61.954545 | 209 | 0.507473 | false |
xmendez/wfuzz | src/wfuzz/plugins/payloads/permutation.py | 1 | 1709 | from wfuzz.externals.moduleman.plugin import moduleman_plugin
from wfuzz.plugin_api.base import BasePayload
from wfuzz.exception import FuzzExceptBadOptions
from wfuzz.fuzzobjects import FuzzWordType
@moduleman_plugin
class permutation(BasePayload):
name = "permutation"
author = ("Xavi Mendez (@xmendez)",)
version = "0.1"
description = ()
summary = "Returns permutations of the given charset and length."
category = ["default"]
priority = 99
parameters = (("ch", "", True, "Charset and len to permute in the form of abc-2."),)
default_parameter = "ch"
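    # For example, the parameter "ab-2" enumerates every 2-character word over the
    # charset "ab": aa, ab, ba and bb (4 words in total).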
def __init__(self, params):
BasePayload.__init__(self, params)
self.charset = []
try:
ran = self.params["ch"].split("-")
self.charset = ran[0]
self.width = int(ran[1])
except ValueError:
raise FuzzExceptBadOptions('Bad range format (eg. "0-ffa")')
pset = []
for x in self.charset:
pset.append(x)
words = self.xcombinations(pset, self.width)
self.lista = []
for x in words:
self.lista.append("".join(x))
self.__count = len(self.lista)
def count(self):
return self.__count
def get_type(self):
return FuzzWordType.WORD
def get_next(self):
if self.lista != []:
payl = self.lista.pop()
return payl
else:
raise StopIteration
def xcombinations(self, items, n):
if n == 0:
yield []
else:
for i in range(len(items)):
for cc in self.xcombinations(items[:i] + items[i:], n - 1):
yield [items[i]] + cc
| gpl-2.0 | -24,799,782,009,886,480 | 26.564516 | 88 | 0.564073 | false |
AlienVault-Engineering/infra-buddy | src/main/python/infra_buddy/context/deploy_ctx.py | 1 | 12201 | import datetime
import json
import os
import re
import tempfile
from collections import OrderedDict
from pprint import pformat
from infra_buddy.aws import s3
from infra_buddy.context.artifact_definition import ArtifactDefinition
from infra_buddy.context.monitor_definition import MonitorDefinition
from infra_buddy.context.service_definition import ServiceDefinition
from infra_buddy.notifier.datadog_notifier import DataDogNotifier
from infra_buddy.template.template_manager import TemplateManager
from infra_buddy.utility import print_utility
STACK_NAME = 'STACK_NAME'
DOCKER_REGISTRY = 'DOCKER_REGISTRY_URL'
ROLE = 'ROLE'
IMAGE = 'IMAGE'
APPLICATION = 'APPLICATION'
ENVIRONMENT = 'ENVIRONMENT'
REGION = 'REGION'
SKIP_ECS = 'SKIP_ECS'
built_in = [DOCKER_REGISTRY, ROLE, APPLICATION, ENVIRONMENT, REGION, SKIP_ECS]
env_variables = OrderedDict()
env_variables['VPCAPP'] = "${VPCAPP}"
env_variables['DEPLOY_DATE'] = "${DEPLOY_DATE}"
env_variables[STACK_NAME] = "${ENVIRONMENT}-${APPLICATION}-${ROLE}"
env_variables['EnvName'] = "${STACK_NAME}" # alias
env_variables['ECS_SERVICE_STACK_NAME'] = "${STACK_NAME}" # alias
env_variables['VPC_STACK_NAME'] = "${ENVIRONMENT}-${VPCAPP}-vpc"
env_variables['CF_BUCKET_NAME'] = "${ENVIRONMENT}-${VPCAPP}-cloudformation-deploy-resources"
env_variables['TEMPLATE_BUCKET'] = "${ENVIRONMENT}-${VPCAPP}-cloudformation-deploy-resources" # alias
env_variables['CF_DEPLOY_RESOURCE_PATH'] = "${STACK_NAME}/${DEPLOY_DATE}"
env_variables['CONFIG_TEMPLATES_URL'] = "https://s3-${REGION}.amazonaws.com/${CF_BUCKET_NAME}/${CF_DEPLOY_RESOURCE_PATH}"
env_variables['CONFIG_TEMPLATES_EAST_URL'] = "https://s3.amazonaws.com/${CF_BUCKET_NAME}/${CF_DEPLOY_RESOURCE_PATH}"
env_variables['CLUSTER_STACK_NAME'] = "${ENVIRONMENT}-${APPLICATION}-cluster"
env_variables['RESOURCE_STACK_NAME'] = "${ENVIRONMENT}-${APPLICATION}-${ROLE}-resources"
env_variables['ECS_SERVICE_RESOURCE_STACK_NAME'] = "${RESOURCE_STACK_NAME}" # alias
env_variables['KEY_NAME'] = "${ENVIRONMENT}-${APPLICATION}"
env_variables['CHANGE_SET_NAME'] = "${STACK_NAME}-deploy-cloudformation-change-set"
class DeployContext(dict):
def __init__(self, defaults, environment):
super(DeployContext, self).__init__()
self.current_deploy = None
self.temp_files = []
self._initalize_defaults(defaults,environment)
@classmethod
def create_deploy_context_artifact(cls, artifact_directory, environment, defaults=None):
# type: (str, str) -> DeployContext
"""
:rtype DeployContext
:param artifact_directory: Path to directory containing service definition.
            May be an s3 URL pointing at a zip archive
:param defaults: Path to json file containing default environment settings
"""
ret = DeployContext(defaults=defaults, environment=environment)
ret._initialize_artifact_directory(artifact_directory)
ret._initialize_environment_variables()
return ret
@classmethod
def create_deploy_context(cls, application, role, environment, defaults=None):
# type: (str, str, str, str) -> DeployContext
"""
:rtype DeployContext
:param application: Application name
:param role: Role of service
:param environment: Environment to deploy
:param defaults: Path to json file containing default environment settings
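        Example (hypothetical application/role/environment names):
            ctx = DeployContext.create_deploy_context(application="myapp", role="api", environment="dev")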
"""
ret = DeployContext(defaults=defaults, environment=environment)
ret['APPLICATION'] = application
ret['ROLE'] = role
ret._initialize_environment_variables()
return ret
def print_self(self):
print_utility.warn("Context:")
print_utility.warn("Stack: {}".format(self.stack_name))
if len(self.stack_name_cache)>0:
print_utility.warn("Depth: {}".format(self.stack_name_cache))
if self.current_deploy:
print_utility.banner_info("Deploy Defaults:",pformat(self.current_deploy.defaults))
print_utility.banner_info("Environment:",pformat(self))
def _initialize_artifact_directory(self, artifact_directory):
# type: (str) -> None
if artifact_directory.startswith("s3://"):
tmp_dir = tempfile.mkdtemp()
s3.download_zip_from_s3_url(artifact_directory, destination=tmp_dir)
artifact_directory = tmp_dir
service_definition = ServiceDefinition(artifact_directory, self['ENVIRONMENT'])
self[APPLICATION] = service_definition.application
self[ROLE] = service_definition.role
self[DOCKER_REGISTRY] = service_definition.docker_registry
self.update(service_definition.deployment_parameters)
self.service_definition = service_definition
self.artifact_definition = ArtifactDefinition.create_from_directory(artifact_directory)
self.monitor_definition = MonitorDefinition.create_from_directory(artifact_directory)
self.artifact_definition.register_env_variables(self)
def _initialize_environment_variables(self):
application = self['APPLICATION']
self['VPCAPP'] = application if not application or '-' not in application else application[:application.find('-')]
# allow for partial stack names for validation and introspection usecases
stack_template = "${ENVIRONMENT}"
if application:
stack_template += "-${APPLICATION}"
if self['ROLE']:
stack_template += "-${ROLE}"
env_variables[STACK_NAME] = stack_template
self['DEPLOY_DATE'] = datetime.datetime.now().strftime("%b_%d_%Y_Time_%H_%M")
for property_name in built_in:
self.__dict__[property_name.lower()] = self.get(property_name, None)
for variable, template in env_variables.items():
evaluated_template = self.expandvars(template)
self[variable] = evaluated_template
self.__dict__[variable.lower()] = evaluated_template
        # s3 has non-standardized behavior: in us-east-1 you cannot use the region in the URL
if self['REGION'] == 'us-east-1':
self['CONFIG_TEMPLATES_URL'] = self['CONFIG_TEMPLATES_EAST_URL']
self.__dict__['CONFIG_TEMPLATES_URL'.lower()] = self['CONFIG_TEMPLATES_EAST_URL']
print_utility.info("deploy_ctx = {}".format(repr(self.__dict__)))
def _initalize_defaults(self, defaults,environment):
self['DATADOG_KEY'] = ""
self['ENVIRONMENT'] = environment.lower() if environment else "dev"
if defaults:
self.update(defaults)
self.update(os.environ)
if 'REGION' not in self:
print_utility.warn("Region not configured using default 'us-west-1'. "
"This is probably not what you want - N. California is slow, like real slow."
" Set the environment variable 'REGION' or pass a default configuration file to override. ")
self['REGION'] = 'us-west-1'
self.template_manager = TemplateManager(self.get_deploy_templates(),self.get_service_modification_templates())
self.stack_name_cache = []
if self.get('DATADOG_KEY','') != '':
self.notifier = DataDogNotifier(key=self['DATADOG_KEY'],deploy_context=self)
else:
self.notifier = None
def get_deploy_templates(self):
return self.get('service-templates', {})
def get_service_modification_templates(self):
return self.get('service-modification-templates', {})
def generate_modification_stack_name(self, mod_name):
return "{ENVIRONMENT}-{APPLICATION}-{ROLE}-{mod_name}".format(mod_name=mod_name, **self)
def generate_modification_resource_stack_name(self, mod_name):
return "{ENVIRONMENT}-{APPLICATION}-{ROLE}-{mod_name}-resources".format(mod_name=mod_name, **self)
def get_region(self):
return self._get_required_default_configuration(REGION)
def _get_required_default_configuration(self, key):
region = self.get(key, os.environ.get(key, None))
if not region:
raise Exception("Required default not set {key}.\n"
"Configure --configuration-defaults or set ENVIRONMENT variable {key}".format(
key=key))
return region
def notify_event(self, title, type, message=None):
if self.notifier:
self.notifier.notify_event(title,type,message)
else:
print_utility.warn("Notify {type}: {title} - {message}".format(type=type,title=title,message=message))
def get_service_modifications(self):
return self.service_definition.service_modifications
def should_skip_ecs_trivial_update(self):
return self.get(SKIP_ECS, os.environ.get(SKIP_ECS, "True")) == "True"
def render_template(self, file,destination):
with open(file, 'r') as source:
with open(os.path.join(destination,os.path.basename(file).replace('.tmpl','')),'w+') as destination:
temp_file_path = os.path.abspath(destination.name)
print_utility.info("Rendering template to path: {}".format(temp_file_path))
self.temp_files.append(temp_file_path)
for line in source:
destination.write(self.expandvars(line))
return temp_file_path
def __del__(self):
for file in self.temp_files:
os.remove(file)
def get_execution_plan(self):
# type: () -> list(Deploy)
execution_plan = self.service_definition.generate_execution_plan(self.template_manager, self)
artifact_plan = self.artifact_definition.generate_execution_plan(self)
if artifact_plan:
execution_plan.extend(artifact_plan)
monitor_plan = self.monitor_definition.generate_execution_plan(self)
if monitor_plan:
execution_plan.extend(monitor_plan)
print_utility.progress("Execution Plan:")
for deploy in execution_plan:
print_utility.info_banner("\t"+str(deploy))
return execution_plan
def expandvars(self, template_string, aux_dict=None):
        """Expand ENVIRONMENT variables of form $var and ${var}.
        """
        if not template_string:  # if you pass None, return None
            return template_string
def replace_var(m):
if aux_dict:
val = aux_dict.get(m.group(2) or m.group(1), None)
if val is not None:return transform(val)
# if we are in a deployment values set in that context take precedent
if self.current_deploy is not None:
val = self.current_deploy.defaults.get(m.group(2) or m.group(1), None)
if val is not None:return transform(val)
return transform(self.get(m.group(2) or m.group(1), m.group(0)))
def transform( val):
if isinstance(val, bool):
return str(val).lower()
return str(val)
reVar = r'(?<!\\)\$(\w+|\{([^}]*)\})'
sub = re.sub(reVar, replace_var, template_string)
return sub
def recursive_expand_vars(self,source):
if isinstance(source,dict):
ret = {}
for key,value in source.items():
ret[key] = self.recursive_expand_vars(value)
return ret
elif isinstance(source, list):
ret = []
for item in source:
ret.append(self.recursive_expand_vars(item))
return ret
elif isinstance(source, str):
return self.expandvars(source)
else:
return source
def push_deploy_ctx(self, deploy_):
# type: (CloudFormationDeploy) -> None
if deploy_.stack_name:
self.stack_name_cache.append(self[STACK_NAME])
self._update_stack_name(deploy_.stack_name)
self.current_deploy = deploy_
def _update_stack_name(self, new_val):
self[STACK_NAME] = new_val
self.stack_name = new_val
def pop_deploy_ctx(self):
if self.current_deploy.stack_name:
new_val = self.stack_name_cache.pop()
self._update_stack_name(new_val)
self.current_deploy = None
| apache-2.0 | 3,817,613,296,716,474,400 | 44.02214 | 124 | 0.643554 | false |
jankeromnes/depot_tools | gcl.py | 1 | 49489 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""\
Wrapper script around Rietveld's upload.py that simplifies working with groups
of files.
"""
import json
import optparse
import os
import random
import re
import string
import sys
import tempfile
import time
import urllib2
import breakpad # pylint: disable=W0611
import fix_encoding
import gclient_utils
import git_cl
import presubmit_support
import rietveld
from scm import SVN
import subprocess2
from third_party import upload
__version__ = '1.2.1'
CODEREVIEW_SETTINGS = {
# To make gcl send reviews to a server, check in a file named
# "codereview.settings" (see |CODEREVIEW_SETTINGS_FILE| below) to your
# project's base directory and add the following line to codereview.settings:
# CODE_REVIEW_SERVER: codereview.yourserver.org
}
# globals that store the root of the current repository and the directory where
# we store information about changelists.
REPOSITORY_ROOT = ""
# Filename where we store repository specific information for gcl.
CODEREVIEW_SETTINGS_FILE = "codereview.settings"
CODEREVIEW_SETTINGS_FILE_NOT_FOUND = (
'No %s file found. Please add one.' % CODEREVIEW_SETTINGS_FILE)
# Warning message when the change appears to be missing tests.
MISSING_TEST_MSG = "Change contains new or modified methods, but no new tests!"
# Global cache of files cached in GetCacheDir().
FILES_CACHE = {}
# Valid extensions for files we want to lint.
DEFAULT_LINT_REGEX = r"(.*\.cpp|.*\.cc|.*\.h)"
DEFAULT_LINT_IGNORE_REGEX = r"$^"
def CheckHomeForFile(filename):
"""Checks the users home dir for the existence of the given file. Returns
the path to the file if it's there, or None if it is not.
"""
home_vars = ['HOME']
if sys.platform in ('cygwin', 'win32'):
home_vars.append('USERPROFILE')
for home_var in home_vars:
home = os.getenv(home_var)
if home != None:
full_path = os.path.join(home, filename)
if os.path.exists(full_path):
return full_path
return None
def UnknownFiles():
"""Runs svn status and returns unknown files."""
return [
item[1] for item in SVN.CaptureStatus([], GetRepositoryRoot())
if item[0][0] == '?'
]
def GetRepositoryRoot():
"""Returns the top level directory of the current repository.
The directory is returned as an absolute path.
"""
global REPOSITORY_ROOT
if not REPOSITORY_ROOT:
REPOSITORY_ROOT = SVN.GetCheckoutRoot(os.getcwd())
if not REPOSITORY_ROOT:
raise gclient_utils.Error("gcl run outside of repository")
return REPOSITORY_ROOT
def GetInfoDir():
"""Returns the directory where gcl info files are stored."""
return os.path.join(GetRepositoryRoot(), '.svn', 'gcl_info')
def GetChangesDir():
"""Returns the directory where gcl change files are stored."""
return os.path.join(GetInfoDir(), 'changes')
def GetCacheDir():
"""Returns the directory where gcl change files are stored."""
return os.path.join(GetInfoDir(), 'cache')
def GetCachedFile(filename, max_age=60*60*24*3, use_root=False):
"""Retrieves a file from the repository and caches it in GetCacheDir() for
max_age seconds.
  use_root: If False, walk up the directory hierarchy looking for the first match,
      otherwise go directly to the root repository.
Note: The cache will be inconsistent if the same file is retrieved with both
use_root=True and use_root=False. Don't be stupid.
"""
if filename not in FILES_CACHE:
# Don't try to look up twice.
FILES_CACHE[filename] = None
# First we check if we have a cached version.
try:
cached_file = os.path.join(GetCacheDir(), filename)
except (gclient_utils.Error, subprocess2.CalledProcessError):
return None
if (not os.path.exists(cached_file) or
(time.time() - os.stat(cached_file).st_mtime) > max_age):
dir_info = SVN.CaptureLocalInfo([], '.')
repo_root = dir_info['Repository Root']
if use_root:
url_path = repo_root
else:
url_path = dir_info['URL']
while True:
# Look in the repository at the current level for the file.
for _ in range(5):
content = None
try:
# Take advantage of the fact that svn won't output to stderr in case
# of success but will do in case of failure so don't mind putting
# stderr into content_array.
content_array = []
svn_path = url_path + '/' + filename
args = ['svn', 'cat', svn_path]
if sys.platform != 'darwin':
# MacOSX 10.5.2 has a bug with svn 1.4.4 that will trigger the
# 'Can\'t get username or password' and can be fixed easily.
# The fix doesn't work if the user upgraded to svn 1.6.x. Bleh.
# I don't have time to fix their broken stuff.
args.append('--non-interactive')
gclient_utils.CheckCallAndFilter(
args, cwd='.', filter_fn=content_array.append)
# Exit the loop if the file was found. Override content.
content = '\n'.join(content_array)
break
except (gclient_utils.Error, subprocess2.CalledProcessError):
if content_array[0].startswith(
'svn: Can\'t get username or password'):
ErrorExit('Your svn credentials expired. Please run svn update '
'to fix the cached credentials')
if content_array[0].startswith('svn: Can\'t get password'):
ErrorExit('If are using a Mac and svn --version shows 1.4.x, '
                      'please hack gcl.py to remove --non-interactive usage, it\'s '
'a bug on your installed copy')
if (content_array[0].startswith('svn: File not found:') or
content_array[0].endswith('path not found')):
break
# Otherwise, fall through to trying again.
if content:
break
if url_path == repo_root:
# Reached the root. Abandoning search.
break
# Go up one level to try again.
url_path = os.path.dirname(url_path)
if content is not None or filename != CODEREVIEW_SETTINGS_FILE:
# Write a cached version even if there isn't a file, so we don't try to
# fetch it each time. codereview.settings must always be present so do
# not cache negative.
gclient_utils.FileWrite(cached_file, content or '')
else:
content = gclient_utils.FileRead(cached_file, 'r')
# Keep the content cached in memory.
FILES_CACHE[filename] = content
return FILES_CACHE[filename]
def GetCodeReviewSetting(key):
"""Returns a value for the given key for this repository."""
# Use '__just_initialized' as a flag to determine if the settings were
# already initialized.
if '__just_initialized' not in CODEREVIEW_SETTINGS:
settings_file = GetCachedFile(CODEREVIEW_SETTINGS_FILE)
if settings_file:
CODEREVIEW_SETTINGS.update(
gclient_utils.ParseCodereviewSettingsContent(settings_file))
CODEREVIEW_SETTINGS.setdefault('__just_initialized', None)
return CODEREVIEW_SETTINGS.get(key, "")
def Warn(msg):
print >> sys.stderr, msg
def ErrorExit(msg):
print >> sys.stderr, msg
sys.exit(1)
def RunShellWithReturnCode(command, print_output=False):
"""Executes a command and returns the output and the return code."""
p = subprocess2.Popen(
command,
cwd=GetRepositoryRoot(),
stdout=subprocess2.PIPE,
stderr=subprocess2.STDOUT,
universal_newlines=True)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
if print_output:
print line.strip('\n')
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
p.stdout.close()
return output, p.returncode
def RunShell(command, print_output=False):
"""Executes a command and returns the output."""
return RunShellWithReturnCode(command, print_output)[0]
def FilterFlag(args, flag):
"""Returns True if the flag is present in args list.
The flag is removed from args if present.
"""
if flag in args:
args.remove(flag)
return True
return False
class ChangeInfo(object):
"""Holds information about a changelist.
name: change name.
issue: the Rietveld issue number or 0 if it hasn't been uploaded yet.
patchset: the Rietveld latest patchset number or 0.
description: the description.
files: a list of 2 tuple containing (status, filename) of changed files,
with paths being relative to the top repository directory.
local_root: Local root directory
rietveld: rietveld server for this change
"""
# Kept for unit test support. This is for the old format, it's deprecated.
SEPARATOR = "\n-----\n"
def __init__(self, name, issue, patchset, description, files, local_root,
rietveld_url, needs_upload):
# Defer the description processing to git_cl.ChangeDescription.
self._desc = git_cl.ChangeDescription(description)
self.name = name
self.issue = int(issue)
self.patchset = int(patchset)
self._files = files or []
self.patch = None
self._local_root = local_root
self.needs_upload = needs_upload
self.rietveld = gclient_utils.UpgradeToHttps(
rietveld_url or GetCodeReviewSetting('CODE_REVIEW_SERVER'))
self._rpc_server = None
@property
def description(self):
return self._desc.description
def force_description(self, new_description):
self._desc = git_cl.ChangeDescription(new_description)
self.needs_upload = True
def append_footer(self, line):
self._desc.append_footer(line)
def get_reviewers(self):
return self._desc.get_reviewers()
def NeedsUpload(self):
return self.needs_upload
def GetFileNames(self):
"""Returns the list of file names included in this change."""
return [f[1] for f in self._files]
def GetFiles(self):
"""Returns the list of files included in this change with their status."""
return self._files
def GetLocalRoot(self):
"""Returns the local repository checkout root directory."""
return self._local_root
def Exists(self):
"""Returns True if this change already exists (i.e., is not new)."""
return (self.issue or self.description or self._files)
def _NonDeletedFileList(self):
"""Returns a list of files in this change, not including deleted files."""
return [f[1] for f in self.GetFiles()
if not f[0].startswith("D")]
def _AddedFileList(self):
"""Returns a list of files added in this change."""
return [f[1] for f in self.GetFiles() if f[0].startswith("A")]
def Save(self):
"""Writes the changelist information to disk."""
data = json.dumps({
'issue': self.issue,
'patchset': self.patchset,
'needs_upload': self.NeedsUpload(),
'files': self.GetFiles(),
'description': self.description,
'rietveld': self.rietveld,
}, sort_keys=True, indent=2)
gclient_utils.FileWrite(GetChangelistInfoFile(self.name), data)
def Delete(self):
"""Removes the changelist information from disk."""
os.remove(GetChangelistInfoFile(self.name))
def RpcServer(self):
if not self._rpc_server:
if not self.rietveld:
ErrorExit(CODEREVIEW_SETTINGS_FILE_NOT_FOUND)
self._rpc_server = rietveld.CachingRietveld(self.rietveld, None, None)
return self._rpc_server
def CloseIssue(self):
"""Closes the Rietveld issue for this changelist."""
# Newer versions of Rietveld require us to pass an XSRF token to POST, so
# we fetch it from the server.
xsrf_token = self.SendToRietveld(
'/xsrf_token',
extra_headers={'X-Requesting-XSRF-Token': '1'})
# You cannot close an issue with a GET.
# We pass an empty string for the data so it is a POST rather than a GET.
data = [("description", self.description),
("xsrf_token", xsrf_token)]
ctype, body = upload.EncodeMultipartFormData(data, [])
self.SendToRietveld('/%d/close' % self.issue, payload=body,
content_type=ctype)
def UpdateRietveldDescription(self):
"""Sets the description for an issue on Rietveld."""
data = [("description", self.description),]
ctype, body = upload.EncodeMultipartFormData(data, [])
self.SendToRietveld('/%d/description' % self.issue, payload=body,
content_type=ctype)
self.needs_upload = False
def GetIssueDescription(self):
"""Returns the issue description from Rietveld."""
return self.SendToRietveld('/%d/description' % self.issue)
def UpdateDescriptionFromIssue(self):
"""Updates self.description with the issue description from Rietveld."""
self._desc = git_cl.ChangeDescription(
self.SendToRietveld('/%d/description' % self.issue))
def AddComment(self, comment):
"""Adds a comment for an issue on Rietveld.
As a side effect, this will email everyone associated with the issue."""
return self.RpcServer().add_comment(self.issue, comment)
def PrimeLint(self):
"""Do background work on Rietveld to lint the file so that the results are
ready when the issue is viewed."""
if self.issue and self.patchset:
self.SendToRietveld('/lint/issue%s_%s' % (self.issue, self.patchset),
timeout=10)
def SendToRietveld(self, request_path, timeout=None, **kwargs):
"""Send a POST/GET to Rietveld. Returns the response body."""
try:
return self.RpcServer().Send(request_path, timeout=timeout, **kwargs)
except urllib2.URLError:
if timeout is None:
ErrorExit('Error accessing url %s' % request_path)
else:
return None
def MissingTests(self):
"""Returns True if the change looks like it needs unit tests but has none.
A change needs unit tests if it contains any new source files or methods.
"""
SOURCE_SUFFIXES = [".cc", ".cpp", ".c", ".m", ".mm"]
# Ignore third_party entirely.
files = [f for f in self._NonDeletedFileList()
if f.find("third_party") == -1]
added_files = [f for f in self._AddedFileList()
if f.find("third_party") == -1]
# If the change is entirely in third_party, we're done.
if len(files) == 0:
return False
# Any new or modified test files?
# A test file's name ends with "test.*" or "tests.*".
test_files = [test for test in files
if os.path.splitext(test)[0].rstrip("s").endswith("test")]
if len(test_files) > 0:
return False
# Any new source files?
source_files = [item for item in added_files
if os.path.splitext(item)[1] in SOURCE_SUFFIXES]
if len(source_files) > 0:
return True
# Do the long test, checking the files for new methods.
return self._HasNewMethod()
def _HasNewMethod(self):
"""Returns True if the changeset contains any new functions, or if a
function signature has been changed.
A function is identified by starting flush left, containing a "(" before
the next flush-left line, and either ending with "{" before the next
flush-left line or being followed by an unindented "{".
Currently this returns True for new methods, new static functions, and
methods or functions whose signatures have been changed.
Inline methods added to header files won't be detected by this. That's
acceptable for purposes of determining if a unit test is needed, since
inline methods should be trivial.
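    For example, an added diff line like "+bool Foo() {" would be flagged as a new
    function, while an indented added line like "+  bar();" would not.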
"""
# To check for methods added to source or header files, we need the diffs.
# We'll generate them all, since there aren't likely to be many files
# apart from source and headers; besides, we'll want them all if we're
# uploading anyway.
if self.patch is None:
self.patch = GenerateDiff(self.GetFileNames())
definition = ""
for line in self.patch.splitlines():
if not line.startswith("+"):
continue
line = line.strip("+").rstrip(" \t")
# Skip empty lines, comments, and preprocessor directives.
# TODO(pamg): Handle multiline comments if it turns out to be a problem.
if line == "" or line.startswith("/") or line.startswith("#"):
continue
# A possible definition ending with "{" is complete, so check it.
if definition.endswith("{"):
if definition.find("(") != -1:
return True
definition = ""
# A { or an indented line, when we're in a definition, continues it.
if (definition != "" and
(line == "{" or line.startswith(" ") or line.startswith("\t"))):
definition += line
# A flush-left line starts a new possible function definition.
elif not line.startswith(" ") and not line.startswith("\t"):
definition = line
return False
@staticmethod
def Load(changename, local_root, fail_on_not_found, update_status):
"""Gets information about a changelist.
Args:
fail_on_not_found: if True, this function will quit the program if the
changelist doesn't exist.
update_status: if True, the svn status will be updated for all the files
and unchanged files will be removed.
Returns: a ChangeInfo object.
"""
info_file = GetChangelistInfoFile(changename)
if not os.path.exists(info_file):
if fail_on_not_found:
ErrorExit("Changelist " + changename + " not found.")
return ChangeInfo(changename, 0, 0, '', None, local_root, None, False)
content = gclient_utils.FileRead(info_file)
save = False
try:
values = ChangeInfo._LoadNewFormat(content)
except ValueError:
try:
values = ChangeInfo._LoadOldFormat(content)
save = True
except ValueError:
ErrorExit(
('Changelist file %s is corrupt.\n'
'Either run "gcl delete %s" or manually edit the file') % (
info_file, changename))
files = values['files']
if update_status:
for item in files[:]:
status_result = SVN.CaptureStatus(item[1], local_root)
if not status_result or not status_result[0][0]:
# File has been reverted.
save = True
files.remove(item)
continue
status = status_result[0][0]
if status != item[0]:
save = True
files[files.index(item)] = (status, item[1])
change_info = ChangeInfo(
changename,
values['issue'],
values['patchset'],
values['description'],
files,
local_root,
values.get('rietveld'),
values['needs_upload'])
if save:
change_info.Save()
return change_info
@staticmethod
def _LoadOldFormat(content):
# The info files have the following format:
# issue_id, patchset\n (, patchset is optional)
# SEPARATOR\n
# filepath1\n
# filepath2\n
# .
# .
# filepathn\n
# SEPARATOR\n
# description
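    # For example (hypothetical issue 12345 at patchset 2), the file body would look like:
    #   12345, 2
    #   -----
    #   M      path/to/file.cc
    #   -----
    #   Fix the widget.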
split_data = content.split(ChangeInfo.SEPARATOR, 2)
if len(split_data) != 3:
raise ValueError('Bad change format')
values = {
'issue': 0,
'patchset': 0,
'needs_upload': False,
'files': [],
}
items = split_data[0].split(', ')
if items[0]:
values['issue'] = int(items[0])
if len(items) > 1:
values['patchset'] = int(items[1])
if len(items) > 2:
values['needs_upload'] = (items[2] == "dirty")
for line in split_data[1].splitlines():
status = line[:7]
filename = line[7:]
values['files'].append((status, filename))
values['description'] = split_data[2]
return values
@staticmethod
def _LoadNewFormat(content):
return json.loads(content)
def __str__(self):
out = ['%s:' % self.__class__.__name__]
for k in dir(self):
if k.startswith('__'):
continue
v = getattr(self, k)
if v is self or callable(getattr(self, k)):
continue
out.append(' %s: %r' % (k, v))
return '\n'.join(out)
def GetChangelistInfoFile(changename):
"""Returns the file that stores information about a changelist."""
if not changename or re.search(r'[^\w-]', changename):
ErrorExit("Invalid changelist name: " + changename)
return os.path.join(GetChangesDir(), changename)
def LoadChangelistInfoForMultiple(changenames, local_root, fail_on_not_found,
update_status):
"""Loads many changes and merge their files list into one pseudo change.
  This is mainly useful to concatenate many changes into one for a 'gcl try'.
"""
changes = changenames.split(',')
aggregate_change_info = ChangeInfo(
changenames, 0, 0, '', None, local_root, None, False)
for change in changes:
aggregate_change_info._files += ChangeInfo.Load(
change, local_root, fail_on_not_found, update_status).GetFiles()
return aggregate_change_info
def GetCLs():
"""Returns a list of all the changelists in this repository."""
cls = os.listdir(GetChangesDir())
if CODEREVIEW_SETTINGS_FILE in cls:
cls.remove(CODEREVIEW_SETTINGS_FILE)
return cls
def GenerateChangeName():
"""Generate a random changelist name."""
random.seed()
current_cl_names = GetCLs()
while True:
cl_name = (random.choice(string.ascii_lowercase) +
random.choice(string.digits) +
random.choice(string.ascii_lowercase) +
random.choice(string.digits))
if cl_name not in current_cl_names:
return cl_name
def GetModifiedFiles():
"""Returns a set that maps from changelist name to (status,filename) tuples.
Files not in a changelist have an empty changelist name. Filenames are in
relation to the top level directory of the current repository. Note that
only the current directory and subdirectories are scanned, in order to
improve performance while still being flexible.
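  For instance, a hypothetical return value could look like
  {'mychange': [('M      ', 'src/foo.cc')], '': [('A      ', 'src/new_file.cc')]}.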
"""
files = {}
  # Since the files are normalized to the root folder of the repository, figure
# out what we need to add to the paths.
dir_prefix = os.getcwd()[len(GetRepositoryRoot()):].strip(os.sep)
# Get a list of all files in changelists.
files_in_cl = {}
for cl in GetCLs():
change_info = ChangeInfo.Load(cl, GetRepositoryRoot(),
fail_on_not_found=True, update_status=False)
for status, filename in change_info.GetFiles():
files_in_cl[filename] = change_info.name
# Get all the modified files down the current directory.
for line in SVN.CaptureStatus(None, os.getcwd()):
status = line[0]
filename = line[1]
if status[0] == "?":
continue
if dir_prefix:
filename = os.path.join(dir_prefix, filename)
change_list_name = ""
if filename in files_in_cl:
change_list_name = files_in_cl[filename]
files.setdefault(change_list_name, []).append((status, filename))
return files
def GetFilesNotInCL():
"""Returns a list of tuples (status,filename) that aren't in any changelists.
See docstring of GetModifiedFiles for information about path of files and
which directories are scanned.
"""
modified_files = GetModifiedFiles()
if "" not in modified_files:
return []
return modified_files[""]
def ListFiles(show_unknown_files):
files = GetModifiedFiles()
cl_keys = files.keys()
cl_keys.sort()
for cl_name in cl_keys:
if not cl_name:
continue
note = ""
change_info = ChangeInfo.Load(cl_name, GetRepositoryRoot(),
fail_on_not_found=True, update_status=False)
if len(change_info.GetFiles()) != len(files[cl_name]):
note = " (Note: this changelist contains files outside this directory)"
print "\n--- Changelist " + cl_name + note + ":"
for filename in files[cl_name]:
print "".join(filename)
if show_unknown_files:
unknown_files = UnknownFiles()
if (files.get('') or (show_unknown_files and len(unknown_files))):
print "\n--- Not in any changelist:"
for item in files.get('', []):
print "".join(item)
if show_unknown_files:
for filename in unknown_files:
print "? %s" % filename
return 0
def GenerateDiff(files):
return SVN.GenerateDiff(
files, GetRepositoryRoot(), full_move=False, revision=None)
def OptionallyDoPresubmitChecks(change_info, committing, args):
if FilterFlag(args, "--no_presubmit") or FilterFlag(args, "--force"):
breakpad.SendStack(
breakpad.DEFAULT_URL + '/breakpad',
'GclHooksBypassedCommit',
'Issue %s/%s bypassed hook when committing' %
(change_info.rietveld, change_info.issue),
verbose=False)
return presubmit_support.PresubmitOutput()
return DoPresubmitChecks(change_info, committing, True)
def defer_attributes(a, b):
"""Copy attributes from an object (like a function) to another."""
for x in dir(a):
if not getattr(b, x, None):
setattr(b, x, getattr(a, x))
def need_change(function):
"""Converts args -> change_info."""
# pylint: disable=W0612,W0621
def hook(args):
if not len(args) == 1:
ErrorExit("You need to pass a change list name")
change_info = ChangeInfo.Load(args[0], GetRepositoryRoot(), True, True)
return function(change_info)
defer_attributes(function, hook)
hook.need_change = True
hook.no_args = True
return hook
def need_change_and_args(function):
"""Converts args -> change_info."""
# pylint: disable=W0612,W0621
def hook(args):
if not args:
ErrorExit("You need to pass a change list name")
change_info = ChangeInfo.Load(args.pop(0), GetRepositoryRoot(), True, True)
return function(change_info, args)
defer_attributes(function, hook)
hook.need_change = True
return hook
def no_args(function):
"""Make sure no args are passed."""
# pylint: disable=W0612,W0621
def hook(args):
if args:
ErrorExit("Doesn't support arguments")
return function()
defer_attributes(function, hook)
hook.no_args = True
return hook
def attrs(**kwargs):
"""Decorate a function with new attributes."""
def decorate(function):
for k in kwargs:
setattr(function, k, kwargs[k])
return function
return decorate
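# Illustrative note (added; not part of the original tool): the decorators above
# compose, so a typical subcommand that needs a changelist plus extra flags is
# declared roughly as
#
#   @need_change_and_args
#   @attrs(usage='[--no_presubmit]')
#   def CMDexample(change_info, args):
#     """One-line help shown by gcl help."""
#     return 0
#
# where CMDexample is a hypothetical name used only for this sketch.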
@no_args
def CMDopened():
"""Lists modified files in the current directory down."""
return ListFiles(False)
@no_args
def CMDstatus():
"""Lists modified and unknown files in the current directory down."""
return ListFiles(True)
@need_change_and_args
@attrs(usage='[--no_presubmit] [--no_watchlists]')
def CMDupload(change_info, args):
"""Uploads the changelist to the server for review.
This does not submit a try job; use gcl try to submit a try job.
"""
if '-s' in args or '--server' in args:
ErrorExit('Don\'t use the -s flag, fix codereview.settings instead')
if not change_info.GetFiles():
print "Nothing to upload, changelist is empty."
return 0
output = OptionallyDoPresubmitChecks(change_info, False, args)
if not output.should_continue():
return 1
no_watchlists = (FilterFlag(args, "--no_watchlists") or
FilterFlag(args, "--no-watchlists"))
# Map --send-mail to --send_mail
if FilterFlag(args, "--send-mail"):
args.append("--send_mail")
# Replace -m with -t and --message with --title, but make sure to
# preserve anything after the -m/--message.
found_deprecated_arg = [False]
def replace_message(a):
if a.startswith('-m'):
found_deprecated_arg[0] = True
return '-t' + a[2:]
elif a.startswith('--message'):
found_deprecated_arg[0] = True
return '--title' + a[9:]
return a
args = map(replace_message, args)
if found_deprecated_arg[0]:
print >> sys.stderr, (
'\nWARNING: Use -t or --title to set the title of the patchset.\n'
'In the near future, -m or --message will send a message instead.\n'
'See http://goo.gl/JGg0Z for details.\n')
upload_arg = ["upload.py", "-y"]
upload_arg.append("--server=%s" % change_info.rietveld)
reviewers = change_info.get_reviewers() or output.reviewers
if (reviewers and
not any(arg.startswith('-r') or arg.startswith('--reviewer') for
arg in args)):
upload_arg.append('--reviewers=%s' % ','.join(reviewers))
upload_arg.extend(args)
desc_file = None
try:
if change_info.issue:
# Uploading a new patchset.
upload_arg.append("--issue=%d" % change_info.issue)
if not any(i.startswith('--title') or i.startswith('-t') for i in args):
upload_arg.append('--title= ')
else:
# First time we upload.
handle, desc_file = tempfile.mkstemp(text=True)
os.write(handle, change_info.description)
os.close(handle)
# Watchlist processing -- CC people interested in this changeset
# http://dev.chromium.org/developers/contributing-code/watchlists
if not no_watchlists:
import watchlists
watchlist = watchlists.Watchlists(change_info.GetLocalRoot())
watchers = watchlist.GetWatchersForPaths(change_info.GetFileNames())
cc_list = GetCodeReviewSetting("CC_LIST")
if not no_watchlists and watchers:
# Filter out all empty elements and join by ','
cc_list = ','.join(filter(None, [cc_list] + watchers))
if cc_list:
upload_arg.append("--cc=" + cc_list)
upload_arg.append("--file=%s" % desc_file)
if GetCodeReviewSetting("PRIVATE") == "True":
upload_arg.append("--private")
# If we have a lot of files with long paths, then we won't be able to fit
# the command to "svn diff". Instead, we generate the diff manually for
# each file and concatenate them before passing it to upload.py.
if change_info.patch is None:
change_info.patch = GenerateDiff(change_info.GetFileNames())
# Change the current working directory before calling upload.py so that it
# shows the correct base.
previous_cwd = os.getcwd()
os.chdir(change_info.GetLocalRoot())
try:
try:
issue, patchset = upload.RealMain(upload_arg, change_info.patch)
except KeyboardInterrupt:
sys.exit(1)
if issue and patchset:
change_info.issue = int(issue)
change_info.patchset = int(patchset)
change_info.Save()
change_info.PrimeLint()
finally:
os.chdir(previous_cwd)
finally:
if desc_file:
os.remove(desc_file)
print "*** Upload does not submit a try; use gcl try to submit a try. ***"
return 0
@need_change_and_args
@attrs(usage='[--upload]')
def CMDpresubmit(change_info, args):
"""Runs presubmit checks on the change.
The actual presubmit code is implemented in presubmit_support.py and looks
for PRESUBMIT.py files."""
if not change_info.GetFiles():
print('Nothing to presubmit check, changelist is empty.')
return 0
parser = optparse.OptionParser()
parser.add_option('--upload', action='store_true')
options, args = parser.parse_args(args)
if args:
parser.error('Unrecognized args: %s' % args)
if options.upload:
print('*** Presubmit checks for UPLOAD would report: ***')
return not DoPresubmitChecks(change_info, False, False)
else:
print('*** Presubmit checks for COMMIT would report: ***')
return not DoPresubmitChecks(change_info, True, False)
def TryChange(change_info, args, swallow_exception):
"""Create a diff file of change_info and send it to the try server."""
try:
import trychange
except ImportError:
if swallow_exception:
return 1
ErrorExit("You need to install trychange.py to use the try server.")
trychange_args = []
if change_info:
trychange_args.extend(['--name', change_info.name])
if change_info.issue:
trychange_args.extend(["--issue", str(change_info.issue)])
if change_info.patchset:
trychange_args.extend(["--patchset", str(change_info.patchset)])
change = presubmit_support.SvnChange(change_info.name,
change_info.description,
change_info.GetLocalRoot(),
change_info.GetFiles(),
change_info.issue,
change_info.patchset,
None)
else:
change = None
trychange_args.extend(args)
return trychange.TryChange(
trychange_args,
change=change,
swallow_exception=swallow_exception,
prog='gcl try',
extra_epilog='\n'
'When called from gcl, use the format gcl try <change_name>.\n')
@need_change_and_args
@attrs(usage='[--no_presubmit]')
def CMDcommit(change_info, args):
"""Commits the changelist to the repository."""
if not change_info.GetFiles():
print "Nothing to commit, changelist is empty."
return 1
# OptionallyDoPresubmitChecks has a side-effect which eats these flags.
bypassed = '--no_presubmit' in args or '--force' in args
output = OptionallyDoPresubmitChecks(change_info, True, args)
if not output.should_continue():
return 1
# We face a problem with svn here: Let's say change 'bleh' modifies
# svn:ignore on dir1\. but another unrelated change 'pouet' modifies
# dir1\foo.cc. When the user `gcl commit bleh`, foo.cc is *also committed*.
# The only fix is to use --non-recursive but that has its issues too:
# Let's say if dir1 is deleted, --non-recursive must *not* be used otherwise
# you'll get "svn: Cannot non-recursively commit a directory deletion of a
# directory with child nodes". Yay...
commit_cmd = ["svn", "commit"]
if change_info.issue:
# Get the latest description from Rietveld.
change_info.UpdateDescriptionFromIssue()
commit_desc = git_cl.ChangeDescription(change_info.description)
if change_info.issue:
server = change_info.rietveld
if not server.startswith("http://") and not server.startswith("https://"):
server = "http://" + server
commit_desc.append_footer('Review URL: %s/%d' % (server, change_info.issue))
handle, commit_filename = tempfile.mkstemp(text=True)
os.write(handle, commit_desc.description)
os.close(handle)
try:
handle, targets_filename = tempfile.mkstemp(text=True)
os.write(handle, "\n".join(change_info.GetFileNames()))
os.close(handle)
try:
commit_cmd += ['--file=' + commit_filename]
commit_cmd += ['--targets=' + targets_filename]
# Change the current working directory before calling commit.
output = ''
try:
output = RunShell(commit_cmd, True)
except subprocess2.CalledProcessError, e:
ErrorExit('Commit failed.\n%s' % e)
finally:
os.remove(commit_filename)
finally:
os.remove(targets_filename)
if output.find("Committed revision") != -1:
change_info.Delete()
if change_info.issue:
revision = re.compile(".*?\nCommitted revision (\d+)",
re.DOTALL).match(output).group(1)
viewvc_url = GetCodeReviewSetting('VIEW_VC')
if viewvc_url and revision:
change_info.append_footer('Committed: ' + viewvc_url + revision)
elif revision:
change_info.append_footer('Committed: ' + revision)
change_info.CloseIssue()
props = change_info.RpcServer().get_issue_properties(
change_info.issue, False)
patch_num = len(props['patchsets'])
comment = "Committed patchset #%d manually as r%s" % (patch_num, revision)
comment += ' (presubmit successful).' if not bypassed else '.'
change_info.AddComment(comment)
return 0
def CMDchange(args):
"""Creates or edits a changelist.
Only scans the current directory and subdirectories.
"""
# Verify the user is running the change command from a read-write checkout.
svn_info = SVN.CaptureLocalInfo([], '.')
if not svn_info:
ErrorExit("Current checkout is unversioned. Please retry with a versioned "
"directory.")
if len(args) == 0:
# Generate a random changelist name.
changename = GenerateChangeName()
elif args[0] == '--force':
changename = GenerateChangeName()
else:
changename = args[0]
change_info = ChangeInfo.Load(changename, GetRepositoryRoot(), False, True)
if len(args) == 2:
if not os.path.isfile(args[1]):
ErrorExit('The change "%s" doesn\'t exist.' % args[1])
f = open(args[1], 'rU')
override_description = f.read()
f.close()
else:
override_description = None
if change_info.issue and not change_info.NeedsUpload():
try:
description = change_info.GetIssueDescription()
except urllib2.HTTPError, err:
if err.code == 404:
# The user deleted the issue in Rietveld, so forget the old issue id.
description = change_info.description
change_info.issue = 0
change_info.Save()
else:
        ErrorExit("Error getting the description from Rietveld: " + str(err))
else:
if override_description:
description = override_description
else:
description = change_info.description
other_files = GetFilesNotInCL()
# Edited files (as opposed to files with only changed properties) will have
# a letter for the first character in the status string.
file_re = re.compile(r"^[a-z].+\Z", re.IGNORECASE)
affected_files = [x for x in other_files if file_re.match(x[0])]
unaffected_files = [x for x in other_files if not file_re.match(x[0])]
description = description.rstrip() + '\n'
separator1 = ("\n---All lines above this line become the description.\n"
"---Repository Root: " + change_info.GetLocalRoot() + "\n"
"---Paths in this changelist (" + change_info.name + "):\n")
separator2 = "\n\n---Paths modified but not in any changelist:\n\n"
text = (description + separator1 + '\n' +
'\n'.join([f[0] + f[1] for f in change_info.GetFiles()]))
if change_info.Exists():
text += (separator2 +
'\n'.join([f[0] + f[1] for f in affected_files]) + '\n')
else:
text += ('\n'.join([f[0] + f[1] for f in affected_files]) + '\n' +
separator2)
text += '\n'.join([f[0] + f[1] for f in unaffected_files]) + '\n'
result = gclient_utils.RunEditor(text, False)
if not result:
ErrorExit('Running editor failed')
split_result = result.split(separator1, 1)
if len(split_result) != 2:
ErrorExit("Don't modify the text starting with ---!\n\n%r" % result)
# Update the CL description if it has changed.
new_description = split_result[0]
cl_files_text = split_result[1]
if new_description != description or override_description:
change_info.force_description(new_description)
new_cl_files = []
for line in cl_files_text.splitlines():
if not len(line):
continue
if line.startswith("---"):
break
status = line[:7]
filename = line[7:]
new_cl_files.append((status, filename))
if (not len(change_info.GetFiles()) and not change_info.issue and
not len(new_description) and not new_cl_files):
ErrorExit("Empty changelist not saved")
change_info._files = new_cl_files
change_info.Save()
if svn_info.get('URL', '').startswith('http:'):
Warn("WARNING: Creating CL in a read-only checkout. You will need to "
"commit using a commit queue!")
print change_info.name + " changelist saved."
if change_info.MissingTests():
Warn("WARNING: " + MISSING_TEST_MSG)
# Update the Rietveld issue.
if change_info.issue and change_info.NeedsUpload():
change_info.UpdateRietveldDescription()
change_info.Save()
return 0
@need_change_and_args
def CMDlint(change_info, args):
"""Runs cpplint.py on all the files in the change list.
Checks all the files in the changelist for possible style violations.
"""
# Access to a protected member _XX of a client class
# pylint: disable=W0212
try:
import cpplint
import cpplint_chromium
except ImportError:
ErrorExit("You need to install cpplint.py to lint C++ files.")
# Change the current working directory before calling lint so that it
# shows the correct base.
previous_cwd = os.getcwd()
os.chdir(change_info.GetLocalRoot())
try:
    # Process cpplint's arguments, if any.
filenames = cpplint.ParseArguments(args + change_info.GetFileNames())
white_list = GetCodeReviewSetting("LINT_REGEX")
if not white_list:
white_list = DEFAULT_LINT_REGEX
white_regex = re.compile(white_list)
black_list = GetCodeReviewSetting("LINT_IGNORE_REGEX")
if not black_list:
black_list = DEFAULT_LINT_IGNORE_REGEX
black_regex = re.compile(black_list)
extra_check_functions = [cpplint_chromium.CheckPointerDeclarationWhitespace]
for filename in filenames:
if white_regex.match(filename):
if black_regex.match(filename):
print "Ignoring file %s" % filename
else:
cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level,
extra_check_functions)
else:
print "Skipping file %s" % filename
finally:
os.chdir(previous_cwd)
print "Total errors found: %d\n" % cpplint._cpplint_state.error_count
return 1
def DoPresubmitChecks(change_info, committing, may_prompt):
"""Imports presubmit, then calls presubmit.DoPresubmitChecks."""
root_presubmit = GetCachedFile('PRESUBMIT.py', use_root=True)
change = presubmit_support.SvnChange(change_info.name,
change_info.description,
change_info.GetLocalRoot(),
change_info.GetFiles(),
change_info.issue,
change_info.patchset,
None)
output = presubmit_support.DoPresubmitChecks(
change=change,
committing=committing,
verbose=False,
output_stream=sys.stdout,
input_stream=sys.stdin,
default_presubmit=root_presubmit,
may_prompt=may_prompt,
rietveld_obj=change_info.RpcServer())
if not output.should_continue() and may_prompt:
# TODO(dpranke): move into DoPresubmitChecks(), unify cmd line args.
print "\nPresubmit errors, can't continue (use --no_presubmit to bypass)"
return output
@no_args
def CMDchanges():
"""Lists all the changelists and their files."""
for cl in GetCLs():
change_info = ChangeInfo.Load(cl, GetRepositoryRoot(), True, True)
print "\n--- Changelist " + change_info.name + ":"
for filename in change_info.GetFiles():
print "".join(filename)
return 0
@no_args
def CMDdeleteempties():
"""Delete all changelists that have no files."""
print "\n--- Deleting:"
for cl in GetCLs():
change_info = ChangeInfo.Load(cl, GetRepositoryRoot(), True, True)
if not len(change_info.GetFiles()):
print change_info.name
change_info.Delete()
return 0
@no_args
def CMDnothave():
"""Lists files unknown to Subversion."""
for filename in UnknownFiles():
print "? " + "".join(filename)
return 0
@attrs(usage='<svn options>')
def CMDdiff(args):
"""Diffs all files in the changelist or all files that aren't in a CL."""
files = None
if args:
change_info = ChangeInfo.Load(args.pop(0), GetRepositoryRoot(), True, True)
files = change_info.GetFileNames()
else:
files = [f[1] for f in GetFilesNotInCL()]
root = GetRepositoryRoot()
cmd = ['svn', 'diff']
cmd.extend([os.path.join(root, x) for x in files])
cmd.extend(args)
return RunShellWithReturnCode(cmd, print_output=True)[1]
@no_args
def CMDsettings():
"""Prints code review settings for this checkout."""
# Force load settings
GetCodeReviewSetting("UNKNOWN")
del CODEREVIEW_SETTINGS['__just_initialized']
print '\n'.join(("%s: %s" % (str(k), str(v))
for (k,v) in CODEREVIEW_SETTINGS.iteritems()))
return 0
@need_change
def CMDdescription(change_info):
"""Prints the description of the specified change to stdout."""
print change_info.description
return 0
def CMDdelete(args):
"""Deletes a changelist."""
if not len(args) == 1:
ErrorExit('You need to pass a change list name')
filepath = GetChangelistInfoFile(args[0])
if not os.path.isfile(filepath):
ErrorExit('You need to pass a valid change list name')
os.remove(filepath)
return 0
def CMDtry(args):
"""Sends the change to the tryserver to do a test run on your code.
  To send multiple changes as one patch, use a comma-separated list of
changenames. Use 'gcl help try' for more information!"""
# When the change contains no file, send the "changename" positional
# argument to trychange.py.
# When the command is 'try' and --patchset is used, the patch to try
# is on the Rietveld server.
if not args:
ErrorExit("You need to pass a change list name")
if args[0].find(',') != -1:
change_info = LoadChangelistInfoForMultiple(args[0], GetRepositoryRoot(),
True, True)
else:
change_info = ChangeInfo.Load(args[0], GetRepositoryRoot(),
True, True)
if change_info.GetFiles():
args = args[1:]
else:
change_info = None
return TryChange(change_info, args, swallow_exception=False)
@attrs(usage='<old-name> <new-name>')
def CMDrename(args):
"""Renames an existing change."""
if len(args) != 2:
ErrorExit("Usage: gcl rename <old-name> <new-name>.")
src, dst = args
src_file = GetChangelistInfoFile(src)
if not os.path.isfile(src_file):
ErrorExit("Change '%s' does not exist." % src)
dst_file = GetChangelistInfoFile(dst)
if os.path.isfile(dst_file):
ErrorExit("Change '%s' already exists; pick a new name." % dst)
os.rename(src_file, dst_file)
print "Change '%s' renamed '%s'." % (src, dst)
return 0
def CMDpassthru(args):
"""Everything else that is passed into gcl we redirect to svn.
  It assumes a changelist name is passed and expands it into that changelist's file names.
"""
if not args or len(args) < 2:
ErrorExit("You need to pass a change list name for this svn fall-through "
"command")
cl_name = args[1]
args = ["svn", args[0]]
if len(args) > 1:
root = GetRepositoryRoot()
change_info = ChangeInfo.Load(cl_name, root, True, True)
args.extend([os.path.join(root, x) for x in change_info.GetFileNames()])
return RunShellWithReturnCode(args, print_output=True)[1]
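# Illustrative example (derived from the code above; the changelist name is
# hypothetical): "gcl revert my_change" falls through to
# "svn revert <files listed in changelist 'my_change'>".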
def Command(name):
return getattr(sys.modules[__name__], 'CMD' + name, None)
def GenUsage(command):
"""Modify an OptParse object with the function's documentation."""
obj = Command(command)
display = command
more = getattr(obj, 'usage', '')
if command == 'help':
display = '<command>'
need_change_val = ''
if getattr(obj, 'need_change', None):
need_change_val = ' <change_list>'
options = ' [options]'
if getattr(obj, 'no_args', None):
options = ''
res = 'Usage: gcl %s%s%s %s\n\n' % (display, need_change_val, options, more)
res += re.sub('\n ', '\n', obj.__doc__)
return res
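# Illustrative example (approximate; the exact text depends on each command's
# attributes and docstring): GenUsage('rename') yields roughly
# "Usage: gcl rename [options] <old-name> <new-name>" followed by CMDrename's help.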
def CMDhelp(args):
"""Prints this help or help for the given command."""
if args and 'CMD' + args[0] in dir(sys.modules[__name__]):
print GenUsage(args[0])
# These commands defer to external tools so give this info too.
if args[0] == 'try':
TryChange(None, ['--help'], swallow_exception=False)
if args[0] == 'upload':
upload.RealMain(['upload.py', '--help'])
return 0
print GenUsage('help')
print sys.modules[__name__].__doc__
print 'version ' + __version__ + '\n'
print('Commands are:\n' + '\n'.join([
' %-12s %s' % (fn[3:], Command(fn[3:]).__doc__.split('\n')[0].strip())
for fn in dir(sys.modules[__name__]) if fn.startswith('CMD')]))
return 0
def main(argv):
if sys.hexversion < 0x02060000:
print >> sys.stderr, (
'\nYour python version %s is unsupported, please upgrade.\n' %
sys.version.split(' ', 1)[0])
return 2
if not argv:
argv = ['help']
command = Command(argv[0])
# Help can be run from anywhere.
if command == CMDhelp:
return command(argv[1:])
try:
GetRepositoryRoot()
except (gclient_utils.Error, subprocess2.CalledProcessError):
print >> sys.stderr, 'To use gcl, you need to be in a subversion checkout.'
return 1
  # Create the directories where we store information about changelists if they
  # don't exist.
try:
if not os.path.exists(GetInfoDir()):
os.mkdir(GetInfoDir())
if not os.path.exists(GetChangesDir()):
os.mkdir(GetChangesDir())
if not os.path.exists(GetCacheDir()):
os.mkdir(GetCacheDir())
if command:
return command(argv[1:])
# Unknown command, try to pass that to svn
return CMDpassthru(argv)
except (gclient_utils.Error, subprocess2.CalledProcessError), e:
print >> sys.stderr, 'Got an exception'
print >> sys.stderr, str(e)
return 1
except upload.ClientLoginError, e:
print >> sys.stderr, 'Got an exception logging in to Rietveld'
print >> sys.stderr, str(e)
return 1
except urllib2.HTTPError, e:
if e.code != 500:
raise
print >> sys.stderr, (
'AppEngine is misbehaving and returned HTTP %d, again. Keep faith '
'and retry or visit go/isgaeup.\n%s') % (e.code, str(e))
return 1
if __name__ == "__main__":
fix_encoding.fix_encoding()
sys.exit(main(sys.argv[1:]))
| bsd-3-clause | 6,702,259,805,813,352,000 | 32.757844 | 80 | 0.648548 | false |
supersteph/supersteph.github.io | word2vec_as_MF.py | 1 | 12494 | import matplotlib.pyplot as plt
import os
import csv
import pickle
import operator
import numpy as np
from numpy.linalg import svd, qr
from scipy.spatial.distance import cosine
from scipy.sparse.linalg import svds
class Word2vecMF(object):
def __init__(self):
"""
Main class for working with word2vec as MF.
D -- word-context co-occurrence matrix;
B -- such matrix that B_cw = k*(#c)*(#w)/|D|;
C, W -- factors of matrix D decomposition;
vocab -- vocabulary of words from data;
inv_vocab -- inverse of dictionary.
"""
self.D = None
self.B = None
self.C = None
self.W = None
self.vocab = None
self.inv_vocab = None
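    # Illustrative usage sketch (added; file names and hyper-parameters below are
    # hypothetical, not from the original project):
    #
    #   model = Word2vecMF()
    #   model.data_to_matrices(sentences, r=5, k=5, to_file='data/matrices.npz')
    #   model.load_matrices('data/matrices.npz')
    #   model.projector_splitting(eta=5e-6, d=100, MAX_ITER=10)
    #   model.nearest_words('king', top=10)
    #
    # where `sentences` is assumed to be a list of tokenized sentences (lists of words).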
############ Create training corpus from raw sentences ################
def create_vocabulary(self, data, r):
"""
Create a vocabulary from a list of sentences,
eliminating words which occur less than r times.
"""
prevocabulary = {}
for sentence in data:
for word in sentence:
if not prevocabulary.has_key(word):
prevocabulary[word] = 1
else:
prevocabulary[word] += 1
vocabulary = {}
idx = 0
for word in prevocabulary:
if (prevocabulary[word] >= r):
vocabulary[word] = idx
idx += 1
return vocabulary
def create_matrix_D(self, data, window_size=5):
"""
Create a co-occurrence matrix D from training corpus.
"""
dim = len(self.vocab)
D = np.zeros((dim, dim))
s = window_size/2
for sentence in data:
l = len(sentence)
for i in xrange(l):
for j in xrange(max(0,i-s), min(i+s+1,l)):
if (i != j and self.vocab.has_key(sentence[i])
and self.vocab.has_key(sentence[j])):
c = self.vocab[sentence[j]]
w = self.vocab[sentence[i]]
D[c][w] += 1
return D
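    # Clarifying note (added): D[c][w] counts how often context word c occurs inside
    # the symmetric window around word w; with the default window_size=5, s = 5/2 = 2
    # tokens are considered on each side (Python 2 integer division).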
def create_matrix_B(self, k):
"""
Create matrix B (defined in init).
"""
c_ = self.D.sum(axis=1)
w_ = self.D.sum(axis=0)
P = self.D.sum()
w_v, c_v = np.meshgrid(w_, c_)
B = k*(w_v*c_v)/float(P)
return B
######################### Necessary functions #########################
def sigmoid(self, X):
"""
Sigmoid function sigma(x)=1/(1+e^{-x}) of matrix X.
"""
Y = X.copy()
Y[X>20] = 1-1e-6
Y[X<-20] = 1e-6
Y[(X<20)&(X>-20)] = 1 / (1 + np.exp(-X[(X<20)&(X>-20)]))
return Y
def sigma(self, x):
"""
Sigmoid function of element x.
"""
if (x>20):
return 1-1e-6
if (x<-20):
return 1e-6
else:
return 1 / (1 + np.exp(-x))
def MF(self, C, W):
"""
Objective MF(D,C^TW) we want to minimize.
"""
X = C.T.dot(W)
MF = self.D*np.log(self.sigmoid(X)) + self.B*np.log(self.sigmoid(-X))
return -MF.mean()
def grad_MF(self, C, W):
"""
Gradient of the functional MF(D,C^TW) over C^TW.
"""
X = C.T.dot(W)
grad = self.D*self.sigmoid(-X) - self.B*self.sigmoid(X)
return grad
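    # Clarifying note (added): this is the element-wise derivative, with respect to
    # x_cw of X = C^T W, of D_cw*log(sigmoid(x_cw)) + B_cw*log(sigmoid(-x_cw));
    # alt_min and projector_splitting below take gradient steps along it.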
################# Alternating minimization algorithm ##################
def alt_min(self, eta=1e-7, d=100, MAX_ITER=1, from_iter=0, display=0,
init=(False, None, None), save=(False, None)):
"""
        Alternating minimization algorithm for word2vec matrix factorization.
"""
# Initialization
if (init[0]):
self.C = init[1]
self.W = init[2]
else:
self.C = np.random.rand(d, self.D.shape[0])
self.W = np.random.rand(d, self.D.shape[1])
if (save[0] and from_iter==0):
self.save_CW(save[1], 0)
for it in xrange(from_iter, from_iter+MAX_ITER):
if (display):
print "Iter #:", it+1
gradW = (self.C).dot(self.grad_MF(self.C, self.W))
self.W = self.W + eta*gradW
gradC = self.W.dot(self.grad_MF(self.C, self.W).T)
self.C = self.C + eta*gradC
if (save[0]):
self.save_CW(save[1], it+1)
#################### Projector splitting algorithm ####################
def projector_splitting(self, eta=5e-6, d=100,
MAX_ITER=1, from_iter=0, display=0,
init=(False, None, None), save=(False, None)):
"""
Projector splitting algorithm for word2vec matrix factorization.
"""
# Initialization
if (init[0]):
self.C = init[1]
self.W = init[2]
else:
self.C = np.random.rand(d, self.D.shape[0])
self.W = np.random.rand(d, self.D.shape[1])
if (save[0] and from_iter==0):
self.save_CW(save[1], 0)
X = (self.C).T.dot(self.W)
for it in xrange(from_iter, from_iter+MAX_ITER):
if (display):
print "Iter #:", it+1
U, S, V = svds(X, d)
S = np.diag(S)
V = V.T
self.C = U.dot(np.sqrt(S)).T
self.W = np.sqrt(S).dot(V.T)
if (save[0]):
self.save_CW(save[1], it+1)
F = self.grad_MF(self.C, self.W)
#mask = np.random.binomial(1, .5, size=F.shape)
#F = F * mask
U, _ = qr((X + eta*F).dot(V))
V, S = qr((X + eta*F).T.dot(U))
V = V.T
S = S.T
X = U.dot(S).dot(V)
def stochastic_ps(self, eta=5e-6, batch_size=100, d=100,
MAX_ITER=1, from_iter=0, display=0,
init=(False, None, None), save=(False, None)):
"""
        Stochastic version of projector splitting.
"""
# Initialization
if (init[0]):
self.C = init[1]
self.W = init[2]
else:
self.C = np.random.rand(d, self.D.shape[0])
self.W = np.random.rand(d, self.D.shape[1])
if (save[0] and from_iter==0):
self.save_CW(save[1], 0)
pw = self.D.sum(axis=0) / self.D.sum()
pc_w = self.D / self.D.sum(axis=0)
X = (self.C).T.dot(self.W)
for it in xrange(from_iter, from_iter+MAX_ITER):
if (display):
print "Iter #:", it+1
U, S, V = svds(X, d)
S = np.diag(S)
V = V.T
self.C = U.dot(np.sqrt(S)).T
self.W = np.sqrt(S).dot(V.T)
if (save[0]):
self.save_CW(save[1], it+1)
# Calculate stochastic gradient matrix
F = np.zeros_like(self.D)
words = np.random.choice(self.D.shape[1], batch_size, p=pw)
for w in words:
contexts = np.random.choice(self.D.shape[0], 4, p=pc_w[:,w])
for c in contexts:
F[c,w] += self.sigma(X[c, w])
negatives = np.random.choice(self.D.shape[0], 5, p=pw)
for c in negatives:
F[c,w] -= 0.2 * self.sigma(X[c, w])
U, _ = qr((X + eta*F).dot(V))
V, S = qr((X + eta*F).T.dot(U))
V = V.T
S = S.T
X = U.dot(S).dot(V)
#######################################################################
############################## Data flow ##############################
#######################################################################
########################## Data to Matrices ###########################
def data_to_matrices(self, sentences, r, k, to_file):
"""
Process raw sentences, create word dictionary, matrix D and matrix B
then save them to file.
"""
self.vocab = self.create_vocabulary(sentences, r)
self.D = self.create_matrix_D(sentences)
self.B = self.create_matrix_B(k)
sorted_vocab = sorted(self.vocab.items(), key=operator.itemgetter(1))
vocab_to_save = np.array([item[0] for item in sorted_vocab])
np.savez(open(to_file, 'wb'), vocab=vocab_to_save, D=self.D, B=self.B)
######################### Matrices to Factors ##########################
def load_matrices(self, from_file):
"""
Load word dictionary, matrix D and matrix B from file.
"""
matrices = np.load(open(from_file, 'rb'))
self.D = matrices['D']
self.B = matrices['B']
self.vocab = {}
for i, word in enumerate(matrices['vocab']):
self.vocab[word] = i
self.inv_vocab = {v: k for k, v in self.vocab.items()}
def save_CW(self, to_folder, iteration):
"""
Save factors C and W (from some iteration) to some folder.
"""
if not os.path.exists(to_folder):
os.makedirs(to_folder)
pref = str(iteration)
np.savez(open(to_folder+'/C'+pref+'.npz', 'wb'), C=self.C)
np.savez(open(to_folder+'/W'+pref+'.npz', 'wb'), W=self.W)
########################### Factors to MF #############################
def load_CW(self, from_folder, iteration):
"""
Load factors C and W (from some iteration) from folder.
"""
if not os.path.exists(from_folder):
raise NameError('No such directory')
pref = str(iteration)
C = np.load(open(from_folder+'/C'+pref+'.npz', 'rb'))['C']
W = np.load(open(from_folder+'/W'+pref+'.npz', 'rb'))['W']
return C, W
def factors_to_MF(self, from_folder, to_file, MAX_ITER, from_iter=0):
"""
Calculate MF for given sequence of factors C and W
and save result to some file.
"""
MFs = np.zeros(MAX_ITER)
for it in xrange(from_iter, from_iter+MAX_ITER):
C, W = self.load_CW(from_folder, it)
MFs[it-from_iter] = self.MF(C, W)
np.savez(open(to_file, 'wb'), MF=MFs)
############################ MF to Figures ############################
def load_MF(self, from_file):
"""
Load MFs from file.
"""
        MFs = np.load(open(from_file, 'rb'))['MF']
return MFs
#######################################################################
######################### Linquistic metrics ##########################
#######################################################################
def word_vector(self, word, W):
"""
Get vector representation of a word.
"""
if word in self.vocab:
vec = W[:,int(self.vocab[word])]
else:
print "No such word in vocabulary."
vec = None
return vec
def nearest_words(self, word, top=20, display=False):
"""
Find the nearest words to the word
according to the cosine similarity.
"""
W = self.W / np.linalg.norm(self.W, axis=0)
if (type(word)==str):
vec = self.word_vector(word, W)
else:
vec = word / np.linalg.norm(word)
cosines = (vec.T).dot(W)
args = np.argsort(cosines)[::-1]
nws = []
for i in xrange(1, top+1):
nws.append(self.inv_vocab[args[i]])
if (display):
print self.inv_vocab[args[i]], round(cosines[args[i]],3)
return nws
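    # Illustrative call (added; the query word is hypothetical): after training,
    # model.nearest_words('usa', top=5, display=True) returns the five nearest
    # vocabulary words by cosine similarity and prints them with their scores.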
| mit | 4,738,436,276,298,297,000 | 30.08209 | 78 | 0.421722 | false |
ZellMechanik-Dresden/ShapeOut | shapeout/gui/controls_filter.py | 1 | 21210 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import dclab
import wx
import wx.lib.agw.hypertreelist as HT
from dclab.definitions import feature_name2label
from ..settings import SettingsFile
from ..session import conversion
from .polygonselect import LineDrawerWindow
from .controls_subpanel import SubPanel
class SubPanelFilter(SubPanel):
def __init__(self, parent, *args, **kwargs):
SubPanel.__init__(self, parent, *args, **kwargs)
self.config = SettingsFile()
self.key = "Filtering"
def _box_rest_filter(self, analysis, key):
"""
Display rest like data event limit
"""
gen = wx.StaticBox(self, label="Other filters")
hbox = wx.StaticBoxSizer(gen, wx.VERTICAL)
items = analysis.GetParameters(key).items()
sortfunc = lambda x: (x[0].replace("max", "2")
.replace("min", "1"))
items.sort(key=sortfunc)
sgen = wx.FlexGridSizer(len(items), 1)
excludeend = ["min", "max"]
excludeis = ["enable filters"]
excludestart = ["polygon", "hierarchy"]
#sgen = wx.BoxSizer(wx.VERTICAL)
for item in items:
ins = True
for it in excludeend:
if item[0].endswith(it):
ins = False
for it in excludeis:
if item[0] == it:
ins = False
for it in excludestart:
if item[0].startswith(it):
ins = False
if not ins:
continue
stemp = self._create_type_wx_controls(analysis,
key, item)
sgen.Add(stemp)
sgen.Layout()
hbox.Add(sgen)
return hbox
def _box_hierarchy_filter(self, analysis, key):
"""
Display hierarchy filtering elements
"""
gen = wx.StaticBox(self, label="Filter Hierarchy")
hbox = wx.StaticBoxSizer(gen, wx.VERTICAL)
sgen = wx.GridBagSizer()
explanation = "Filter hierarchies can be used to apply\n"+\
"multiple filters in sequence or to\n"+\
"compare subpopulations in a data set."
sgen.Add(wx.StaticText(self, label=explanation), (0,0), span=(1,2))
sgen.Add(wx.StaticText(self, label="Select data set"+": "), (1,0),
flag=wx.ALIGN_CENTER_VERTICAL)
items = self.analysis.GetTitles()
self.WXCOMBO_hparent = wx.ComboBox(self, choices=items)
sgen.Add(self.WXCOMBO_hparent, (1,1))
self.WXCOMBO_hparent.Bind(wx.EVT_COMBOBOX, self.OnHierarchySelParent)
sgen.Add(wx.StaticText(self, label="Hierarchy parent"+": "), (2,0))
self.WXTextHParent = wx.StaticText(self, label="")
sgen.Add(self.WXTextHParent, (2,1), flag=wx.EXPAND)
self.WXbtnnew = wx.Button(self, wx.ID_ANY, label="Create hierarchy child")
sgen.Add(self.WXbtnnew, (3,0), span=(1,2), flag=wx.EXPAND)
self.WXbtnnew.Bind(wx.EVT_BUTTON, self.OnHierarchyCreateChild)
sgen.Layout()
hbox.Add(sgen)
if len(items):
self.WXCOMBO_hparent.SetSelection(0)
self.OnHierarchySelParent()
return hbox
def _box_minmax_filter(self, analysis, key):
"""
Display everything with Min/Max
"""
gen = wx.StaticBox(self, label="Box Filters")
hbox = wx.StaticBoxSizer(gen, wx.VERTICAL)
items = analysis.GetParameters(key).items()
sortfunc = lambda x: (x[0].replace("max", "2")
.replace("min", "1"))
items.sort(key=sortfunc)
sgen = wx.FlexGridSizer(len(items), 2)
for item in items:
if item[0].endswith("min"):
if item[0][:-4] in analysis.GetUnusableAxes():
# ignore this item
continue
# find item with max
feat = item[0][:-4]
idmax = [ii[0] for ii in items].index(feat+" max")
itemmax = items[idmax]
a = wx.StaticText(self, label="Range "+feat)
b = wx.TextCtrl(self, value=str(item[1]), name=item[0])
b.SetToolTip(wx.ToolTip("Minimum "+feature_name2label[feat]))
c = wx.TextCtrl(self, value=str(itemmax[1]), name=itemmax[0])
c.SetToolTip(wx.ToolTip("Maximum "+feature_name2label[feat]))
stemp = wx.BoxSizer(wx.HORIZONTAL)
sgen.Add(a, 0, wx.ALIGN_CENTER_VERTICAL)
stemp.Add(b)
stemp.Add(c)
sgen.Add(stemp)
elif item[0].endswith("max"):
# did that before
pass
else:
pass
sgen.Layout()
hbox.Add(sgen)
return hbox
def _box_polygon_filter(self, analysis):
## Polygon box
# layout:
# new selection box
# duplicate (multiple selections)
# invert
# delete
# import
# export preview plot
# export all (own axis w/ label)
polybox = wx.StaticBox(self, name="",
label="Polygon Filters")
# sizers
polysizer = wx.StaticBoxSizer(polybox, wx.HORIZONTAL)
horsizer = wx.BoxSizer(wx.HORIZONTAL)
optsizer = wx.BoxSizer(wx.VERTICAL)
plotsizer = wx.BoxSizer(wx.VERTICAL)
horsizer.Add(optsizer)
horsizer.Add(plotsizer)
polysizer.Add(horsizer)
## left column
# new
new = wx.Button(self, label="New")
new.Bind(wx.EVT_BUTTON, self.OnPolygonWindow)
optsizer.Add(new)
# duplicate
duplicate = wx.Button(self, label="Duplicate")
duplicate.Bind(wx.EVT_BUTTON, self.OnPolygonDuplicate)
optsizer.Add(duplicate)
# edit
invert = wx.Button(self, label="Invert")
invert.Bind(wx.EVT_BUTTON, self.OnPolygonInvert)
optsizer.Add(invert)
# remove
remove = wx.Button(self, label="Remove")
remove.Bind(wx.EVT_BUTTON, self.OnPolygonRemove)
optsizer.Add(remove)
# import
imp = wx.Button(self, label="Import")
imp.Bind(wx.EVT_BUTTON, self.OnPolygonImport)
optsizer.Add(imp)
## right column
# dropdown (plot selection)
choice_be = analysis.GetTitles()
cbg = wx.ComboBox(self, -1, choices=choice_be,
value="None", name="None",
style=wx.CB_DROPDOWN|wx.CB_READONLY)
if choice_be:
cbg.SetSelection(len(choice_be) - 1)
cbg.SetValue(choice_be[-1])
cbg.Bind(wx.EVT_COMBOBOX, self.OnPolygonCombobox)
plotsizer.Add(cbg)
# htree control for polygon filter selection
pol_filters = dclab.PolygonFilter.instances
htreectrl = HT.HyperTreeList(self, name="Polygon Filter Selection",
agwStyle=wx.TR_DEFAULT_STYLE|wx.TR_HIDE_ROOT|\
HT.TR_NO_HEADER|HT.TR_EDIT_LABELS)
htreectrl.DeleteAllItems()
# We are setting names as editable here. However, we cannot do any
# event handling here, so we can only change the name in the underlying
# dclab.polygon_filter instance at certain function calls. That should
# be enough, though. Use self..
htreectrl.AddColumn("", edit=True)
htreectrl.SetColumnWidth(0, 500)
rroot = htreectrl.AddRoot("", ct_type=0)
for p in pol_filters:
# filtit =
htreectrl.AppendItem(rroot, p.name, ct_type=1,
data=p.unique_id)
htreectrl.Bind(HT.EVT_TREE_ITEM_CHECKED, self.OnPolygonHtreeChecked)
# This is covered by self.OnPolygonCombobox()
# FIL = analysis.GetParameters("Filtering")
# if (FIL.has_key("Polygon Filters") and
# p.unique_id in FIL["Polygon Filters"]):
# filtit.Check(True)
htreectrl.SetMinSize((200,120))
plotsizer.Add(htreectrl, 1, wx.EXPAND, 3)
# export
horsizer2 = wx.BoxSizer(wx.HORIZONTAL)
export = wx.Button(self, label="Export")
export.Bind(wx.EVT_BUTTON, self.OnPolygonExport)
horsizer2.Add(export)
# export_all
export_all = wx.Button(self, label="Export All")
export_all.Bind(wx.EVT_BUTTON, self.OnPolygonExportAll)
horsizer2.Add(export_all)
plotsizer.Add(horsizer2)
self._polygon_filter_combo_box = cbg
self._polygon_filter_selection_htree = htreectrl
self.OnPolygonCombobox()
return polysizer
def _set_polygon_filter_names(self):
"""
Set the polygon filter names from the UI in the underlying
dclab.polygon_filter classes.
"""
# get selection from htree
ctrls = self.GetChildren()
# identify controls via their name correspondence in the cfg
for c in ctrls:
if c.GetName() == "Polygon Filter Selection":
# get the selected items
r = c.GetRootItem()
for ch in r.GetChildren():
# get the name.
name = ch.GetText()
unique_id = ch.GetData()
p = dclab.PolygonFilter.get_instance_from_id(unique_id)
p.name = name
    def GetPolygonHtreeChecked(self):
        """ Returns the checked items of the polygon filter tree.
        """
checked = list()
# get selection from htree
ctrls = self.GetChildren()
# identify controls via their name correspondence in the cfg
for c in ctrls:
if c.GetName() == "Polygon Filter Selection":
# get the selected items
r = c.GetRootItem()
for ch in r.GetChildren():
if ch.IsChecked():
checked.append(ch)
# else
return checked
    def GetPolygonHtreeSelected(self):
        """ Returns the htree control and the currently selected item (or None).
        """
# get selection from htree
ctrls = self.GetChildren()
# identify controls via their name correspondence in the cfg
for c in ctrls:
if c.GetName() == "Polygon Filter Selection":
# get the selected items
r = c.GetRootItem()
for ch in r.GetChildren():
if ch.IsSelected():
return c, ch
# else
return c, None
def OnHierarchyCreateChild(self, e=None):
"""
Called when the user wants to create a new hierarchy child.
Will create a new RT-DC dataset that is appended to
`self.analysis`.
In the end, the entire control panel is updated to give the
user access to the new data set.
"""
self._set_polygon_filter_names()
sel = self.WXCOMBO_hparent.GetSelection()
mm = self.analysis[sel]
ds = dclab.new_dataset(mm)
self.analysis.append(ds)
self.funcparent.OnChangePlot()
def OnHierarchySelParent(self, e=None):
"""
Called when an RT-DC dataset is selected in the combobox.
This methods updates the label of `self.WXTextHParent`.
"""
sel = self.WXCOMBO_hparent.GetSelection()
mm = self.analysis[sel]
hp = mm.config["filtering"]["hierarchy parent"]
if hp.lower() == "none":
label = "no parent"
else:
label = mm.hparent.title
self.WXTextHParent.SetLabel(label)
def OnPolygonCombobox(self, e=None):
"""
Called when the user selects a different item in the plot selection
        combobox. We will mark the activated filters for that plot in the
        selection box below.
ComboBox:: self._polygon_filter_combo_box
HTreeCtrl: self._polygon_filter_selection_htree
"""
htreectrl = self._polygon_filter_selection_htree
cmb = self._polygon_filter_combo_box
# get selection
sel = cmb.GetSelection()
# get measurement
mm = self.analysis[sel]
# get filters
r = htreectrl.GetRootItem()
if "polygon filters" in mm.config["filtering"]:
filterlist = mm.config["filtering"]["polygon filters"]
#print(filterlist)
# set visible filters
for item in r.GetChildren():
#print("looking at", item.GetData())
if item.GetData() in filterlist:
#print("will check")
htreectrl.CheckItem(item, True)
else:
#print("wont check")
htreectrl.CheckItem(item, False)
else:
# Uncheck everything, because mm does not know Filtering
for item in r.GetChildren():
htreectrl.CheckItem(item, False)
def OnPolygonDuplicate(self, e=None):
self._set_polygon_filter_names()
_c, ch = self.GetPolygonHtreeSelected()
if ch is None:
return
unique_id = ch.GetData()
p = dclab.PolygonFilter.get_instance_from_id(unique_id)
dclab.PolygonFilter(points=p.points,
axes=p.axes,
name=p.name+" (copy)")
self.UpdatePanel()
def OnPolygonExport(self, e=None, export_all=False):
self._set_polygon_filter_names()
if not export_all:
_c, ch = self.GetPolygonHtreeSelected()
if ch is None:
return
dlg = wx.FileDialog(self, "Open polygon file",
self.config.get_path(name="Polygon"), "",
"Shape-Out polygon file (*.poly)|*.poly", wx.FD_SAVE)
if dlg.ShowModal() == wx.ID_OK:
fname = dlg.GetPath().encode("utf-8")
self.config.set_path(dlg.GetDirectory(),
name="Polygon")
dlg.Destroy()
else:
self.config.set_path(dlg.GetDirectory(),
name="Polygon")
dlg.Destroy()
return # nothing more to do here
if not fname.endswith(".poly"):
fname += ".poly"
if not export_all:
unique_id = ch.GetData()
p = dclab.PolygonFilter.get_instance_from_id(unique_id)
p.save(fname)
else:
# export all
dclab.PolygonFilter.save_all(fname)
def OnPolygonExportAll(self, e=None):
self._set_polygon_filter_names()
if len(dclab.PolygonFilter.instances) != 0:
self.OnPolygonExport(export_all=True)
def OnPolygonHtreeChecked(self, e=None):
"""
This function is called when an item in the htreectrl is checked
or unchecked. We apply the corresponding filters to the underlying
RT-DC data set live.
ComboBox:: self._polygon_filter_combo_box
HTreeCtrl: self._polygon_filter_selection_htree
"""
htreectrl = self._polygon_filter_selection_htree
cmb = self._polygon_filter_combo_box
# get selection
sel = cmb.GetSelection()
# get measurement
mm = self.analysis[sel]
# get filters
newfilterlist = list()
# set visible filters
r = htreectrl.GetRootItem()
for item in r.GetChildren():
if item.IsChecked():
#print(item.GetData(), "checked")
newfilterlist.append(item.GetData())
else:
                #print(item.GetData(), "unchecked")
pass
# apply filters to data set
mm.config["filtering"]["polygon filters"] = newfilterlist
def OnPolygonImport(self, e=None):
dlg = wx.FileDialog(self, "Open polygon file",
self.config.get_path(name="Polygon"), "",
"Shape-Out polygon file (*.poly)|*.poly", wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
fname = dlg.GetPath().encode("utf-8")
self.config.set_path(dlg.GetDirectory(),
name="Polygon")
dlg.Destroy()
else:
self.config.set_path(dlg.GetDirectory(),
name="Polygon")
dlg.Destroy()
return # nothing more to do here
if not fname.endswith(".poly"):
fname += ".poly"
# Convert polygon filters from old exports
newfname = conversion.convert_polygon(infile=fname)
dclab.PolygonFilter.import_all(newfname)
self.UpdatePanel()
# cleanup
os.remove(newfname)
def OnPolygonInvert(self, e=None):
self._set_polygon_filter_names()
_c, ch = self.GetPolygonHtreeSelected()
if ch is None:
return
unique_id = ch.GetData()
p = dclab.PolygonFilter.get_instance_from_id(unique_id)
dclab.PolygonFilter(points=p.points,
axes=p.axes,
name=p.name+" (inverted)",
inverted=(not p.inverted),
)
self.UpdatePanel()
def OnPolygonRemove(self, e=None):
c, ch = self.GetPolygonHtreeSelected()
if ch is None:
return
unique_id = ch.GetData()
dclab.PolygonFilter.remove(unique_id)
self.analysis.PolygonFilterRemove(unique_id)
c.Delete(ch)
self.funcparent.frame.PlotArea.Plot(self.analysis)
def OnPolygonWindow(self, e=None):
""" Called when user wants to add a new polygon filter """
ldw = LineDrawerWindow(self.funcparent,
self.funcparent.OnPolygonFilter)
# get plot that we want to use
name = self._polygon_filter_combo_box.GetValue()
idn = self._polygon_filter_combo_box.GetSelection()
if idn < 0:
idn = 0
xax, yax = self.analysis.GetPlotAxes()
mm = self.analysis[idn]
ldw.show_scatter(mm, xax=xax, yax=yax)
ldw.Show()
def UpdatePanel(self, analysis=None):
if analysis is None:
# previous analysis is used
analysis = self.analysis
if hasattr(self, "_polygon_filter_combo_box"):
old_meas_selection = self._polygon_filter_combo_box.GetSelection()
else:
old_meas_selection = 0
self.analysis = analysis
self.ClearSubPanel()
sizer = wx.BoxSizer(wx.HORIZONTAL)
# Box filters
fbox = self._box_minmax_filter(analysis, "Filtering")
sizer.Add(fbox)
sizerv = wx.BoxSizer(wx.VERTICAL)
sizer.Add(sizerv)
# Polygon filters
polysizer = self._box_polygon_filter(analysis)
sizerv.Add(polysizer)
# Hierarchy filters:
rbox = self._box_hierarchy_filter(analysis, "Filtering")
sizerv.Add(rbox)
# Rest filters:
rbox = self._box_rest_filter(analysis, "Filtering")
sizerv.Add(rbox)
## Polygon box
# layout:
# new selection box
# duplicate (multiple selections)
# edit
# delete
# import
# export preview plot
# export all (own axis w/ label)
#
## Polygon selection
# line_drawing example
# - selection: choose which data to be displayed
# - on press enter, draw polygon and allow new selection.
# - buttons: add, clear, clear all
# (- show other polygons with selection box)
vertsizer = wx.BoxSizer(wx.VERTICAL)
filten = analysis.GetParameters("filtering")["enable filters"]
cb = self._create_type_wx_controls(analysis,
"Filtering",
["enable filters", filten],)
vertsizer.Add(cb)
btn_apply = wx.Button(self, label="Apply")
## TODO:
# write function in this class that gives ControlPanel a new
# analysis, such that OnChangeFilter becomes shorter.
self.Bind(wx.EVT_BUTTON, self.funcparent.OnChangeFilter, btn_apply)
vertsizer.Add(btn_apply)
btn_reset = wx.Button(self, label="Reset")
self.Bind(wx.EVT_BUTTON, self.OnReset, btn_reset)
vertsizer.Add(btn_reset)
# Set the previously selected measurement
self._polygon_filter_combo_box.SetSelection(old_meas_selection)
# Make the htree control below the combobox aware of this selection
self.OnPolygonCombobox()
sizer.Add(vertsizer)
self.BindEnableName(ctrl_source="limit events auto",
value=False,
ctrl_targets=["limit events"])
self.SetSizer(sizer)
sizer.Fit(self)
| gpl-2.0 | 5,554,710,041,866,036,000 | 34.468227 | 82 | 0.546959 | false |
logston/pester | models.py | 1 | 5903 | import re
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import timezone
# phone number regex
pnum_pattern = re.compile(r'[0-9]{10}')
def validate_pnum(pnum):
"""Raise validation error if not a 10 digit phone number"""
if not re.match(pnum_pattern, pnum):
raise ValidationError(u'%s is not a valid phone number'%pnum)
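# Illustrative examples (added): validate_pnum('5551234567') passes, while
# validate_pnum('555-123-4567') raises ValidationError. Note the pattern is
# unanchored, so re.match only requires the value to *start* with 10 digits;
# the max_length=10 on the model fields is what rejects longer strings.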
class API(models.Model):
    """Model detailing the API params"""
name = models.CharField(max_length=32, unique=True)
key = models.CharField(max_length=200)
params = models.TextField(blank=True)
def __unicode__(self):
return self.name
class Carrier(models.Model):
"""Model connecting cellular SMS providers to email addresses"""
name = models.CharField(max_length=32)
gateway = models.CharField(max_length=64)
updated = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.name
class User(models.Model):
"""Model describing a user of the Pester app"""
first_name = models.CharField(max_length=16)
last_name = models.CharField(max_length=16)
email = models.EmailField(unique=True)
join_date = models.DateTimeField(auto_now_add=True)
phone_number = models.CharField(
validators=[validate_pnum],
unique=True,
max_length=10)
carrier = models.ForeignKey(Carrier)
def __unicode__(self):
return (self.last_name+', '+self.first_name+
' -- e['+self.email+'] p['+self.phone_number+']')
class Recipient(models.Model):
    """Model describing a potential recipient of a pester"""
first_name = models.CharField(max_length=16)
last_name = models.CharField(max_length=16)
email = models.EmailField(unique=True)
phone_number = models.CharField(
validators=[validate_pnum],
unique=True,
max_length=10)
carrier = models.ForeignKey(Carrier)
created_by = models.ForeignKey(User)
def __unicode__(self):
return (self.last_name+', '+self.first_name+
' -- e['+self.email+'] p['+self.phone_number+']')
class Pattern(models.Model):
"""Model describing a sending pattern for a Pestering"""
name = models.CharField(max_length=32)
description = models.CharField(max_length=256)
code = models.CharField(max_length=32)
def __unicode__(self):
return self.name
def __str__(self):
return self.__unicode__()
class Pestering(models.Model):
"""Model describing a pestering from User to Recipient"""
user = models.ForeignKey(User)
recipient = models.ForeignKey(Recipient)
search_term = models.CharField(max_length=64)
pattern = models.ForeignKey(Pattern)
start_time = models.DateTimeField()
end_time = models.DateTimeField()
NEITHER = 'N'
EMAIL = 'E'
TEXT = 'T'
BOTH = 'B'
NOTIFY_METHODS = (
(NEITHER, 'None!'),
(EMAIL, 'By Email'),
(TEXT, 'By Text'),
(BOTH, 'By Text and Email'),
)
notify_user_method = models.CharField(
max_length=1,
choices=NOTIFY_METHODS,
default=EMAIL)
notify_recipient_method = models.CharField(
max_length=1,
choices=NOTIFY_METHODS,
default=TEXT)
title = models.CharField(max_length=140)
OFF, MEDIUM, HIGH = 'O', 'M', 'H'
ADULT_LEVELS = (
(OFF, 'Off'),
(MEDIUM, 'Medium'),
(HIGH, 'High'))
adult_safety_level = models.CharField(
max_length=1,
choices=ADULT_LEVELS,
default=MEDIUM)
def __unicode__(self):
return ''.join((str(self.user.first_name),
' -> ',
str(self.recipient.first_name),
' | ',
str(self.search_term),
' | ',
str(self.pattern)))
class APICall(models.Model):
"""Model to record api calls"""
api = models.ForeignKey(API, null=True)
call_time = models.DateTimeField(auto_now_add=True)
pestering = models.ForeignKey(Pestering, null=True)
def __unicode__(self):
return str(self.api) + ' | ' + str(self.pestering.search_term)
class ImageData(models.Model):
    """Model describing image data stored for a search term"""
search_term = models.CharField(max_length=64)
url = models.URLField(unique=True)
file_type = models.CharField(max_length=64)
width = models.PositiveSmallIntegerField()
height = models.PositiveSmallIntegerField()
OFF, MEDIUM, HIGH = 'O', 'M', 'H'
ADULT_LEVELS = (
(OFF, 'Off'),
(MEDIUM, 'Medium'),
(HIGH, 'High'))
adult_safety_level = models.CharField(
max_length=1,
choices=ADULT_LEVELS,
default=MEDIUM)
def __unicode__(self):
return self.search_term+' ('+self.url+')'
class PesteringManagerRun(models.Model):
"""Model to record cron jobs and their success"""
run_time = models.DateTimeField(auto_now_add=True)
completed = models.NullBooleanField()
def __unicode__(self):
return str(self.run_time)
class PesteringAttempt(models.Model):
"""Model to record attempted Pesterings"""
pestering = models.ForeignKey(Pestering)
pestering_manager_run = models.ForeignKey(PesteringManagerRun)
image = models.ForeignKey(ImageData, null=True, blank=True, default=None)
attempt_time = models.DateTimeField(auto_now_add=True)
success = models.NullBooleanField()
def __unicode__(self):
return str(self.pestering)+' sent at '+str(self.attempt_time)
class PesteringException(models.Model):
"""Model to record exceptions of Pesterings"""
pestering_attempt = models.ForeignKey(PesteringAttempt)
exception_traceback = models.TextField()
def __unicode__(self):
return 'Exception for Pestering Attempt '+str(self.pestering_attempt)
| gpl-2.0 | -8,974,291,282,706,841,000 | 31.977654 | 77 | 0.623581 | false |
GoogleCloudPlatform/mimus-game-simulator | mimus_cfg.py | 1 | 3414 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=invalid-name
"""Config dictionary for mimus."""
import os
cfg = c = {}
# GCP Auth parameters
c['gcp'] = {}
c['gcp']['project'] = os.getenv('GCP_PROJECT', 'joeholley-mimus01')
# db api parameters (for choosing different db backends)
c['db_api'] = {}
c['db_api']['dir'] = 'db_api'
# db connection parameters
c['db_con'] = {}
c['db_con']['timeout'] = 30
# DB API Cloud Pub/Sub connection parameters
c['pubsub'] = {}
c['pubsub']['topic'] = os.getenv('DB_WORKER_TOPIC', 'queriestoprocess')
c['pubsub']['sub'] = os.getenv('DB_WORKER_SUB', 'dbworkersub')
# DB API Redis connection parameters
c['redis_con'] = {}
c['redis_con']['db'] = 0
# Docker env var format: REDIS_PORT=tcp://172.17.0.2:6379
redis_host, redis_port = os.getenv(
'REDIS_PORT', 'tcp://mimus-redis:6379').split('/')[-1].split(':')
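# Illustrative result (using the default above): 'tcp://mimus-redis:6379' splits
# into redis_host == 'mimus-redis' and redis_port == '6379' (cast to int below).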
c['redis_con']['hostname'] = redis_host
c['redis_con']['port'] = int(redis_port)
c['redis_con']['password'] = '9dc1b3ae-584c-434e-b899-da2c8ad093fb'
# Player parameters
c['player'] = {}
c['player']['initial_cards'] = {}
c['player']['initial_cards']['std'] = 5
c['player']['initial_cards']['stone'] = 1
c['player']['initial_loadout'] = {}
c['player']['initial_loadout']['stones'] = 5
c['player']['initial_loadout']['points'] = 1000
c['player']['initial_loadout']['slots'] = 50
c['player']['initial_loadout']['stamina'] = 5
# Card (unit) parameters
c['card'] = {}
c['card']['xp_limit'] = 10000 # Max XP for a unit
# Parameters for evolving ('consuming') cards
c['evolve'] = {}
c['evolve']['min_time'] = 3 # min time this action will take
c['evolve']['max_time'] = 3 # max time this action will take
c['evolve']['fail_time'] = 3 # time this action takes if it fails
c['evolve']['min_cards'] = 2 # min number of cards consumed
c['evolve']['max_cards'] = 5 # max number of cards consumed
# Parameters for leveling ('combining') cards
c['level'] = {}
c['level']['min_time'] = 3 # min time this action will take
c['level']['max_time'] = 3 # max time this action will take
c['level']['fail_time'] = 3 # time this action takes if it fails
c['level']['min_cards'] = 1 # min number of cards consumed
c['level']['max_cards'] = 5 # max number of cards consumed
# Stage parameters
c['stage'] = {}
c['stage']['min_time'] = 30 # min time this action will take
c['stage']['max_time'] = 90 # max time this action will take
c['stage']['fail_time'] = 30 # time this action takes if it fails
c['stage']['failure_chance'] = 0.90 # chance to simulate player failing stage
c['stage']['points_per_run'] = 10 # friends points earned per stage played
# Loot tables
c['loot_tables'] = {}
c['loot_tables']['std'] = {'drop_chance': 0.35, 'min': 1, 'max': 500}
c['loot_tables']['point'] = {'drop_chance': 1.00, 'min': 1, 'max': 750}
c['loot_tables']['stone'] = {'drop_chance': 1.00, 'min': 500, 'max': 1000}
| apache-2.0 | 6,784,159,353,470,306,000 | 36.933333 | 78 | 0.654657 | false |
akariv/redash | redash/handlers/api.py | 1 | 6007 | from flask_restful import Api
from werkzeug.wrappers import Response
from flask import make_response
from redash.utils import json_dumps
from redash.handlers.base import org_scoped_rule
from redash.handlers.alerts import AlertResource, AlertListResource, AlertSubscriptionListResource, AlertSubscriptionResource
from redash.handlers.dashboards import DashboardListResource, RecentDashboardsResource, DashboardResource, DashboardShareResource
from redash.handlers.data_sources import DataSourceTypeListResource, DataSourceListResource, DataSourceSchemaResource, DataSourceResource, DataSourcePauseResource
from redash.handlers.events import EventResource
from redash.handlers.queries import QueryRefreshResource, QueryListResource, QueryRecentResource, QuerySearchResource, QueryResource
from redash.handlers.query_results import QueryResultListResource, QueryResultResource, JobResource
from redash.handlers.users import UserResource, UserListResource, UserInviteResource, UserResetPasswordResource
from redash.handlers.visualizations import VisualizationListResource
from redash.handlers.visualizations import VisualizationResource
from redash.handlers.widgets import WidgetResource, WidgetListResource
from redash.handlers.groups import GroupListResource, GroupResource, GroupMemberListResource, GroupMemberResource, \
GroupDataSourceListResource, GroupDataSourceResource
from redash.handlers.destinations import DestinationTypeListResource, DestinationResource, DestinationListResource
class ApiExt(Api):
def add_org_resource(self, resource, *urls, **kwargs):
urls = [org_scoped_rule(url) for url in urls]
return self.add_resource(resource, *urls, **kwargs)
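# Clarifying note (added): add_org_resource passes every URL rule through
# org_scoped_rule (imported above), so e.g. '/api/alerts' is registered under an
# organization-scoped path, assumed to look like '/<org_slug>/api/alerts'
# depending on how org_scoped_rule is defined.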
api = ApiExt()
@api.representation('application/json')
def json_representation(data, code, headers=None):
# Flask-Restful checks only for flask.Response but flask-login uses werkzeug.wrappers.Response
if isinstance(data, Response):
return data
resp = make_response(json_dumps(data), code)
resp.headers.extend(headers or {})
return resp
api.add_org_resource(AlertResource, '/api/alerts/<alert_id>', endpoint='alert')
api.add_org_resource(AlertSubscriptionListResource, '/api/alerts/<alert_id>/subscriptions', endpoint='alert_subscriptions')
api.add_org_resource(AlertSubscriptionResource, '/api/alerts/<alert_id>/subscriptions/<subscriber_id>', endpoint='alert_subscription')
api.add_org_resource(AlertListResource, '/api/alerts', endpoint='alerts')
api.add_org_resource(DashboardListResource, '/api/dashboards', endpoint='dashboards')
api.add_org_resource(RecentDashboardsResource, '/api/dashboards/recent', endpoint='recent_dashboards')
api.add_org_resource(DashboardResource, '/api/dashboards/<dashboard_slug>', endpoint='dashboard')
api.add_org_resource(DashboardShareResource, '/api/dashboards/<dashboard_id>/share', endpoint='dashboard_share')
api.add_org_resource(DataSourceTypeListResource, '/api/data_sources/types', endpoint='data_source_types')
api.add_org_resource(DataSourceListResource, '/api/data_sources', endpoint='data_sources')
api.add_org_resource(DataSourceSchemaResource, '/api/data_sources/<data_source_id>/schema')
api.add_org_resource(DataSourcePauseResource, '/api/data_sources/<data_source_id>/pause')
api.add_org_resource(DataSourceResource, '/api/data_sources/<data_source_id>', endpoint='data_source')
api.add_org_resource(GroupListResource, '/api/groups', endpoint='groups')
api.add_org_resource(GroupResource, '/api/groups/<group_id>', endpoint='group')
api.add_org_resource(GroupMemberListResource, '/api/groups/<group_id>/members', endpoint='group_members')
api.add_org_resource(GroupMemberResource, '/api/groups/<group_id>/members/<user_id>', endpoint='group_member')
api.add_org_resource(GroupDataSourceListResource, '/api/groups/<group_id>/data_sources', endpoint='group_data_sources')
api.add_org_resource(GroupDataSourceResource, '/api/groups/<group_id>/data_sources/<data_source_id>', endpoint='group_data_source')
api.add_org_resource(EventResource, '/api/events', endpoint='events')
api.add_org_resource(QuerySearchResource, '/api/queries/search', endpoint='queries_search')
api.add_org_resource(QueryRecentResource, '/api/queries/recent', endpoint='recent_queries')
api.add_org_resource(QueryListResource, '/api/queries', endpoint='queries')
api.add_org_resource(QueryRefreshResource, '/api/queries/<query_id>/refresh', endpoint='query_refresh')
api.add_org_resource(QueryResource, '/api/queries/<query_id>', endpoint='query')
api.add_org_resource(QueryResultListResource, '/api/query_results', endpoint='query_results')
api.add_org_resource(QueryResultResource,
'/api/query_results/<query_result_id>',
'/api/queries/<query_id>/results.<filetype>',
'/api/queries/<query_id>/results/<query_result_id>.<filetype>',
endpoint='query_result')
api.add_org_resource(JobResource, '/api/jobs/<job_id>', endpoint='job')
api.add_org_resource(UserListResource, '/api/users', endpoint='users')
api.add_org_resource(UserResource, '/api/users/<user_id>', endpoint='user')
api.add_org_resource(UserInviteResource, '/api/users/<user_id>/invite', endpoint='user_invite')
api.add_org_resource(UserResetPasswordResource, '/api/users/<user_id>/reset_password', endpoint='user_reset_password')
api.add_org_resource(VisualizationListResource, '/api/visualizations', endpoint='visualizations')
api.add_org_resource(VisualizationResource, '/api/visualizations/<visualization_id>', endpoint='visualization')
api.add_org_resource(WidgetListResource, '/api/widgets', endpoint='widgets')
api.add_org_resource(WidgetResource, '/api/widgets/<int:widget_id>', endpoint='widget')
api.add_org_resource(DestinationTypeListResource, '/api/destinations/types', endpoint='destination_types')
api.add_org_resource(DestinationResource, '/api/destinations/<destination_id>', endpoint='destination')
api.add_org_resource(DestinationListResource, '/api/destinations', endpoint='destinations')
| bsd-2-clause | 3,339,268,984,470,864,400 | 64.293478 | 162 | 0.786915 | false |
ExcaliburZero/r-dailyprogrammer-solutions | 2015/11/23-Funny-Plant.py | 1 | 1154 | # Problem: 242 [Easy] Funny Plant
# https://www.reddit.com/r/dailyprogrammer/comments/3twuwf/20151123_challenge_242_easy_funny_plant/
# Author: ExcaliburZero
# License: MIT
import fileinput
def main():
# Iterate over each of the input lines
for line in fileinput.input():
# Get the input values
line_contents = line.split()
people = int(line_contents[0])
starting_fruits = int(line_contents[1])
# Create a list of the plants
plants = [0] * starting_fruits
# Create a counter for weeks
weeks = 1
# Keep simulating weeks until there is enough food
while(people > sum(plants)):
# Increment the week counter
weeks += 1
# Increase the growth amount of each of the plants
for i in range(len(plants)):
plants[i] += 1
# Record the number of seeds
seeds = sum(plants)
# Create new plants from the seeds
plants = plants + [0] * seeds
# Print out the calculated result
print(weeks)
# Run the main function
if __name__ == '__main__':
main()
| mit | 2,605,174,256,789,166,000 | 30.189189 | 99 | 0.587522 | false |
Amandil/django-tech-test | loans/tests/tests_ui_apply_loan.py | 1 | 4471 | import unittest, os
from django.test import TestCase
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from django.test import LiveServerTestCase
TITLE_PREFIX = 'GrowthStreet Loans - '
from .pages.sign_in import SignInPage
from .pages.register import RegisterPage
from .pages.add_business import AddBusinessPage
from .pages.apply_loan import ApplyLoanPage
from loans.models import Business
class TestRegistration(LiveServerTestCase):
def setUp(self):
self.driver = webdriver.PhantomJS()
super(TestRegistration, self).setUp()
def tearDown(self):
self.driver.quit()
super(TestRegistration, self).tearDown()
'''
Users must be able to add a business and apply for a loan
Then, using the same business, apply for another one
'''
def test_journey_apply_loan_bussiness(self):
# Registering a new user
self.driver.get(self.live_server_url + "/")
SignInPage.click_register_link(self.driver)
RegisterPage.complete_form(self.driver, 'John', 'Doe', '[email protected]', 'correct-horse-battery-staple', '+44 7765 222 4567')
# Signing in
SignInPage.sign_in(self.driver, '[email protected]', 'correct-horse-battery-staple')
# User should be logged in and dashboard should work right after registration
self.driver.get(self.live_server_url + "/dashboard")
self.assertEquals(TITLE_PREFIX + 'Homepage', self.driver.title);
# Initially, no loans should be present
loan_list_test = self.get_element("loan-list").text
self.assertTrue("don't seem to have" in loan_list_test)
# Starting to apply for a loan
self.get_element('apply-loan').click()
# We should end up on the first step of the application
self.assertEquals(TITLE_PREFIX + 'Loan Application - Step 1', self.driver.title);
# Initially, no businesses should exist
loan_list_test = self.get_element("business-list").text
self.assertTrue("don't seem to have" in loan_list_test)
# Adding a business
self.get_element('add-business').click()
# We should end up on the add business page
self.assertEquals(TITLE_PREFIX + 'Loan Application - Add Business', self.driver.title);
# Completing the form
crn = "09264172"
AddBusinessPage.complete_form(self.driver, crn, 'ACME Inc.', 'Retail', 'Building and Number', 'Street', 'London', 'W8 5EH')
# We should end up on the loan form page
self.assertTrue(TITLE_PREFIX + 'Loan Application - Step 2', self.driver.title)
self.driver.get(self.live_server_url + "/apply/loan-application/3/" + crn)
ApplyLoanPage.complete_form(self.driver, "20000", "2018-12-02", "Sample reason here")
# We should end up on the success page
self.assertTrue(TITLE_PREFIX + 'Loan Application - Success', self.driver.title)
# Return button should take us to homepage
self.get_element("return").click()
self.assertEquals(TITLE_PREFIX + 'Homepage', self.driver.title);
# Homepage should now have our loans
loan_list_test = self.get_element("loan-list").text
self.assertTrue("ACME Inc." in loan_list_test)
# Applying for a now loan
self.get_element('apply-loan').click()
# Select business page should have the previously added business
business_list = self.get_element("business-list").text
self.assertTrue("ACME Inc." in business_list)
# Clicking on said business
self.get_element(crn).click()
# We should end up on the loan form page
self.assertTrue(TITLE_PREFIX + 'Loan Application - Step 2', self.driver.title)
self.driver.get(self.live_server_url + "/apply/loan-application/3/" + crn)
ApplyLoanPage.complete_form(self.driver, "20000", "2018-12-02", "Sample reason here")
# We should end up on the success page
self.assertTrue(TITLE_PREFIX + 'Loan Application - Success', self.driver.title)
# Return button should take us to homepage
self.get_element("return").click()
self.assertEquals(TITLE_PREFIX + 'Homepage', self.driver.title);
# Shortcut for find_element_by_id
def get_element(self, id):
return self.driver.find_element_by_id(id)
| bsd-3-clause | -1,086,600,208,650,795,400 | 37.543103 | 136 | 0.674122 | false |
jacobwindsor/pubchem-ranker | manage.py | 1 | 1837 | import sys
from flask_script import Manager
from CompoundRanker import app
from CompoundRanker.DataManipulators.CIDGatherer import CIDGatherer
from CompoundRanker.DataManipulators.PubChemAssayCounter import PubChemAssayCounter
from CompoundRanker.DataManipulators.PubChemPathwayCounter import PubChemPathwayCounter
from CompoundRanker.DataManipulators.DataGatherer import DataGatherer
from CompoundRanker.database import init_db, query_db
manager = Manager(app)
@manager.command
def initdb():
"""Initializes the database."""
init_db()
print('Initialized the database.')
@manager.command
def fillmetabs(path, dataset):
"""
Fill the metabolites table with data.
"""
# Get the CAS with CIDs
file = open(path, 'r')
gatherer = DataGatherer(file)
data = gatherer.harvest()
# Insert
gatherer.save(data, dataset)
print("Saved")
@manager.command
def fillcids(dataset):
"""Gather the CIDs from PubChem for the metabolites and save to pubchem_compounds table"""
query = "SELECT id FROM datasets WHERE name = ?"
try:
dataset_id = str(query_db(query, [dataset])[0]['id'])
except TypeError:
raise TypeError("No dataset with name '%s'" % dataset)
gatherer = CIDGatherer()
data = gatherer.harvest(dataset_id)
gatherer.save(data)
print("Saved!")
@manager.command
def fillcounts(dataset):
"""Run the counter (ranker) for the metabolites and save to database"""
query = "SELECT id FROM datasets WHERE name = ?"
try:
dataset_id = str(query_db(query, [dataset])[0]['id'])
except TypeError:
raise TypeError("No dataset with name '%s'" % dataset)
PubChemPathwayCounter().count(dataset_id).save()
PubChemAssayCounter().count(dataset_id).save()
print("Saved!")
if __name__ == "__main__":
manager.run()
| mit | -5,863,559,762,759,437,000 | 28.15873 | 94 | 0.702232 | false |
ooici/coi-services | ion/services/sa/tcaa/test/test_terrestrial_endpoint.py | 1 | 31848 | #!/usr/bin/env python
"""
@package ion.services.sa.tcaa.test.test_terrestrial_endpoint
@file ion/services/sa/tcaa/test/test_terrestrial_endpoint.py
@author Edward Hunter
@brief Test cases for 2CAA terrestrial endpoint.
"""
__author__ = 'Edward Hunter'
# Pyon log and config objects.
from pyon.public import log
from pyon.public import CFG
# Standard imports.
import time
import os
import signal
import time
import unittest
from datetime import datetime
import uuid
import socket
import re
import random
# 3rd party imports.
import gevent
from gevent import spawn
from gevent.event import AsyncResult
from nose.plugins.attrib import attr
from mock import patch
# Pyon unittest support.
from pyon.util.int_test import IonIntegrationTestCase
from pyon.util.unit_test import PyonTestCase
from pyon.core.bootstrap import get_sys_name
from pyon.public import IonObject
from pyon.event.event import EventPublisher, EventSubscriber
from pyon.util.context import LocalContextMixin
from ion.services.sa.tcaa.terrestrial_endpoint import TerrestrialEndpoint
from ion.services.sa.tcaa.terrestrial_endpoint import TerrestrialEndpointClient
from interface.services.icontainer_agent import ContainerAgentClient
from interface.objects import TelemetryStatusType
from ion.services.sa.tcaa.r3pc import R3PCServer
from ion.services.sa.tcaa.r3pc import R3PCClient
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_terrestrial_endpoint.py:TestTerrestrialEndpoint
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_terrestrial_endpoint.py:TestTerrestrialEndpoint.test_process_queued
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_terrestrial_endpoint.py:TestTerrestrialEndpoint.test_process_online
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_terrestrial_endpoint.py:TestTerrestrialEndpoint.test_remote_late
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_terrestrial_endpoint.py:TestTerrestrialEndpoint.test_get_clear_queue
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_terrestrial_endpoint.py:TestTerrestrialEndpoint.test_pop_pending_queue
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_terrestrial_endpoint.py:TestTerrestrialEndpoint.test_repeated_clear_pop
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_terrestrial_endpoint.py:TestTerrestrialEndpoint.test_get_pending
# bin/nosetests -s -v ion/services/sa/tcaa/test/test_terrestrial_endpoint.py:TestTerrestrialEndpoint.test_persistent_queue
class FakeProcess(LocalContextMixin):
"""
A fake process used because the test case is not an ion process.
"""
name = ''
id=''
process_type = ''
@attr('INT', group='sa')
@patch.dict(CFG, {'endpoint':{'receive':{'timeout': 60}}})
class TestTerrestrialEndpoint(IonIntegrationTestCase):
"""
Test cases for 2CAA terrestrial endpoint.
"""
def setUp(self):
"""
Setup fake remote components.
Start remote server.
Set internal configuration and test variables.
Start container.
Start services.
Spawn endpoint.
Create and start subscribers.
"""
# Create fake remote client and server.
# Add clean up to shut down properly.
# Start remote server on a random port.
self._remote_server = R3PCServer(self.consume_req, self.remote_server_close)
self._remote_client = R3PCClient(self.consume_ack, self.remote_client_close)
self.addCleanup(self._remote_server.stop)
self.addCleanup(self._remote_client.stop)
self._other_port = self._remote_server.start('*', 0)
log.debug('Remote server binding to *:%i', self._other_port)
# Set internal variables.
self._other_host = 'localhost'
self._xs_name = 'remote1'
self._svc_name = 'terrestrial_endpoint'
self._listen_name = self._svc_name + self._xs_name
self._platform_resource_id = 'abc123'
self._resource_id = 'fake_id'
self._rmt_svc_name = 'fake_svc'
self._no_requests = 10
self._requests_sent = {}
self._results_recv = {}
self._workers = []
self._done_evt = AsyncResult()
self._queue_mod_evts = []
self._cmd_tx_evts = []
self._telem_evts = []
self._no_telem_evts = 0
self._no_queue_mod_evts = 0
self._no_cmd_tx_evts = 0
self._done_queue_mod_evts = AsyncResult()
self._done_telem_evts = AsyncResult()
self._done_cmd_tx_evts = AsyncResult()
# Start container.
log.debug('Staring capability container.')
self._start_container()
# Bring up services in a deploy file (no need to message)
log.info('Staring deploy services.')
self.container.start_rel_from_url('res/deploy/r2deploy.yml')
# Create a container client.
log.debug('Creating container client.')
self._container_client = ContainerAgentClient(node=self.container.node,
name=self.container.name)
# The following spawn config creates the process with the remote
# name tagged to the service name.
"""
listen_name = terrestrial_endpointremote1
2012-10-10 11:34:46,654 DEBUG ion.services.sa.tcaa.terrestrial_endpoint recv name: NP (ion_test_8257ab,terrestrial_endpointremote1,B: terrestrial_endpointremote1)
2012-10-10 11:34:46,654 DEBUG ion.services.sa.tcaa.terrestrial_endpoint startup listener recv name: NP (ion_test_8257ab,terrestrial_endpointremote1,B: terrestrial_endpointremote1)
2012-10-10 11:34:46,654 DEBUG ion.services.sa.tcaa.terrestrial_endpoint startup listener recv name: NP (ion_test_8257ab,Edwards-MacBook-Pro_local_2624.33,B: Edwards-MacBook-Pro_local_2624.33)
"""
# Create agent config.
endpoint_config = {
'other_host' : self._other_host,
'other_port' : self._other_port,
'this_port' : 0,
'xs_name' : self._xs_name,
'platform_resource_id' : self._platform_resource_id,
'process' : {
'listen_name' : self._listen_name
}
}
# Spawn the terrestrial enpoint process.
log.debug('Spawning terrestrial endpoint process.')
self._te_pid = self._container_client.spawn_process(
name='remote_endpoint_1',
module='ion.services.sa.tcaa.terrestrial_endpoint',
cls='TerrestrialEndpoint',
config=endpoint_config)
log.debug('Endpoint pid=%s.', str(self._te_pid))
# Create an endpoint client.
# The to_name may be either the process pid or
# the listen_name, which for this remote bridge
# is svc_name + remote_name as above.
self.te_client = TerrestrialEndpointClient(
process=FakeProcess(),
to_name=self._listen_name)
log.debug('Got te client %s.', str(self.te_client))
# Remember the terrestrial port.
self._this_port = self.te_client.get_port()
# Start the event publisher.
self._event_publisher = EventPublisher()
# Start the event subscriber.
self._event_subscriber = EventSubscriber(
event_type='PlatformEvent',
callback=self.consume_event,
origin=self._xs_name)
self._event_subscriber.start()
self._event_subscriber._ready_event.wait(timeout=CFG.endpoint.receive.timeout)
self.addCleanup(self._event_subscriber.stop)
# Start the result subscriber.
self._result_subscriber = EventSubscriber(
event_type='RemoteCommandResult',
origin=self._resource_id,
callback=self.consume_event)
self._result_subscriber.start()
self._result_subscriber._ready_event.wait(timeout=CFG.endpoint.receive.timeout)
self.addCleanup(self._result_subscriber.stop)
def consume_event(self, evt, *args, **kwargs):
"""
Test callback for events.
"""
log.debug('Got event: %s, args: %s, kwargs: %s',
str(evt), str(args), str(kwargs))
if evt.type_ == 'PublicPlatformTelemetryEvent':
self._telem_evts.append(evt)
if self._no_telem_evts > 0 and self._no_telem_evts == len(self._telem_evts):
self._done_telem_evts.set()
elif evt.type_ == 'RemoteQueueModifiedEvent':
self._queue_mod_evts.append(evt)
if self._no_queue_mod_evts > 0 and self._no_queue_mod_evts == len(self._queue_mod_evts):
self._done_queue_mod_evts.set()
elif evt.type_ == 'RemoteCommandTransmittedEvent':
self._cmd_tx_evts.append(evt)
if self._no_cmd_tx_evts > 0 and self._no_cmd_tx_evts == len(self._cmd_tx_evts):
self._done_cmd_tx_evts.set()
elif evt.type_ == 'RemoteCommandResult':
cmd = evt.command
self._results_recv[cmd.command_id] = cmd
if len(self._results_recv) == self._no_requests:
self._done_evt.set()
def on_link_up(self):
"""
Called by a test to simulate turning the link on.
"""
log.debug('Remote client connecting to localhost:%i.',
self._this_port)
self._remote_client.start('localhost', self._this_port)
# Publish a link up event to be caught by the endpoint.
log.debug('Publishing telemetry event.')
self._event_publisher.publish_event(
event_type='PlatformTelemetryEvent',
origin = self._platform_resource_id,
status = TelemetryStatusType.AVAILABLE)
def on_link_down(self):
"""
Called by a test to simulate turning the link off.
"""
self._remote_client.stop()
# Publish a link down event to be caught by the endpoint.
log.debug('Publishing telemetry event.')
self._event_publisher.publish_event(
event_type='PlatformTelemetryEvent',
origin=self._platform_resource_id,
status = TelemetryStatusType.UNAVAILABLE)
def consume_req(self, request):
"""
Remote request callback.
Fire a greenlet to do some fake work before returning via
the remote client to terrestrial endpoint.
"""
# Spawn a greenlet to sleep briefly with each request and
# then respond with a result through the remote client.
log.debug('Remote endpoint got request: %s', str(request))
greenlet = gevent.spawn(self.process_remote_request, request)
self._workers.append(greenlet)
def consume_ack(self, request):
"""
Remote ack callback.
"""
log.debug('Remote endpoint got ack: %s', str(request))
def process_remote_request(self, request):
"""
Process remote request.
Do random amount of fake work and enqueue result for return to
terrestrial endpoint.
"""
worktime = random.uniform(.1,3)
gevent.sleep(worktime)
result = {
'command_id' : request.command_id,
'result' : 'fake_result'
}
log.debug('Finished processing request: %s', str(request))
self._remote_client.enqueue(result)
def remote_server_close(self):
"""
Remote server closed callback.
"""
log.debug('The remote server closed.')
def remote_client_close(self):
"""
Remoe client closed callback.
"""
log.debug('The remote client closed.')
def make_fake_command(self, no):
"""
Build a fake command for use in tests.
"""
cmdstr = 'fake_cmd_%i' % no
cmd = IonObject('RemoteCommand',
resource_id=self._resource_id,
command=cmdstr,
args=['arg1', 23],
kwargs={'kwargs1':'someval'})
return cmd
def make_fake_svc_command(self, no):
"""
Build a fake command for use in tests.
"""
cmdstr = 'fake_cmd_%i' % no
cmd = IonObject('RemoteCommand',
svc_name=self._rmt_svc_name,
command=cmdstr,
args=['arg1', 23],
kwargs={'kwargs1':'someval'})
return cmd
def test_process_queued(self):
"""
test_process_queued
Test forwarding of queued commands upon link up.
"""
self._no_cmd_tx_evts = self._no_requests
self._no_queue_mod_evts = self._no_requests
self._no_telem_evts = 2
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
self.on_link_up()
self._done_cmd_tx_evts.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
pending = self.te_client.get_pending()
self.assertEqual(len(pending), 0)
self.on_link_down()
self._done_telem_evts.get(timeout=CFG.endpoint.receive.timeout)
self.assertItemsEqual(self._requests_sent.keys(),
self._results_recv.keys())
def test_process_online(self):
"""
test_process_online
Test forwarding commands when the link is up.
"""
self._no_cmd_tx_evts = self._no_requests
self._no_queue_mod_evts = self._no_requests
self._no_telem_evts = 2
self.on_link_up()
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
gevent.sleep(.2)
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
self._done_cmd_tx_evts.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
pending = self.te_client.get_pending()
self.assertEqual(len(pending), 0)
self.on_link_down()
self._done_telem_evts.get(timeout=CFG.endpoint.receive.timeout)
self.assertItemsEqual(self._requests_sent.keys(),
self._results_recv.keys())
def test_remote_late(self):
"""
test_remote_late
Test simulates behavior when the remote side is initially unavailable.
"""
self._no_cmd_tx_evts = self._no_requests
self._no_queue_mod_evts = self._no_requests
self._no_telem_evts = 2
self.on_link_up()
gevent.sleep(2)
self._remote_server.stop()
self._remote_client.stop()
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
gevent.sleep(3)
self._remote_client.start('localhost', self._this_port)
self._remote_server.start('*', self._other_port)
self._done_cmd_tx_evts.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
pending = self.te_client.get_pending()
self.assertEqual(len(pending), 0)
self.on_link_down()
self._done_telem_evts.get(timeout=CFG.endpoint.receive.timeout)
self.assertItemsEqual(self._requests_sent.keys(),
self._results_recv.keys())
def test_get_clear_queue(self):
"""
test_get_clear_queue
Test endpoint queue get and clear manipulators.
"""
# Set up for events expected.
self._no_queue_mod_evts = self._no_requests * 2
# Queue commands.
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
# Queue commands.
for i in range(self._no_requests):
cmd = self.make_fake_svc_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
# Confirm queue mod events.
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
# Confirm get queue with no id.
queue = self.te_client.get_queue()
self.assertEqual(len(queue), self._no_requests * 2)
# Confirm get queue with id.
queue = self.te_client.get_queue(resource_id=self._resource_id)
self.assertEqual(len(queue), self._no_requests)
# Confirm get queue with svc name.
queue = self.te_client.get_queue(svc_name=self._rmt_svc_name)
self.assertEqual(len(queue), self._no_requests)
# Confirm get queue with bogus id.
queue = self.te_client.get_queue(resource_id='bogus_id')
self.assertEqual(len(queue), 0)
# Confirm get queue with bogus id.
queue = self.te_client.get_queue(svc_name='bogus_svc')
self.assertEqual(len(queue), 0)
# Reset queue mod expected events.
self._queue_mod_evts = []
self._no_queue_mod_evts = 1
self._done_queue_mod_evts = AsyncResult()
# Clear queue with no id.
poped = self.te_client.clear_queue()
# Confirm queue mod event and mods.
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
queue = self.te_client.get_queue()
self.assertEqual(len(poped), self._no_requests * 2)
self.assertEqual(len(queue), 0)
# Queue new commands and confirm event.
self._queue_mod_evts = []
self._no_queue_mod_evts = self._no_requests * 2
self._done_queue_mod_evts = AsyncResult()
self._requests_sent = {}
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
for i in range(self._no_requests):
cmd = self.make_fake_svc_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
# Reset queue mod expected events.
self._queue_mod_evts = []
self._no_queue_mod_evts = 1
self._done_queue_mod_evts = AsyncResult()
# Clear queue with id.
poped = self.te_client.clear_queue(resource_id=self._resource_id)
# Confirm mods and mod events.
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
queue = self.te_client.get_queue()
self.assertEqual(len(poped), self._no_requests)
self.assertEqual(len(queue), self._no_requests)
# Reset queue mod expected events.
self._queue_mod_evts = []
self._no_queue_mod_evts = 1
self._done_queue_mod_evts = AsyncResult()
# Clear queue with id.
poped = self.te_client.clear_queue(svc_name=self._rmt_svc_name)
# Confirm mods and mod events.
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
queue = self.te_client.get_queue()
self.assertEqual(len(poped), self._no_requests)
self.assertEqual(len(queue), 0)
# Queue new commands and confirm events.
self._queue_mod_evts = []
self._no_queue_mod_evts = self._no_requests
self._done_queue_mod_evts = AsyncResult()
self._requests_sent = {}
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
# Clear queue with bogus id.
poped = self.te_client.clear_queue(resource_id='bogus id')
queue = self.te_client.get_queue()
self.assertEqual(len(poped), 0)
self.assertEqual(len(queue), self._no_requests)
# Clear queue with bogus svc name.
poped = self.te_client.clear_queue(svc_name='bogus id')
queue = self.te_client.get_queue()
self.assertEqual(len(poped), 0)
self.assertEqual(len(queue), self._no_requests)
# Clear queue and confirm empty.
self.te_client.clear_queue()
queue = self.te_client.get_queue()
self.assertEqual(len(queue), 0)
# Turn on link and wait a few seconds.
# Confirm no data or tx events arrive.
self.on_link_up()
gevent.sleep(2)
self.assertEqual(len(self._cmd_tx_evts), 0)
self.assertEqual(len(self._results_recv), 0)
self._no_telem_evts = 2
self.on_link_down()
self._done_telem_evts.get(timeout=CFG.endpoint.receive.timeout)
def test_pop_pending_queue(self):
"""
test_pop_pending_queue
Test endpoint queue pop manipulators.
"""
# Set up for events expected.
self._no_queue_mod_evts = self._no_requests
# Queue commands.
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
# Confirm queue mod events.
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
queue = self.te_client.get_queue()
self.assertEqual(len(queue), self._no_requests)
# Pop a few commands from the queue, confirm events.
self._queue_mod_evts = []
self._no_queue_mod_evts = 3
self._done_queue_mod_evts = AsyncResult()
cmd_ids = self._requests_sent.keys()[:3]
poped = []
for x in cmd_ids:
poped.append(self.te_client.pop_queue(x))
self._requests_sent.pop(x)
# Try poping with illegal args. This should have no effect
poped.append(self.te_client.pop_queue())
poped.append(self.te_client.pop_queue('bogus id'))
poped = [x for x in poped if x != None]
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
queue = self.te_client.get_queue()
self.assertEqual(len(poped), 3)
self.assertEqual(len(queue), self._no_requests - 3)
# Turn on the link and verify that only the remaining commands
# get processed.
self._no_telem_evts = 2
self._no_requests = self._no_requests - 3
self._no_cmd_tx_evts = self._no_requests
self.on_link_up()
self._done_cmd_tx_evts.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
self.on_link_down()
self._done_telem_evts.get(timeout=CFG.endpoint.receive.timeout)
self.assertItemsEqual(self._requests_sent.keys(),
self._results_recv.keys())
pending = self.te_client.get_pending()
self.assertEqual(len(pending), 0)
def test_repeated_clear_pop(self):
"""
test_repeated_clear_pop
Test endpoint queue pop manipulators.
"""
# Set up for events expected.
self._no_queue_mod_evts = self._no_requests
for i in range(3):
self._queue_mod_evts = []
self._no_queue_mod_evts = self._no_requests
self._done_queue_mod_evts = AsyncResult()
# Queue commands.
self._requests_sent = {}
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
# Confirm queue mod events.
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
# Confirm get queue with no id.
queue = self.te_client.get_queue()
self.assertEqual(len(queue), self._no_requests)
# Reset queue mod expected events.
self._queue_mod_evts = []
self._no_queue_mod_evts = 1
self._done_queue_mod_evts = AsyncResult()
# Clear queue with no id.
poped = self.te_client.clear_queue()
# Confirm queue mod event and mods.
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
queue = self.te_client.get_queue()
self.assertEqual(len(poped), self._no_requests)
self.assertEqual(len(queue), 0)
self._queue_mod_evts = []
self._no_queue_mod_evts = self._no_requests
self._done_queue_mod_evts = AsyncResult()
# Queue commands.
self._requests_sent = {}
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
# Confirm queue mod events.
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
# Confirm get queue with no id.
queue = self.te_client.get_queue()
self.assertEqual(len(queue), self._no_requests)
# Pop a few commands from the queue, confirm events.
self._queue_mod_evts = []
self._no_queue_mod_evts = 3
self._done_queue_mod_evts = AsyncResult()
cmd_ids = self._requests_sent.keys()[:3]
poped = []
for x in cmd_ids:
poped.append(self.te_client.pop_queue(x))
self._requests_sent.pop(x)
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
queue = self.te_client.get_queue()
self.assertEqual(len(poped), 3)
self.assertEqual(len(queue), self._no_requests - 3)
self._no_telem_evts = 2
self._no_requests = self._no_requests - 3
self._no_cmd_tx_evts = self._no_requests
self.on_link_up()
self._done_cmd_tx_evts.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
self.on_link_down()
self._done_telem_evts.get(timeout=CFG.endpoint.receive.timeout)
self.assertItemsEqual(self._requests_sent.keys(),
self._results_recv.keys())
pending = self.te_client.get_pending()
self.assertEqual(len(pending), 0)
def test_get_pending(self):
"""
test_process_queued
Test forwarding of queued commands upon link up.
"""
self._no_cmd_tx_evts = self._no_requests
self._no_queue_mod_evts = self._no_requests
self._no_telem_evts = 2
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
self.on_link_up()
self._no_requests = 3
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
pending = self.te_client.get_pending(resource_id=self._resource_id)
for x in pending:
self.assertIn(x.command_id, self._requests_sent.keys())
self._no_requests = 10
self._done_evt = AsyncResult()
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
pending = self.te_client.get_pending()
self.assertEqual(len(pending), 0)
self.on_link_down()
self._done_telem_evts.get(timeout=CFG.endpoint.receive.timeout)
self.assertItemsEqual(self._requests_sent.keys(),
self._results_recv.keys())
#@unittest.skip('Wait for verification of resource registry use.')
def test_persistent_queue(self):
"""
test_persistent_queue
Test ability of endpoint to restore a persistent queue, survive
reboot, etc.
"""
self._no_cmd_tx_evts = self._no_requests
self._no_queue_mod_evts = self._no_requests
self._no_telem_evts = 2
for i in range(self._no_requests):
cmd = self.make_fake_command(i)
cmd = self.te_client.enqueue_command(cmd)
self._requests_sent[cmd.command_id] = cmd
# Confirm queue mod events.
self._done_queue_mod_evts.get(timeout=CFG.endpoint.receive.timeout)
# Confirm get queue with no id.
queue = self.te_client.get_queue()
self.assertEqual(len(queue), self._no_requests)
# Stop and restart the endpoint process.
# Verify the queue is restored.
self._container_client.terminate_process(self._te_pid)
# Create agent config.
endpoint_config = {
'other_host' : self._other_host,
'other_port' : self._other_port,
'this_port' : 0,
'xs_name' : self._xs_name,
'platform_resource_id' : self._platform_resource_id,
'process' : {
'listen_name' : self._listen_name
}
}
# Spawn the terrestrial enpoint process.
log.debug('Spawning terrestrial endpoint process.')
self._te_pid = self._container_client.spawn_process(
name='remote_endpoint_1',
module='ion.services.sa.tcaa.terrestrial_endpoint',
cls='TerrestrialEndpoint',
config=endpoint_config)
log.debug('Endpoint pid=%s.', str(self._te_pid))
# Create an endpoint client.
# The to_name may be either the process pid or
# the listen_name, which for this remote bridge
# is svc_name + remote_name as above.
self.te_client = TerrestrialEndpointClient(
process=FakeProcess(),
to_name=self._listen_name)
log.debug('Got te client %s.', str(self.te_client))
# Remember the terrestrial port.
self._this_port = self.te_client.get_port()
# Confirm we restored the queue with the previous commands.
queue = self.te_client.get_queue()
self.assertEqual(len(queue), self._no_requests)
self.on_link_up()
self._done_cmd_tx_evts.get(timeout=CFG.endpoint.receive.timeout)
self._done_evt.get(timeout=CFG.endpoint.receive.timeout)
pending = self.te_client.get_pending()
self.assertEqual(len(pending), 0)
self.on_link_down()
self._done_telem_evts.get(timeout=CFG.endpoint.receive.timeout)
self.assertItemsEqual(self._requests_sent.keys(),
self._results_recv.keys())
| bsd-2-clause | -1,828,002,971,545,074,700 | 36.735782 | 202 | 0.587101 | false |
durandj/dockerscript | dockerscript/operations/run.py | 1 | 1025 | """
Run operation for adding shell commands to the Dockerfile
"""
import typing
from .base_operation import Operation
# pylint: disable=too-few-public-methods
class RunOperation(Operation):
"""
An operation for running a shell command
"""
commands: typing.List[str]
def __init__(self, commands: typing.List[str]) -> None:
"""
Creates a shell command operation
"""
self.commands = commands if len(commands) == 1 else ['set -ex'] + commands
def build(self) -> str:
commands = ' \\\n && '.join(self.commands)
return f'RUN {commands}'
# pylint: enable=too-few-public-methods
def run(
image,
command: typing.Union[str, typing.List[str]]):
"""
Adds one or more shell commands to the Docker image
"""
if not command:
raise ValueError('Cannot have an empty set of commands')
operation = RunOperation(
[command] if isinstance(command, str) else command,
)
image.add_operation(operation)
| mit | 624,003,003,467,350,400 | 22.295455 | 82 | 0.626341 | false |
gentoo/gentoo-keys | gkeys/gkeys/cli.py | 1 | 1763 | #
#-*- coding:utf-8 -*-
"""
Gentoo-keys - cli.py
Command line interface module
@copyright: 2012-2015 by Brian Dolbec <[email protected]>
@license: GNU GPL2, see COPYING for details.
"""
from __future__ import print_function
import os
import sys
from gkeys import __version__
from gkeys.base import CliBase
from gkeys.actions import Actions
from gkeys.action_map import Available_Actions, Action_Map
from gkeys.config import GKeysConfig
class Main(CliBase):
'''Main command line interface class'''
def __init__(self, root=None, config=None, print_results=True):
""" Main class init function.
@param root: string, root path to use
"""
CliBase.__init__(self)
self.root = root or "/"
self.config = config or GKeysConfig(root=root)
self.config.options['print_results'] = print_results
self.cli_config = {
'Actions': Actions,
'Available_Actions': Available_Actions,
'Action_Map': Action_Map,
'Base_Options': [],
'prog': 'gkeys',
'description': 'Gentoo-keys manager program',
'epilog': '''CAUTION: adding UNTRUSTED keys can be HAZARDOUS to your system!'''
}
self.version = __version__
def __call__(self, args=None):
"""Main class call function
@param args: Optional list of argumanets to parse and action to run
Defaults to sys.argv[1:]
"""
if args:
ok = self.setup(args, [])
else:
args = self.parse_args(sys.argv[1:])
ok = self.setup(args, os.path.join(self.config['configdir'],'gkeys.conf'))
if ok:
return self.run(args)
return False
| gpl-2.0 | 8,904,602,838,361,995,000 | 26.123077 | 91 | 0.587067 | false |
ARudiuk/mne-python | mne/epochs.py | 1 | 129314 | # -*- coding: utf-8 -*-
"""Tools for working with epoched data"""
# Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Daniel Strohmeier <[email protected]>
# Denis Engemann <[email protected]>
# Mainak Jas <[email protected]>
#
# License: BSD (3-clause)
from copy import deepcopy
import json
import os.path as op
from distutils.version import LooseVersion
import numpy as np
import scipy
from .io.write import (start_file, start_block, end_file, end_block,
write_int, write_float_matrix, write_float,
write_id, write_string, _get_split_size)
from .io.meas_info import read_meas_info, write_meas_info, _merge_info
from .io.open import fiff_open, _get_next_fname
from .io.tree import dir_tree_find
from .io.tag import read_tag, read_tag_info
from .io.constants import FIFF
from .io.pick import (pick_types, channel_indices_by_type, channel_type,
pick_channels, pick_info, _pick_data_channels,
_pick_aux_channels, _DATA_CH_TYPES_SPLIT)
from .io.proj import setup_proj, ProjMixin, _proj_equal
from .io.base import _BaseRaw, ToDataFrameMixin, TimeMixin
from .bem import _check_origin
from .evoked import EvokedArray
from .baseline import rescale, _log_rescale
from .channels.channels import (ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin)
from .filter import resample, detrend, FilterMixin
from .event import _read_events_fif
from .fixes import in1d, _get_args
from .viz import (plot_epochs, plot_epochs_psd, plot_epochs_psd_topomap,
plot_epochs_image, plot_topo_image_epochs)
from .utils import (check_fname, logger, verbose, _check_type_picks,
_time_mask, check_random_state, object_hash, warn,
_check_copy_dep)
from .externals.six import iteritems, string_types
from .externals.six.moves import zip
def _save_split(epochs, fname, part_idx, n_parts):
"""Split epochs"""
# insert index in filename
path, base = op.split(fname)
idx = base.find('.')
if part_idx > 0:
fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx,
base[idx + 1:]))
next_fname = None
if part_idx < n_parts - 1:
next_fname = op.join(path, '%s-%d.%s' % (base[:idx], part_idx + 1,
base[idx + 1:]))
next_idx = part_idx + 1
fid = start_file(fname)
info = epochs.info
meas_id = info['meas_id']
start_block(fid, FIFF.FIFFB_MEAS)
write_id(fid, FIFF.FIFF_BLOCK_ID)
if info['meas_id'] is not None:
write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, info['meas_id'])
# Write measurement info
write_meas_info(fid, info)
# One or more evoked data sets
start_block(fid, FIFF.FIFFB_PROCESSED_DATA)
start_block(fid, FIFF.FIFFB_MNE_EPOCHS)
# write events out after getting data to ensure bad events are dropped
data = epochs.get_data()
start_block(fid, FIFF.FIFFB_MNE_EVENTS)
write_int(fid, FIFF.FIFF_MNE_EVENT_LIST, epochs.events.T)
mapping_ = ';'.join([k + ':' + str(v) for k, v in
epochs.event_id.items()])
write_string(fid, FIFF.FIFF_DESCRIPTION, mapping_)
end_block(fid, FIFF.FIFFB_MNE_EVENTS)
# First and last sample
first = int(round(epochs.tmin * info['sfreq'])) # round just to be safe
last = first + len(epochs.times) - 1
write_int(fid, FIFF.FIFF_FIRST_SAMPLE, first)
write_int(fid, FIFF.FIFF_LAST_SAMPLE, last)
# save baseline
if epochs.baseline is not None:
bmin, bmax = epochs.baseline
bmin = epochs.times[0] if bmin is None else bmin
bmax = epochs.times[-1] if bmax is None else bmax
write_float(fid, FIFF.FIFF_MNE_BASELINE_MIN, bmin)
write_float(fid, FIFF.FIFF_MNE_BASELINE_MAX, bmax)
# The epochs itself
decal = np.empty(info['nchan'])
for k in range(info['nchan']):
decal[k] = 1.0 / (info['chs'][k]['cal'] *
info['chs'][k].get('scale', 1.0))
data *= decal[np.newaxis, :, np.newaxis]
write_float_matrix(fid, FIFF.FIFF_EPOCH, data)
# undo modifications to data
data /= decal[np.newaxis, :, np.newaxis]
write_string(fid, FIFF.FIFFB_MNE_EPOCHS_DROP_LOG,
json.dumps(epochs.drop_log))
write_int(fid, FIFF.FIFFB_MNE_EPOCHS_SELECTION,
epochs.selection)
# And now write the next file info in case epochs are split on disk
if next_fname is not None and n_parts > 1:
start_block(fid, FIFF.FIFFB_REF)
write_int(fid, FIFF.FIFF_REF_ROLE, FIFF.FIFFV_ROLE_NEXT_FILE)
write_string(fid, FIFF.FIFF_REF_FILE_NAME, op.basename(next_fname))
if meas_id is not None:
write_id(fid, FIFF.FIFF_REF_FILE_ID, meas_id)
write_int(fid, FIFF.FIFF_REF_FILE_NUM, next_idx)
end_block(fid, FIFF.FIFFB_REF)
end_block(fid, FIFF.FIFFB_MNE_EPOCHS)
end_block(fid, FIFF.FIFFB_PROCESSED_DATA)
end_block(fid, FIFF.FIFFB_MEAS)
end_file(fid)
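# Illustrative sketch (comments only, not executed): ``_save_split`` is an
# internal helper that writes a single on-disk part of an epochs file. Users
# normally go through the public ``Epochs.save`` method (defined later in this
# module), which decides how many parts are needed and calls this helper once
# per part. Assuming an existing ``epochs`` instance::
#
#     epochs.save('sample-epo.fif')  # large data may be written as
#                                    # sample-epo.fif, sample-epo-1.fif, ...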
class _BaseEpochs(ProjMixin, ContainsMixin, UpdateChannelsMixin,
SetChannelsMixin, InterpolationMixin, FilterMixin,
ToDataFrameMixin, TimeMixin):
"""Abstract base class for Epochs-type classes
This class provides basic functionality and should never be instantiated
directly. See Epochs below for an explanation of the parameters.
"""
def __init__(self, info, data, events, event_id=None, tmin=-0.2, tmax=0.5,
baseline=(None, 0), raw=None,
picks=None, name='Unknown', reject=None, flat=None,
decim=1, reject_tmin=None, reject_tmax=None, detrend=None,
add_eeg_ref=True, proj=True, on_missing='error',
preload_at_end=False, selection=None, drop_log=None,
verbose=None):
self.verbose = verbose
self.name = name
if on_missing not in ['error', 'warning', 'ignore']:
raise ValueError('on_missing must be one of: error, '
'warning, ignore. Got: %s' % on_missing)
# check out event_id dict
if event_id is None: # convert to int to make typing-checks happy
event_id = dict((str(e), int(e)) for e in np.unique(events[:, 2]))
elif isinstance(event_id, dict):
if not all(isinstance(v, int) for v in event_id.values()):
raise ValueError('Event IDs must be of type integer')
if not all(isinstance(k, string_types) for k in event_id):
raise ValueError('Event names must be of type str')
elif isinstance(event_id, list):
if not all(isinstance(v, int) for v in event_id):
raise ValueError('Event IDs must be of type integer')
event_id = dict(zip((str(i) for i in event_id), event_id))
elif isinstance(event_id, int):
event_id = {str(event_id): event_id}
else:
raise ValueError('event_id must be dict or int.')
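        # Illustrative sketch (comments only): the ``event_id`` forms accepted
        # and normalized by the checks above are, e.g.::
        #
        #     event_id = {'aud': 1, 'vis': 2}  # dict: names -> integer codes
        #     event_id = [1, 2]                # list: becomes {'1': 1, '2': 2}
        #     event_id = 1                     # int: becomes {'1': 1}
        #     event_id = None                  # all unique codes in ``events``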
self.event_id = event_id
del event_id
if events is not None: # RtEpochs can have events=None
if events.dtype.kind not in ['i', 'u']:
raise ValueError('events must be an array of type int')
if events.ndim != 2 or events.shape[1] != 3:
raise ValueError('events must be 2D with 3 columns')
for key, val in self.event_id.items():
if val not in events[:, 2]:
msg = ('No matching events found for %s '
'(event id %i)' % (key, val))
if on_missing == 'error':
raise ValueError(msg)
elif on_missing == 'warning':
warn(msg)
else: # on_missing == 'ignore':
pass
values = list(self.event_id.values())
selected = in1d(events[:, 2], values)
if selection is None:
self.selection = np.where(selected)[0]
else:
self.selection = selection
if drop_log is None:
self.drop_log = [list() if k in self.selection else ['IGNORED']
for k in range(len(events))]
else:
self.drop_log = drop_log
events = events[selected]
n_events = len(events)
if n_events > 1:
if np.diff(events.astype(np.int64)[:, 0]).min() <= 0:
warn('The events passed to the Epochs constructor are not '
'chronologically ordered.', RuntimeWarning)
if n_events > 0:
logger.info('%d matching events found' % n_events)
else:
raise ValueError('No desired events found.')
self.events = events
del events
else:
self.drop_log = list()
self.selection = np.array([], int)
# do not set self.events here, let subclass do it
# check reject_tmin and reject_tmax
if (reject_tmin is not None) and (reject_tmin < tmin):
raise ValueError("reject_tmin needs to be None or >= tmin")
if (reject_tmax is not None) and (reject_tmax > tmax):
raise ValueError("reject_tmax needs to be None or <= tmax")
if (reject_tmin is not None) and (reject_tmax is not None):
if reject_tmin >= reject_tmax:
raise ValueError('reject_tmin needs to be < reject_tmax')
if detrend not in [None, 0, 1]:
raise ValueError('detrend must be None, 0, or 1')
# check that baseline is in available data
if tmin > tmax:
raise ValueError('tmin has to be less than or equal to tmax')
_check_baseline(baseline, tmin, tmax, info['sfreq'])
_log_rescale(baseline)
self.baseline = baseline
self.reject_tmin = reject_tmin
self.reject_tmax = reject_tmax
self.detrend = detrend
self._raw = raw
self.info = info
del info
if picks is None:
picks = list(range(len(self.info['ch_names'])))
else:
self.info = pick_info(self.info, picks)
self.picks = _check_type_picks(picks)
if len(picks) == 0:
raise ValueError("Picks cannot be empty.")
if data is None:
self.preload = False
self._data = None
else:
assert decim == 1
if data.ndim != 3 or data.shape[2] != \
round((tmax - tmin) * self.info['sfreq']) + 1:
raise RuntimeError('bad data shape')
self.preload = True
self._data = data
self._offset = None
# Handle times
sfreq = float(self.info['sfreq'])
start_idx = int(round(tmin * sfreq))
self._raw_times = np.arange(start_idx,
int(round(tmax * sfreq)) + 1) / sfreq
self.times = self._raw_times.copy()
self._decim = 1
self.decimate(decim)
# setup epoch rejection
self.reject = None
self.flat = None
self._reject_setup(reject, flat)
# do the rest
valid_proj = [True, 'delayed', False]
if proj not in valid_proj:
raise ValueError('"proj" must be one of %s, not %s'
% (valid_proj, proj))
if proj == 'delayed':
self._do_delayed_proj = True
logger.info('Entering delayed SSP mode.')
else:
self._do_delayed_proj = False
activate = False if self._do_delayed_proj else proj
self._projector, self.info = setup_proj(self.info, add_eeg_ref,
activate=activate)
if preload_at_end:
assert self._data is None
assert self.preload is False
self.load_data() # this will do the projection
elif proj is True and self._projector is not None and data is not None:
# let's make sure we project if data was provided and proj
# requested
# we could do this with np.einsum, but iteration should be
# more memory safe in most instances
for ii, epoch in enumerate(self._data):
self._data[ii] = np.dot(self._projector, epoch)
def load_data(self):
"""Load the data if not already preloaded
Returns
-------
epochs : instance of Epochs
The epochs object.
Notes
-----
This function operates in-place.
.. versionadded:: 0.10.0
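        Examples
        --------
        A minimal sketch (``epochs`` is assumed to be an existing Epochs
        instance created with ``preload=False``); the call is a no-op if the
        data are already in memory and returns the instance itself:
        >>> epochs.load_data()  # doctest: +SKIP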
"""
if self.preload:
return
self._data = self._get_data()
self.preload = True
self._decim_slice = slice(None, None, None)
self._decim = 1
self._raw_times = self.times
assert self._data.shape[-1] == len(self.times)
return self
def decimate(self, decim, offset=0):
"""Decimate the epochs
Parameters
----------
        decim : int
            The factor by which to decimate the data (only every ``decim``-th
            sample is kept).
offset : int
Apply an offset to where the decimation starts relative to the
sample corresponding to t=0. The offset is in samples at the
current sampling rate.
.. versionadded:: 0.12
Returns
-------
epochs : instance of Epochs
The decimated Epochs object.
Notes
-----
Decimation can be done multiple times. For example,
``epochs.decimate(2).decimate(2)`` will be the same as
``epochs.decimate(4)``.
.. versionadded:: 0.10.0
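        Examples
        --------
        A minimal sketch (``epochs`` is assumed to be an existing, preloaded
        Epochs instance); this call divides ``epochs.info['sfreq']`` by 4:
        >>> epochs.decimate(4)  # doctest: +SKIP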
"""
if decim < 1 or decim != int(decim):
raise ValueError('decim must be an integer > 0')
decim = int(decim)
new_sfreq = self.info['sfreq'] / float(decim)
lowpass = self.info['lowpass']
if decim > 1 and lowpass is None:
warn('The measurement information indicates data is not low-pass '
'filtered. The decim=%i parameter will result in a sampling '
'frequency of %g Hz, which can cause aliasing artifacts.'
% (decim, new_sfreq))
elif decim > 1 and new_sfreq < 2.5 * lowpass:
warn('The measurement information indicates a low-pass frequency '
'of %g Hz. The decim=%i parameter will result in a sampling '
'frequency of %g Hz, which can cause aliasing artifacts.'
% (lowpass, decim, new_sfreq)) # > 50% nyquist lim
offset = int(offset)
if not 0 <= offset < decim:
            raise ValueError('offset must be at least 0 and less than decim '
                             '(%s), got %s' % (decim, offset))
self._decim *= decim
start_idx = int(round(self._raw_times[0] * (self.info['sfreq'] *
self._decim)))
i_start = start_idx % self._decim
decim_slice = slice(i_start + offset, len(self._raw_times),
self._decim)
self.info['sfreq'] = new_sfreq
if self.preload:
self._data = self._data[:, :, decim_slice].copy()
self._raw_times = self._raw_times[decim_slice].copy()
self._decim_slice = slice(None, None, None)
self._decim = 1
self.times = self._raw_times
else:
self._decim_slice = decim_slice
self.times = self._raw_times[self._decim_slice]
return self
@verbose
def apply_baseline(self, baseline, verbose=None):
"""Baseline correct epochs
Parameters
----------
baseline : tuple of length 2
            The time interval to apply baseline correction. (a, b) is the
            interval between "a (s)" and "b (s)". If a is None the beginning
of the data is used and if b is None then b is set to the end of
the interval. If baseline is equal to (None, None) all the time
interval is used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
epochs : instance of Epochs
The baseline-corrected Epochs object.
Notes
-----
Baseline correction can be done multiple times.
.. versionadded:: 0.10.0
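        Examples
        --------
        A minimal sketch (``epochs`` is assumed to be an existing, preloaded
        Epochs instance); correct each epoch using the interval from the
        start of the epoch up to time 0:
        >>> epochs.apply_baseline((None, 0))  # doctest: +SKIP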
"""
if not self.preload:
# Eventually we can relax this restriction, but it will require
# more careful checking of baseline (e.g., refactor with the
# _BaseEpochs.__init__ checks)
raise RuntimeError('Data must be loaded to apply a new baseline')
_check_baseline(baseline, self.tmin, self.tmax, self.info['sfreq'])
picks = _pick_data_channels(self.info, exclude=[], with_ref_meg=True)
picks_aux = _pick_aux_channels(self.info, exclude=[])
picks = np.sort(np.concatenate((picks, picks_aux)))
data = self._data
data[:, picks, :] = rescale(data[:, picks, :], self.times, baseline,
copy=False)
self.baseline = baseline
return self
def _reject_setup(self, reject, flat):
"""Sets self._reject_time and self._channel_type_idx"""
idx = channel_indices_by_type(self.info)
reject = deepcopy(reject) if reject is not None else dict()
flat = deepcopy(flat) if flat is not None else dict()
for rej, kind in zip((reject, flat), ('reject', 'flat')):
if not isinstance(rej, dict):
raise TypeError('reject and flat must be dict or None, not %s'
% type(rej))
bads = set(rej.keys()) - set(idx.keys())
if len(bads) > 0:
raise KeyError('Unknown channel types found in %s: %s'
% (kind, bads))
for key in idx.keys():
# don't throw an error if rejection/flat would do nothing
if len(idx[key]) == 0 and (np.isfinite(reject.get(key, np.inf)) or
flat.get(key, -1) >= 0):
# This is where we could eventually add e.g.
# self.allow_missing_reject_keys check to allow users to
# provide keys that don't exist in data
raise ValueError("No %s channel found. Cannot reject based on "
"%s." % (key.upper(), key.upper()))
# check for invalid values
for rej, kind in zip((reject, flat), ('Rejection', 'Flat')):
for key, val in rej.items():
if val is None or val < 0:
raise ValueError('%s value must be a number >= 0, not "%s"'
% (kind, val))
# now check to see if our rejection and flat are getting more
# restrictive
old_reject = self.reject if self.reject is not None else dict()
old_flat = self.flat if self.flat is not None else dict()
bad_msg = ('{kind}["{key}"] == {new} {op} {old} (old value), new '
'{kind} values must be at least as stringent as '
'previous ones')
for key in set(reject.keys()).union(old_reject.keys()):
old = old_reject.get(key, np.inf)
new = reject.get(key, np.inf)
if new > old:
raise ValueError(bad_msg.format(kind='reject', key=key,
new=new, old=old, op='>'))
for key in set(flat.keys()).union(old_flat.keys()):
old = old_flat.get(key, -np.inf)
new = flat.get(key, -np.inf)
if new < old:
raise ValueError(bad_msg.format(kind='flat', key=key,
new=new, old=old, op='<'))
# after validation, set parameters
self._bad_dropped = False
self._channel_type_idx = idx
self.reject = reject if len(reject) > 0 else None
self.flat = flat if len(flat) > 0 else None
if (self.reject_tmin is None) and (self.reject_tmax is None):
self._reject_time = None
else:
if self.reject_tmin is None:
reject_imin = None
else:
idxs = np.nonzero(self.times >= self.reject_tmin)[0]
reject_imin = idxs[0]
if self.reject_tmax is None:
reject_imax = None
else:
idxs = np.nonzero(self.times <= self.reject_tmax)[0]
reject_imax = idxs[-1]
self._reject_time = slice(reject_imin, reject_imax)
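    # Illustrative sketch (comments only, not executed): ``reject`` and
    # ``flat`` are mappings from channel type to amplitude thresholds that
    # ``_reject_setup`` above validates. Hypothetical values might look like::
    #
    #     reject = dict(eeg=100e-6, eog=200e-6)  # drop epochs exceeding these
    #     flat = dict(eeg=1e-7)                  # drop epochs flatter than this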
@verbose
def _is_good_epoch(self, data, verbose=None):
"""Determine if epoch is good"""
if isinstance(data, string_types):
return False, [data]
if data is None:
return False, ['NO_DATA']
n_times = len(self.times)
if data.shape[1] < n_times:
            # epoch is too short, i.e., at the end of the data
return False, ['TOO_SHORT']
if self.reject is None and self.flat is None:
return True, None
else:
if self._reject_time is not None:
data = data[:, self._reject_time]
return _is_good(data, self.ch_names, self._channel_type_idx,
self.reject, self.flat, full_report=True,
ignore_chs=self.info['bads'])
@verbose
def _detrend_offset_decim(self, epoch, verbose=None):
"""Aux Function: detrend, baseline correct, offset, decim
        Note: operates in place
"""
if (epoch is None) or isinstance(epoch, string_types):
return epoch
# Detrend
if self.detrend is not None:
picks = _pick_data_channels(self.info, exclude=[])
epoch[picks] = detrend(epoch[picks], self.detrend, axis=1)
# Baseline correct
picks = pick_types(self.info, meg=True, eeg=True, stim=False,
ref_meg=True, eog=True, ecg=True, seeg=True,
emg=True, bio=True, ecog=True, exclude=[])
epoch[picks] = rescale(epoch[picks], self._raw_times, self.baseline,
copy=False, verbose=False)
# handle offset
if self._offset is not None:
epoch += self._offset
# Decimate if necessary (i.e., epoch not preloaded)
epoch = epoch[:, self._decim_slice]
return epoch
def iter_evoked(self):
"""Iterate over epochs as a sequence of Evoked objects
The Evoked objects yielded will each contain a single epoch (i.e., no
averaging is performed).
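        Examples
        --------
        A minimal sketch (``epochs`` is assumed to be an existing Epochs
        instance); each yielded object is a single-trial Evoked:
        >>> for ev in epochs.iter_evoked():  # doctest: +SKIP
        ...     print(ev.comment)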
"""
self._current = 0
while True:
out = self.next(True)
if out is None:
return # properly signal the end of iteration
data, event_id = out
tmin = self.times[0]
info = deepcopy(self.info)
yield EvokedArray(data, info, tmin, comment=str(event_id))
def subtract_evoked(self, evoked=None):
"""Subtract an evoked response from each epoch
        Can be used to exclude the evoked response when analyzing induced
        activity, see e.g. [1].
        Parameters
        ----------
        evoked : instance of Evoked | None
            The evoked response to subtract. If None, the evoked response
            is computed from Epochs itself.
        Returns
        -------
        self : instance of Epochs
            The modified instance (the instance is also modified in place).
        References
        ----------
        [1] David et al. "Mechanisms of evoked and induced responses in
            MEG/EEG", NeuroImage, vol. 31, no. 4, pp. 1580-1591, July 2006.
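        Examples
        --------
        A minimal sketch (``epochs`` is assumed to be an existing, preloaded
        Epochs instance); with no argument the average of the epochs
        themselves is subtracted from each epoch:
        >>> epochs.subtract_evoked()  # doctest: +SKIP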
"""
logger.info('Subtracting Evoked from Epochs')
if evoked is None:
picks = _pick_data_channels(self.info, exclude=[])
evoked = self.average(picks)
# find the indices of the channels to use
picks = pick_channels(evoked.ch_names, include=self.ch_names)
# make sure the omitted channels are not data channels
if len(picks) < len(self.ch_names):
sel_ch = [evoked.ch_names[ii] for ii in picks]
diff_ch = list(set(self.ch_names).difference(sel_ch))
diff_idx = [self.ch_names.index(ch) for ch in diff_ch]
diff_types = [channel_type(self.info, idx) for idx in diff_idx]
bad_idx = [diff_types.index(t) for t in diff_types if t in
_DATA_CH_TYPES_SPLIT]
if len(bad_idx) > 0:
bad_str = ', '.join([diff_ch[ii] for ii in bad_idx])
raise ValueError('The following data channels are missing '
'in the evoked response: %s' % bad_str)
logger.info(' The following channels are not included in the '
'subtraction: %s' % ', '.join(diff_ch))
# make sure the times match
if (len(self.times) != len(evoked.times) or
np.max(np.abs(self.times - evoked.times)) >= 1e-7):
raise ValueError('Epochs and Evoked object do not contain '
'the same time points.')
# handle SSPs
if not self.proj and evoked.proj:
warn('Evoked has SSP applied while Epochs has not.')
if self.proj and not evoked.proj:
evoked = evoked.copy().apply_proj()
# find the indices of the channels to use in Epochs
ep_picks = [self.ch_names.index(evoked.ch_names[ii]) for ii in picks]
# do the subtraction
if self.preload:
self._data[:, ep_picks, :] -= evoked.data[picks][None, :, :]
else:
if self._offset is None:
self._offset = np.zeros((len(self.ch_names), len(self.times)),
dtype=np.float)
self._offset[ep_picks] -= evoked.data[picks]
logger.info('[done]')
return self
def __next__(self, *args, **kwargs):
"""Wrapper for Py3k"""
return self.next(*args, **kwargs)
def __hash__(self):
if not self.preload:
raise RuntimeError('Cannot hash epochs unless preloaded')
return object_hash(dict(info=self.info, data=self._data))
def average(self, picks=None):
"""Compute average of epochs
Parameters
----------
picks : array-like of int | None
If None, only MEG, EEG, SEEG, and ECoG channels are kept;
otherwise the channel indices in picks are kept.
Returns
-------
evoked : instance of Evoked
The averaged epochs.
Notes
-----
Computes an average of all epochs in the instance, even if
they correspond to different conditions. To average by condition,
do ``epochs[condition].average()`` for each condition separately.
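Examples
--------
For example, assuming ``epochs`` was created with an event_id such as
``dict(auditory=1, visual=3)``::
>>> evoked_auditory = epochs['auditory'].average()  # doctest: +SKIP
>>> evoked_all = epochs.average()  # doctest: +SKIP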
"""
return self._compute_mean_or_stderr(picks, 'ave')
def standard_error(self, picks=None):
"""Compute standard error over epochs
Parameters
----------
picks : array-like of int | None
If None, only MEG, EEG, SEEG, and ECoG channels are kept;
otherwise the channel indices in picks are kept.
Returns
-------
evoked : instance of Evoked
The standard error over epochs.
"""
return self._compute_mean_or_stderr(picks, 'stderr')
def _compute_mean_or_stderr(self, picks, mode='ave'):
"""Compute the mean or std over epochs and return Evoked"""
_do_std = True if mode == 'stderr' else False
n_channels = len(self.ch_names)
n_times = len(self.times)
if self.preload:
n_events = len(self.events)
fun = np.std if _do_std else np.mean
data = fun(self._data, axis=0)
assert len(self.events) == len(self._data)
else:
data = np.zeros((n_channels, n_times))
n_events = 0
for e in self:
data += e
n_events += 1
if n_events > 0:
data /= n_events
else:
data.fill(np.nan)
# convert to stderr if requested, could do in one pass but do in
# two (slower) in case there are large numbers
if _do_std:
data_mean = data.copy()
data.fill(0.)
for e in self:
data += (e - data_mean) ** 2
data = np.sqrt(data / n_events)
if not _do_std:
kind = 'average'
else:
kind = 'standard_error'
data /= np.sqrt(n_events)
return self._evoked_from_epoch_data(data, self.info, picks, n_events,
kind)
def _evoked_from_epoch_data(self, data, info, picks, n_events, kind):
"""Helper to create an evoked object from epoch data"""
info = deepcopy(info)
evoked = EvokedArray(data, info, tmin=self.times[0],
comment=self.name, nave=n_events, kind=kind,
verbose=self.verbose)
# XXX: above constructor doesn't recreate the times object precisely
evoked.times = self.times.copy()
# pick channels
if picks is None:
picks = _pick_data_channels(evoked.info, exclude=[])
ch_names = [evoked.ch_names[p] for p in picks]
evoked.pick_channels(ch_names)
if len(evoked.info['ch_names']) == 0:
raise ValueError('No data channel found when averaging.')
if evoked.nave < 1:
warn('evoked object is empty (based on less than 1 epoch)')
return evoked
@property
def ch_names(self):
"""Channel names"""
return self.info['ch_names']
def plot(self, picks=None, scalings=None, show=True,
block=False, n_epochs=20,
n_channels=20, title=None):
"""Visualize epochs.
Bad epochs can be marked with a left click on top of the epoch. Bad
channels can be selected by clicking the channel name on the left side
of the main axes. Calling this function drops all the selected bad
epochs as well as bad epochs marked beforehand with rejection
parameters.
Parameters
----------
picks : array-like of int | None
Channels to be included. If None, only good data channels are used.
Defaults to None
scalings : dict | None
Scaling factors for the traces. If any fields in scalings are
'auto', the scaling factor is set to match the 99.5th percentile of
a subset of the corresponding data. If scalings == 'auto', all
scalings fields are set to 'auto'. If any fields are 'auto' and
data is not preloaded, a subset of epochs up to 100 MB will be
loaded. If None, defaults to::
dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1,
chpi=1e-4)
show : bool
Whether to show the figure or not.
block : bool
Whether to halt program execution until the figure is closed.
Useful for rejecting bad trials on the fly by clicking on a
sub plot.
n_epochs : int
The number of epochs per view.
n_channels : int
The number of channels per view in the epochs browser. Defaults to 20.
title : str | None
The title of the window. If None, epochs name will be displayed.
Defaults to None.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
Notes
-----
The arrow keys (up/down/left/right) can
be used to navigate between channels and epochs and the scaling can be
adjusted with - and + (or =) keys, but this depends on the backend
matplotlib is configured to use (e.g., mpl.use(``TkAgg``) should work).
Full screen mode can be toggled with f11 key. The amount of epochs
and channels per view can be adjusted with home/end and
page down/page up keys. Butterfly plot can be toggled with ``b`` key.
Right mouse click adds a vertical line to the plot.
.. versionadded:: 0.10.0
"""
return plot_epochs(self, picks=picks, scalings=scalings,
n_epochs=n_epochs, n_channels=n_channels,
title=title, show=show, block=block)
def plot_psd(self, fmin=0, fmax=np.inf, proj=False, bandwidth=None,
adaptive=False, low_bias=True, normalization='length',
picks=None, ax=None, color='black', area_mode='std',
area_alpha=0.33, dB=True, n_jobs=1, verbose=None, show=True):
"""Plot the power spectral density across epochs
Parameters
----------
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
proj : bool
Apply projection.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz.
The default value is a window half-bandwidth of 4 Hz.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
picks : array-like of int | None
List of channels to use.
ax : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
color : str | tuple
A matplotlib-compatible color to use.
area_mode : str | None
Mode for plotting area. If 'std', the mean +/- 1 STD (across
channels) will be plotted. If 'range', the min and max (across
channels) will be plotted. Bad channels will be excluded from
these calculations. If None, no area will be plotted.
area_alpha : float
Alpha for the area.
dB : bool
If True, transform data to decibels.
n_jobs : int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
return plot_epochs_psd(self, fmin=fmin, fmax=fmax, proj=proj,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias, normalization=normalization,
picks=picks, ax=ax, color=color,
area_mode=area_mode, area_alpha=area_alpha,
dB=dB, n_jobs=n_jobs, verbose=None, show=show)
def plot_psd_topomap(self, bands=None, vmin=None, vmax=None, proj=False,
bandwidth=None, adaptive=False, low_bias=True,
normalization='length', ch_type=None,
layout=None, cmap='RdBu_r', agg_fun=None, dB=True,
n_jobs=1, normalize=False, cbar_fmt='%0.3f',
outlines='head', show=True, verbose=None):
"""Plot the topomap of the power spectral density across epochs
Parameters
----------
bands : list of tuple | None
The lower and upper frequency and the name for that band. If None,
(default) expands to:
bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
(12, 30, 'Beta'), (30, 45, 'Gamma')]
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the
output equals vmax(data). Defaults to None.
proj : bool
Apply projection.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz.
The default value is a window half-bandwidth of 4 Hz.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
ch_type : {None, 'mag', 'grad', 'planar1', 'planar2', 'eeg'}
The channel type to plot. For 'grad', the gradiometers are
collected in
pairs and the RMS for each pair is plotted. If None, defaults to
'mag' if MEG data are present and to 'eeg' if only EEG data are
present.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
agg_fun : callable
The function used to aggregate over frequencies.
Defaults to np.sum if normalize is True, else np.mean.
dB : bool
If True, transform data to decibels (with ``10 * np.log10(data)``)
following the application of `agg_fun`. Only valid if normalize
is False.
n_jobs : int
Number of jobs to run in parallel.
normalize : bool
If True, each band will be divided by the total power. Defaults to
False.
cbar_fmt : str
The colorbar format. Defaults to '%0.3f'.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
show : bool
Show figure if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
return plot_epochs_psd_topomap(
self, bands=bands, vmin=vmin, vmax=vmax, proj=proj,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias, normalization=normalization,
ch_type=ch_type, layout=layout, cmap=cmap,
agg_fun=agg_fun, dB=dB, n_jobs=n_jobs, normalize=normalize,
cbar_fmt=cbar_fmt, outlines=outlines, show=show, verbose=None)
def plot_topo_image(self, layout=None, sigma=0., vmin=None, vmax=None,
colorbar=True, order=None, cmap='RdBu_r',
layout_scale=.95, title=None, scalings=None,
border='none', fig_facecolor='k', font_color='w',
show=True):
"""Plot Event Related Potential / Fields image on topographies
Parameters
----------
layout : instance of Layout
System specific sensor positions.
sigma : float
The standard deviation of the Gaussian smoothing to apply along the
epoch axis in the image. If 0., no smoothing is applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
(data.shape[1] == len(times)).
cmap : instance of matplotlib.pyplot.colormap
Colors to be mapped to the values.
layout_scale : float
scaling factor for adjusting the relative size of the layout
on the canvas.
title : str
Title of the figure.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If
None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
font_color : str | obj
The color of tick labels in the colorbar. Defaults to white.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
return plot_topo_image_epochs(
self, layout=layout, sigma=sigma, vmin=vmin, vmax=vmax,
colorbar=colorbar, order=order, cmap=cmap,
layout_scale=layout_scale, title=title, scalings=scalings,
border=border, fig_facecolor=fig_facecolor, font_color=font_color,
show=show)
@verbose
def drop_bad(self, reject='existing', flat='existing', verbose=None):
"""Drop bad epochs without retaining the epochs data.
Should be used before slicing operations.
.. warning:: This operation is slow since all epochs have to be read
from disk. To avoid reading epochs from disk multiple
times, use :func:`mne.Epochs.load_data()`.
Parameters
----------
reject : dict | str | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. If 'existing',
then the rejection parameters set at instantiation are used.
flat : dict | str | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done. If 'existing',
then the flat parameters set at instantiation are used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
epochs : instance of Epochs
The epochs with bad epochs dropped. Operates in-place.
Notes
-----
Dropping bad epochs can be done multiple times with different
``reject`` and ``flat`` parameters. However, once an epoch is
dropped, it is dropped forever, so if more lenient thresholds may
subsequently be applied, `epochs.copy` should be used.
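Examples
--------
A minimal sketch, assuming ``epochs`` is an existing Epochs instance; the
threshold values below are illustrative only and should be tuned to the
data at hand::
>>> epochs.drop_bad(reject=dict(grad=4000e-13, mag=4e-12, eeg=40e-6))  # doctest: +SKIP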
"""
if reject == 'existing':
if flat == 'existing' and self._bad_dropped:
return
reject = self.reject
if flat == 'existing':
flat = self.flat
if any(isinstance(rej, string_types) and rej != 'existing' for
rej in (reject, flat)):
raise ValueError('reject and flat, if strings, must be "existing"')
self._reject_setup(reject, flat)
self._get_data(out=False)
return self
def drop_log_stats(self, ignore=('IGNORED',)):
"""Compute the channel stats based on a drop_log from Epochs.
Parameters
----------
ignore : list
The drop reasons to ignore.
Returns
-------
perc : float
Total percentage of epochs dropped.
See Also
--------
plot_drop_log
"""
return _drop_log_stats(self.drop_log, ignore)
def plot_drop_log(self, threshold=0, n_max_plot=20, subject='Unknown',
color=(0.9, 0.9, 0.9), width=0.8, ignore=('IGNORED',),
show=True):
"""Show the channel stats based on a drop_log from Epochs
Parameters
----------
threshold : float
The percentage threshold to use to decide whether or not to
plot. Default is zero (always plot).
n_max_plot : int
Maximum number of channels to show stats for.
subject : str
The subject name to use in the title of the plot.
color : tuple | str
Color to use for the bars.
width : float
Width of the bars.
ignore : list
The drop reasons to ignore.
show : bool
Show figure if True.
Returns
-------
perc : float
Total percentage of epochs dropped.
fig : Instance of matplotlib.figure.Figure
The figure.
"""
if not self._bad_dropped:
raise ValueError("You cannot use plot_drop_log since bad "
"epochs have not yet been dropped. "
"Use epochs.drop_bad().")
from .viz import plot_drop_log
return plot_drop_log(self.drop_log, threshold, n_max_plot, subject,
color=color, width=width, ignore=ignore,
show=show)
def plot_image(self, picks=None, sigma=0., vmin=None,
vmax=None, colorbar=True, order=None, show=True,
units=None, scalings=None, cmap='RdBu_r',
fig=None, overlay_times=None):
"""Plot Event Related Potential / Fields image
Parameters
----------
picks : int | array-like of int | None
The indices of the channels to consider. If None, the first
five good channels are plotted.
sigma : float
The standard deviation of the Gaussian smoothing to apply along
the epoch axis in the image. If 0., no smoothing is applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
(data.shape[1] == len(times)).
show : bool
Show figure if True.
units : dict | None
The units of the channel types used for axes labels. If None,
defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
eog=1e6)`.
cmap : matplotlib colormap
Colormap.
fig : matplotlib figure | None
Figure instance to draw the image to. Figure must contain two
axes for drawing the single trials and evoked responses. If
None a new figure is created. Defaults to None.
overlay_times : array-like, shape (n_epochs,) | None
If not None the parameter is interpreted as time instants in
seconds and is added to the image. It is typically useful to
display reaction times. Note that it is defined with respect
to the order of epochs such that overlay_times[0] corresponds
to epochs[0].
Returns
-------
figs : list of matplotlib figures
One figure per channel displayed.
"""
return plot_epochs_image(self, picks=picks, sigma=sigma, vmin=vmin,
vmax=vmax, colorbar=colorbar, order=order,
show=show, units=units, scalings=scalings,
cmap=cmap, fig=fig,
overlay_times=overlay_times)
@verbose
def drop(self, indices, reason='USER', verbose=None):
"""Drop epochs based on indices or boolean mask
.. note:: The indices refer to the current set of undropped epochs
rather than the complete set of dropped and undropped epochs.
They are therefore not necessarily consistent with any
external indices (e.g., behavioral logs). To drop epochs
based on external criteria, do not use the ``preload=True``
flag when constructing an Epochs object, and call this
method before calling the :func:`mne.Epochs.drop_bad` or
:func:`mne.Epochs.load_data` methods.
Parameters
----------
indices : array of ints or bools
Set epochs to remove by specifying indices to remove or a boolean
mask to apply (where True values get removed). Events are
correspondingly modified.
reason : str
Reason for dropping the epochs ('ECG', 'timeout', 'blink' etc).
Default: 'USER'.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
epochs : instance of Epochs
The epochs with indices dropped. Operates in-place.
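Examples
--------
For example, assuming ``epochs`` is an existing Epochs instance, the first
and fourth epochs of the current selection can be removed with::
>>> epochs.drop([0, 3], reason='blink')  # doctest: +SKIP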
"""
indices = np.atleast_1d(indices)
if indices.ndim > 1:
raise ValueError("indices must be a scalar or a 1-d array")
if indices.dtype == bool:
indices = np.where(indices)[0]
out_of_bounds = (indices < 0) | (indices >= len(self.events))
if out_of_bounds.any():
first = indices[out_of_bounds][0]
raise IndexError("Epoch index %d is out of bounds" % first)
for ii in indices:
self.drop_log[self.selection[ii]].append(reason)
self.selection = np.delete(self.selection, indices)
self.events = np.delete(self.events, indices, axis=0)
if self.preload:
self._data = np.delete(self._data, indices, axis=0)
count = len(indices)
logger.info('Dropped %d epoch%s' % (count, '' if count == 1 else 's'))
return self
def _get_epoch_from_raw(self, idx, verbose=None):
"""Method to get a given epoch from disk"""
raise NotImplementedError
def _project_epoch(self, epoch):
"""Helper to process a raw epoch based on the delayed param"""
# whenever requested, the first epoch is being projected.
if (epoch is None) or isinstance(epoch, string_types):
# can happen if t < 0 or reject based on annotations
return epoch
proj = self._do_delayed_proj or self.proj
if self._projector is not None and proj is True:
epoch = np.dot(self._projector, epoch)
return epoch
@verbose
def _get_data(self, out=True, verbose=None):
"""Load all data, dropping bad epochs along the way
Parameters
----------
out : bool
Return the data. Setting this to False is used to reject bad
epochs without caching all the data, which saves memory.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
"""
n_events = len(self.events)
# in case there are no good events
if self.preload:
# we will store our result in our existing array
data = self._data
else:
# we start out with an empty array, allocate only if necessary
data = np.empty((0, len(self.info['ch_names']), len(self.times)))
logger.info('Loading data for %s events and %s original time '
'points ...' % (n_events, len(self._raw_times)))
if self._bad_dropped:
if not out:
return
if self.preload:
return data
# we need to load from disk, drop, and return data
for idx in range(n_events):
# faster to pre-allocate memory here
epoch_noproj = self._get_epoch_from_raw(idx)
epoch_noproj = self._detrend_offset_decim(epoch_noproj)
if self._do_delayed_proj:
epoch_out = epoch_noproj
else:
epoch_out = self._project_epoch(epoch_noproj)
if idx == 0:
data = np.empty((n_events, len(self.ch_names),
len(self.times)), dtype=epoch_out.dtype)
data[idx] = epoch_out
else:
# bads need to be dropped, this might occur after a preload
# e.g., when calling drop_bad w/new params
good_idx = []
n_out = 0
assert n_events == len(self.selection)
for idx, sel in enumerate(self.selection):
if self.preload: # from memory
if self._do_delayed_proj:
epoch_noproj = self._data[idx]
epoch = self._project_epoch(epoch_noproj)
else:
epoch_noproj = None
epoch = self._data[idx]
else: # from disk
epoch_noproj = self._get_epoch_from_raw(idx)
epoch_noproj = self._detrend_offset_decim(epoch_noproj)
epoch = self._project_epoch(epoch_noproj)
epoch_out = epoch_noproj if self._do_delayed_proj else epoch
is_good, offending_reason = self._is_good_epoch(epoch)
if not is_good:
self.drop_log[sel] += offending_reason
continue
good_idx.append(idx)
# store the epoch if there is a reason to (output or update)
if out or self.preload:
# faster to pre-allocate, then trim as necessary
if n_out == 0 and not self.preload:
data = np.empty((n_events, epoch_out.shape[0],
epoch_out.shape[1]),
dtype=epoch_out.dtype, order='C')
data[n_out] = epoch_out
n_out += 1
self._bad_dropped = True
logger.info("%d bad epochs dropped" % (n_events - len(good_idx)))
# Now update our properties
if len(good_idx) == 0: # silly fix for old numpy index error
self.selection = np.array([], int)
self.events = np.empty((0, 3))
else:
self.selection = self.selection[good_idx]
self.events = np.atleast_2d(self.events[good_idx])
# adjust the data size if there is a reason to (output or update)
if out or self.preload:
data.resize((n_out,) + data.shape[1:], refcheck=False)
return data if out else None
def get_data(self):
"""Get all epochs as a 3D array
Returns
-------
data : array of shape (n_epochs, n_channels, n_times)
A copy of the epochs data.
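Examples
--------
For example, assuming ``epochs`` is an existing Epochs instance::
>>> data = epochs.get_data()  # doctest: +SKIP
>>> n_epochs, n_channels, n_times = data.shape  # doctest: +SKIP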
"""
return self._get_data()
def __len__(self):
"""The number of epochs
Returns
-------
n_epochs : int
The number of remaining epochs.
Notes
-----
This function only works if bad epochs have been dropped.
Examples
--------
This can be used as::
>>> epochs.drop_bad() # doctest: +SKIP
>>> len(epochs) # doctest: +SKIP
43
>>> len(epochs.events) # doctest: +SKIP
43
"""
if not self._bad_dropped:
raise RuntimeError('Since bad epochs have not been dropped, the '
'length of the Epochs is not known. Load the '
'Epochs with preload=True, or call '
'Epochs.drop_bad(). To find the number '
'of events in the Epochs, use '
'len(Epochs.events).')
return len(self.events)
def __iter__(self):
"""Function to make iteration over epochs easy
Notes
-----
This enables the use of this Python pattern::
>>> for epoch in epochs: # doctest: +SKIP
>>> print(epoch) # doctest: +SKIP
Where ``epoch`` is given by successive outputs of
:func:`mne.Epochs.next`.
"""
self._current = 0
while True:
x = self.next()
if x is None:
return
yield x
def next(self, return_event_id=False):
"""Iterate over epoch data.
Parameters
----------
return_event_id : bool
If True, return both the epoch data and an event_id.
Returns
-------
epoch : array of shape (n_channels, n_times)
The epoch data.
event_id : int
The event id. Only returned if ``return_event_id`` is ``True``.
"""
if self.preload:
if self._current >= len(self._data):
return # signal the end
epoch = self._data[self._current]
self._current += 1
else:
is_good = False
while not is_good:
if self._current >= len(self.events):
return # signal the end properly
epoch_noproj = self._get_epoch_from_raw(self._current)
epoch_noproj = self._detrend_offset_decim(epoch_noproj)
epoch = self._project_epoch(epoch_noproj)
self._current += 1
is_good, _ = self._is_good_epoch(epoch)
# If delayed-ssp mode, pass 'virgin' data after rejection decision.
if self._do_delayed_proj:
epoch = epoch_noproj
if not return_event_id:
return epoch
else:
return epoch, self.events[self._current - 1][-1]
@property
def tmin(self):
return self.times[0]
@property
def tmax(self):
return self.times[-1]
def __repr__(self):
""" Build string representation"""
s = 'n_events : %s ' % len(self.events)
s += '(all good)' if self._bad_dropped else '(good & bad)'
s += ', tmin : %s (s)' % self.tmin
s += ', tmax : %s (s)' % self.tmax
s += ', baseline : %s' % str(self.baseline)
if len(self.event_id) > 1:
counts = ['%r: %i' % (k, sum(self.events[:, 2] == v))
for k, v in sorted(self.event_id.items())]
s += ',\n %s' % ', '.join(counts)
class_name = self.__class__.__name__
if class_name == '_BaseEpochs':
class_name = 'Epochs'
return '<%s | %s>' % (class_name, s)
def _key_match(self, key):
"""Helper function for event dict use"""
if key not in self.event_id:
raise KeyError('Event "%s" is not in Epochs.' % key)
return self.events[:, 2] == self.event_id[key]
def __getitem__(self, item):
"""Return an Epochs object with a copied subset of epochs
Parameters
----------
item : slice, array-like, str, or list
See below for use cases.
Returns
-------
epochs : instance of Epochs
See below for use cases.
Notes
-----
Epochs can be accessed as ``epochs[...]`` in several ways:
1. ``epochs[idx]``: Return ``Epochs`` object with a subset of
epochs (supports single index and python-style slicing).
2. ``epochs['name']``: Return ``Epochs`` object with a copy of the
subset of epochs corresponding to an experimental condition as
specified by 'name'.
If conditions are tagged by names separated by '/' (e.g.
'audio/left', 'audio/right'), and 'name' is not in itself an
event key, this selects every event whose condition contains
the 'name' tag (e.g., 'left' matches 'audio/left' and
'visual/left'; but not 'audio_left'). Note that tags like
'auditory/left' and 'left/auditory' will be treated the
same way when accessed using tags.
3. ``epochs[['name_1', 'name_2', ... ]]``: Return ``Epochs`` object
with a copy of the subset of epochs corresponding to multiple
experimental conditions as specified by
``'name_1', 'name_2', ...`` .
If conditions are separated by '/', selects every item containing
every list tag (e.g. ['audio', 'left'] selects 'audio/left' and
'audio/center/left', but not 'audio/right').
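Examples
--------
For example, assuming ``epochs`` was created with conditions named
'audio/left', 'audio/right', 'visual/left' and 'visual/right'::
>>> epochs[:10]  # first ten epochs  # doctest: +SKIP
>>> epochs['audio/left']  # a single condition  # doctest: +SKIP
>>> epochs['left']  # every condition tagged 'left'  # doctest: +SKIP
>>> epochs[['audio', 'left']]  # conditions tagged 'audio' and 'left'  # doctest: +SKIP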
"""
data = self._data
del self._data
epochs = self.copy()
self._data, epochs._data = data, data
del self
key = item
del item
if isinstance(key, string_types):
key = [key]
if isinstance(key, (list, tuple)) and isinstance(key[0], string_types):
if any('/' in k_i for k_i in epochs.event_id.keys()):
if any(k_e not in epochs.event_id for k_e in key):
# Select a given key if the requested set of
# '/'-separated types are a subset of the types in that key
key = [k for k in epochs.event_id.keys()
if all(set(k_i.split('/')).issubset(k.split('/'))
for k_i in key)]
if len(key) == 0:
raise KeyError('Attempting selection of events via '
'multiple/partial matching, but no '
'event matches all criteria.')
select = np.any(np.atleast_2d([epochs._key_match(k)
for k in key]), axis=0)
epochs.name = '+'.join(key)
else:
select = key if isinstance(key, slice) else np.atleast_1d(key)
key_selection = epochs.selection[select]
for k in np.setdiff1d(epochs.selection, key_selection):
epochs.drop_log[k] = ['IGNORED']
epochs.selection = key_selection
epochs.events = np.atleast_2d(epochs.events[select])
if epochs.preload:
# ensure that each Epochs instance owns its own data so we can
# resize later if necessary
epochs._data = np.require(epochs._data[select], requirements=['O'])
# update event id to reflect new content of epochs
epochs.event_id = dict((k, v) for k, v in epochs.event_id.items()
if v in epochs.events[:, 2])
return epochs
def crop(self, tmin=None, tmax=None):
"""Crops a time interval from epochs object.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
Returns
-------
epochs : instance of Epochs
The cropped epochs.
Notes
-----
Unlike Python slices, MNE time intervals include both their end points;
crop(tmin, tmax) returns the interval tmin <= t <= tmax.
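Examples
--------
For example, assuming ``epochs`` is a preloaded Epochs instance, only the
post-stimulus interval can be kept with::
>>> epochs.crop(tmin=0.)  # doctest: +SKIP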
"""
# XXX this could be made to work on non-preloaded data...
if not self.preload:
raise RuntimeError('Modifying data of epochs is only supported '
'when preloading is used. Use preload=True '
'in the constructor.')
if tmin is None:
tmin = self.tmin
elif tmin < self.tmin:
warn('tmin is not in epochs time interval. tmin is set to '
'epochs.tmin')
tmin = self.tmin
if tmax is None:
tmax = self.tmax
elif tmax > self.tmax:
warn('tmax is not in epochs time interval. tmax is set to '
'epochs.tmax')
tmax = self.tmax
tmask = _time_mask(self.times, tmin, tmax, sfreq=self.info['sfreq'])
self.times = self.times[tmask]
self._raw_times = self._raw_times[tmask]
self._data = self._data[:, :, tmask]
return self
@verbose
def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=1,
verbose=None):
"""Resample preloaded data
Parameters
----------
sfreq : float
New sample rate to use
npad : int | str
Amount to pad the start and end of the data.
Can also be "auto" to use a padding that will result in
a power-of-two size (can be much faster).
window : string or tuple
Window to use in resampling. See scipy.signal.resample.
n_jobs : int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Returns
-------
epochs : instance of Epochs
The resampled epochs object.
See Also
--------
mne.Epochs.savgol_filter
mne.io.Raw.resample
Notes
-----
For some data, it may be more accurate to use npad=0 to reduce
artifacts. This is dataset dependent -- check your data!
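Examples
--------
For example, assuming ``epochs`` is a preloaded Epochs instance, the data
can be downsampled to 100 Hz with::
>>> epochs.resample(100., npad='auto')  # doctest: +SKIP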
"""
# XXX this could operate on non-preloaded data, too
if not self.preload:
raise RuntimeError('Can only resample preloaded data')
o_sfreq = self.info['sfreq']
self._data = resample(self._data, sfreq, o_sfreq, npad, window=window,
n_jobs=n_jobs)
# adjust indirectly affected variables
self.info['sfreq'] = float(sfreq)
self.times = (np.arange(self._data.shape[2], dtype=np.float) /
sfreq + self.times[0])
return self
def copy(self):
"""Return copy of Epochs instance"""
raw = self._raw
del self._raw
new = deepcopy(self)
self._raw = raw
new._raw = raw
return new
def save(self, fname, split_size='2GB'):
"""Save epochs in a fif file
Parameters
----------
fname : str
The name of the file, which should end with -epo.fif or
-epo.fif.gz.
split_size : string | int
Large raw files are automatically split into multiple pieces. This
parameter specifies the maximum size of each piece. If the
parameter is an integer, it specifies the size in Bytes. It is
also possible to pass a human-readable string, e.g., 100MB.
Note: Due to FIFF file limitations, the maximum split size is 2GB.
.. versionadded:: 0.10.0
Notes
-----
Bad epochs will be dropped before saving the epochs to disk.
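Examples
--------
A minimal sketch, assuming ``epochs`` is an existing Epochs instance; the
file name below is only illustrative, but it must end with -epo.fif or
-epo.fif.gz::
>>> epochs.save('sample-epo.fif')  # doctest: +SKIP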
"""
check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz'))
split_size = _get_split_size(split_size)
# to know the length accurately. The get_data() call would drop
# bad epochs anyway
self.drop_bad()
total_size = self[0].get_data().nbytes * len(self)
n_parts = int(np.ceil(total_size / float(split_size)))
epoch_idxs = np.array_split(np.arange(len(self)), n_parts)
for part_idx, epoch_idx in enumerate(epoch_idxs):
this_epochs = self[epoch_idx] if n_parts > 1 else self
# avoid missing event_ids in splits
this_epochs.event_id = self.event_id
_save_split(this_epochs, fname, part_idx, n_parts)
def equalize_event_counts(self, event_ids, method='mintime', copy=None):
"""Equalize the number of trials in each condition
It tries to make the remaining epochs occur as close as possible in
time. This method works based on the idea that if there happened to be
some time-varying (like on the scale of minutes) noise characteristics
during a recording, they could be compensated for (to some extent) in
the equalization process. This method thus seeks to reduce any of
those effects by minimizing the differences in the times of the events
in the two sets of epochs. For example, if one had event times
[1, 2, 3, 4, 120, 121] and the other one had [3.5, 4.5, 120.5, 121.5],
it would remove events at times [1, 2] in the first epochs and not
[120, 121].
Parameters
----------
event_ids : list
The event types to equalize. Each entry in the list can either be
a str (single event) or a list of str. In the case where one of
the entries is a list of str, event_ids in that list will be
grouped together before equalizing trial counts across conditions.
In the case where partial matching is used (using '/' in
`event_ids`), `event_ids` will be matched according to the
provided tags, that is, processing works as if the event_ids
matched by the provided tags had been supplied instead.
The event_ids must identify nonoverlapping subsets of the epochs.
method : str
If 'truncate', events will be truncated from the end of each event
list. If 'mintime', timing differences between each event list
will be minimized.
copy : bool
This parameter has been deprecated and will be removed in 0.14.
Use inst.copy() instead.
Whether to return a new instance or modify in place.
Returns
-------
epochs : instance of Epochs
The modified Epochs instance.
indices : array of int
Indices from the original events list that were dropped.
Notes
-----
For example (if epochs.event_id was {'Left': 1, 'Right': 2,
'Nonspatial': 3}):
epochs.equalize_event_counts([['Left', 'Right'], 'Nonspatial'])
would equalize the number of trials in the 'Nonspatial' condition with
the total number of trials in the 'Left' and 'Right' conditions.
If multiple indices are provided (e.g. 'Left' and 'Right' in the
example above), it is not guaranteed that after equalization, the
conditions will contribute evenly. E.g., it is possible to end up
with 70 'Nonspatial' trials, 69 'Left' and 1 'Right'.
"""
epochs = _check_copy_dep(self, copy)
if len(event_ids) == 0:
raise ValueError('event_ids must have at least one element')
if not epochs._bad_dropped:
epochs.drop_bad()
# figure out how to equalize
eq_inds = list()
# deal with hierarchical tags
ids = epochs.event_id
orig_ids = list(event_ids)
tagging = False
if "/" in "".join(ids):
# make string inputs a list of length 1
event_ids = [[x] if isinstance(x, string_types) else x
for x in event_ids]
for ids_ in event_ids: # check if tagging is attempted
if any([id_ not in ids for id_ in ids_]):
tagging = True
# 1. treat everything that's not in event_id as a tag
# 2a. for tags, find all the event_ids matched by the tags
# 2b. for non-tag ids, just pass them directly
# 3. do this for every input
event_ids = [[k for k in ids if all((tag in k.split("/")
for tag in id_))] # find ids matching all tags
if all(id__ not in ids for id__ in id_)
else id_ # straight pass for non-tag inputs
for id_ in event_ids]
for ii, id_ in enumerate(event_ids):
if len(id_) == 0:
raise KeyError(orig_ids[ii] + " not found in the "
"epoch object's event_id.")
elif len(set([sub_id in ids for sub_id in id_])) != 1:
err = ("Don't mix hierarchical and regular event_ids"
" like in \'%s\'." % ", ".join(id_))
raise ValueError(err)
# raise for non-orthogonal tags
if tagging is True:
events_ = [set(epochs[x].events[:, 0]) for x in event_ids]
doubles = events_[0].intersection(events_[1])
if len(doubles):
raise ValueError("The two sets of epochs are "
"overlapping. Provide an "
"orthogonal selection.")
for eq in event_ids:
eq = np.atleast_1d(eq)
# eq is now a list of types
key_match = np.zeros(epochs.events.shape[0])
for key in eq:
key_match = np.logical_or(key_match, epochs._key_match(key))
eq_inds.append(np.where(key_match)[0])
event_times = [epochs.events[e, 0] for e in eq_inds]
indices = _get_drop_indices(event_times, method)
# need to re-index indices
indices = np.concatenate([e[idx] for e, idx in zip(eq_inds, indices)])
epochs.drop(indices, reason='EQUALIZED_COUNT')
# actually remove the indices
return epochs, indices
def _check_baseline(baseline, tmin, tmax, sfreq):
"""Helper to check for a valid baseline"""
if baseline is not None:
if not isinstance(baseline, tuple) or len(baseline) != 2:
raise ValueError('`baseline=%s` is an invalid argument.'
% str(baseline))
baseline_tmin, baseline_tmax = baseline
tstep = 1. / float(sfreq)
if baseline_tmin is None:
baseline_tmin = tmin
baseline_tmin = float(baseline_tmin)
if baseline_tmax is None:
baseline_tmax = tmax
baseline_tmax = float(baseline_tmax)
if baseline_tmin < tmin - tstep:
raise ValueError(
"Baseline interval (tmin = %s) is outside of epoch "
"data (tmin = %s)" % (baseline_tmin, tmin))
if baseline_tmax > tmax + tstep:
raise ValueError(
"Baseline interval (tmax = %s) is outside of epoch "
"data (tmax = %s)" % (baseline_tmax, tmax))
if baseline_tmin > baseline_tmax:
raise ValueError(
"Baseline min (%s) must be less than baseline max (%s)"
% (baseline_tmin, baseline_tmax))
del baseline_tmin, baseline_tmax
def _drop_log_stats(drop_log, ignore=('IGNORED',)):
"""
Parameters
----------
drop_log : list of lists
Epoch drop log from Epochs.drop_log.
ignore : list
The drop reasons to ignore.
Returns
-------
perc : float
Total percentage of epochs dropped.
"""
if not isinstance(drop_log, list) or not isinstance(drop_log[0], list):
raise ValueError('drop_log must be a list of lists')
perc = 100 * np.mean([len(d) > 0 for d in drop_log
if not any(r in ignore for r in d)])
return perc
class Epochs(_BaseEpochs):
"""Epochs extracted from a Raw instance
Parameters
----------
raw : Raw object
An instance of Raw.
events : array of int, shape (n_events, 3)
The events typically returned by the read_events function.
If some events don't match the events of interest as specified
by event_id, they will be marked as 'IGNORED' in the drop log.
event_id : int | list of int | dict | None
The id of the event to consider. If dict,
the keys can later be used to access associated events. Example:
dict(auditory=1, visual=3). If int, a dict will be created with
the id as string. If a list, all events with the IDs specified
in the list are used. If None, all events will be used,
and a dict is created with string integer names corresponding
to the event id integers.
tmin : float
Start time before event. If nothing is provided, defaults to -0.2
tmax : float
End time after event. If nothing is provided, defaults to 0.5
baseline : None or tuple of length 2 (default (None, 0))
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
The baseline (a, b) includes both endpoints, i.e. all
timepoints t such that a <= t <= b.
picks : array-like of int | None (default)
Indices of channels to include (if None, all channels are used).
name : string
Comment that describes the Epochs data created.
preload : boolean
Load all epochs from disk when creating the object
or wait before accessing each epoch (more memory
efficient but can be slower).
reject : dict | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
flat : dict | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
proj : bool | 'delayed'
Apply SSP projection vectors. If proj is 'delayed' and reject is not
None the single epochs will be projected before the rejection
decision, but used in unprojected state if they are kept.
This way deciding which projection vectors are good can be postponed
to the evoked stage without resulting in lower epoch counts and
without producing results different from early SSP application
given comparable parameters. Note that in this case baselining,
detrending and temporal decimation will be postponed.
If proj is False no projections will be applied which is the
recommended value if SSPs are not used for cleaning the data.
decim : int
Factor by which to downsample the data from the raw file upon import.
Warning: This simply selects every nth sample, data is not filtered
here. If data is not properly filtered, aliasing artifacts may occur.
reject_tmin : scalar | None
Start of the time window used to reject epochs (with the default None,
the window will start with tmin).
reject_tmax : scalar | None
End of the time window used to reject epochs (with the default None,
the window will end with tmax).
detrend : int | None
If 0 or 1, the data channels (MEG and EEG) will be detrended when
loaded. 0 is a constant (DC) detrend, 1 is a linear detrend. None
is no detrending. Note that detrending is performed before baseline
correction. If no DC offset is preferred (zeroth order detrending),
either turn off baseline correction, as this may introduce a DC
shift, or set baseline correction to use the entire time interval
(will yield equivalent results but be slower).
add_eeg_ref : bool
If True, an EEG average reference will be added (unless one
already exists).
on_missing : str
What to do if one or several event ids are not found in the recording.
Valid keys are 'error' | 'warning' | 'ignore'
Default is 'error'. If on_missing is 'warning' it will proceed but
warn; if 'ignore' it will proceed silently. Note that if none of the
event ids are found in the data, an error will be
automatically generated irrespective of this parameter.
reject_by_annotation : bool
Whether to reject based on annotations. If True (default), epochs
overlapping with segments whose description begins with ``'bad'`` are
rejected. If False, no rejection based on annotations is performed.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to raw.verbose.
Attributes
----------
info: dict
Measurement info.
event_id : dict
Names of conditions corresponding to event_ids.
ch_names : list of string
List of channel names.
selection : array
List of indices of selected events (not dropped or ignored etc.). For
example, if the original event array had 4 events and the second event
has been dropped, this attribute would be np.array([0, 2, 3]).
preload : bool
Indicates whether epochs are in memory.
drop_log : list of lists
A list of the same length as the event array used to initialize the
Epochs object. If the i-th original event is still part of the
selection, drop_log[i] will be an empty list; otherwise it will be
a list of the reasons the event is no longer in the selection, e.g.:
'IGNORED' if it isn't part of the current subset defined by the user;
'NO_DATA' or 'TOO_SHORT' if epoch didn't contain enough data;
names of channels that exceeded the amplitude threshold;
'EQUALIZED_COUNT' (see equalize_event_counts);
or 'USER' for user-defined reasons (see drop method).
verbose : bool, str, int, or None
See above.
See Also
--------
mne.epochs.combine_event_ids
mne.Epochs.equalize_event_counts
Notes
-----
When accessing data, Epochs are detrended, baseline-corrected, and
decimated, then projectors are (optionally) applied.
For indexing and slicing using ``epochs[...]``, see
:func:`mne.Epochs.__getitem__`.
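Examples
--------
A minimal construction sketch, assuming ``raw`` is an existing Raw instance
and ``events`` is an events array as returned by the read_events function::
>>> event_id = dict(auditory=1, visual=3)  # doctest: +SKIP
>>> epochs = Epochs(raw, events, event_id, tmin=-0.2, tmax=0.5,
...                 baseline=(None, 0), preload=True)  # doctest: +SKIP
>>> evoked = epochs['auditory'].average()  # doctest: +SKIP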
"""
@verbose
def __init__(self, raw, events, event_id=None, tmin=-0.2, tmax=0.5,
baseline=(None, 0), picks=None, name='Unknown', preload=False,
reject=None, flat=None, proj=True, decim=1, reject_tmin=None,
reject_tmax=None, detrend=None, add_eeg_ref=True,
on_missing='error', reject_by_annotation=True, verbose=None):
if not isinstance(raw, _BaseRaw):
raise ValueError('The first argument to `Epochs` must be an '
'instance of `mne.io.Raw`')
info = deepcopy(raw.info)
# proj is on when applied in Raw
proj = proj or raw.proj
self.reject_by_annotation = reject_by_annotation
# call _BaseEpochs constructor
super(Epochs, self).__init__(
info, None, events, event_id, tmin, tmax, baseline=baseline,
raw=raw, picks=picks, name=name, reject=reject, flat=flat,
decim=decim, reject_tmin=reject_tmin, reject_tmax=reject_tmax,
detrend=detrend, add_eeg_ref=add_eeg_ref, proj=proj,
on_missing=on_missing, preload_at_end=preload, verbose=verbose)
@verbose
def _get_epoch_from_raw(self, idx, verbose=None):
"""Load one epoch from disk
Returns
-------
data : array | str | None
If string it's details on rejection reason.
If None it means no data.
"""
if self._raw is None:
# This should never happen, as raw=None only if preload=True
raise ValueError('An error has occurred, no valid raw file found.'
' Please report this to the mne-python '
'developers.')
sfreq = self._raw.info['sfreq']
event_samp = self.events[idx, 0]
# Read a data segment
first_samp = self._raw.first_samp
start = int(round(event_samp + self.tmin * sfreq)) - first_samp
stop = start + len(self._raw_times)
data = self._raw._check_bad_segment(start, stop, self.picks,
self.reject_by_annotation)
return data
class EpochsArray(_BaseEpochs):
"""Epochs object from numpy array
Parameters
----------
data : array, shape (n_epochs, n_channels, n_times)
The channels' time series for each epoch.
info : instance of Info
Info dictionary. Consider using ``create_info`` to populate
this structure.
events : None | array of int, shape (n_events, 3)
The events typically returned by the read_events function.
If some events don't match the events of interest as specified
by event_id, they will be marked as 'IGNORED' in the drop log.
If None (default), all event values are set to 1 and event time-samples
are set to range(n_epochs).
tmin : float
Start time before event. If nothing provided, defaults to -0.2.
event_id : int | list of int | dict | None
The id of the event to consider. If dict,
the keys can later be used to access associated events. Example:
dict(auditory=1, visual=3). If int, a dict will be created with
the id as string. If a list, all events with the IDs specified
in the list are used. If None, all events will be used,
and a dict is created with string integer names corresponding
to the event id integers.
reject : dict | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
flat : dict | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
reject_tmin : scalar | None
Start of the time window used to reject epochs (with the default None,
the window will start with tmin).
reject_tmax : scalar | None
End of the time window used to reject epochs (with the default None,
the window will end with tmax).
baseline : None or tuple of length 2 (default: None)
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
proj : bool | 'delayed'
Apply SSP projection vectors. See :class:`mne.Epochs` for details.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to raw.verbose.
See Also
--------
io.RawArray, EvokedArray, create_info
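Examples
--------
A minimal sketch building epochs from a NumPy array; the channel names,
sampling rate and random data below are illustrative only::
>>> import numpy as np  # doctest: +SKIP
>>> from mne import create_info  # doctest: +SKIP
>>> data = np.random.randn(10, 5, 100)  # 10 epochs, 5 channels, 100 samples  # doctest: +SKIP
>>> info = create_info(['EEG%d' % i for i in range(1, 6)], 1000., 'eeg')  # doctest: +SKIP
>>> epochs = EpochsArray(data, info, tmin=-0.01)  # doctest: +SKIP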
"""
@verbose
def __init__(self, data, info, events=None, tmin=0, event_id=None,
reject=None, flat=None, reject_tmin=None,
reject_tmax=None, baseline=None, proj=True, verbose=None):
dtype = np.complex128 if np.any(np.iscomplex(data)) else np.float64
data = np.asanyarray(data, dtype=dtype)
if data.ndim != 3:
raise ValueError('Data must be a 3D array of shape (n_epochs, '
'n_channels, n_samples)')
if len(info['ch_names']) != data.shape[1]:
raise ValueError('Info and data must have same number of '
'channels.')
if events is None:
n_epochs = len(data)
events = np.c_[np.arange(n_epochs), np.zeros(n_epochs, int),
np.ones(n_epochs, int)]
if data.shape[0] != len(events):
raise ValueError('The number of epochs and the number of events '
'must match')
info = deepcopy(info) # do not modify original info
tmax = (data.shape[2] - 1) / info['sfreq'] + tmin
if event_id is None: # convert to int to make typing-checks happy
event_id = dict((str(e), int(e)) for e in np.unique(events[:, 2]))
super(EpochsArray, self).__init__(info, data, events, event_id, tmin,
tmax, baseline, reject=reject,
flat=flat, reject_tmin=reject_tmin,
reject_tmax=reject_tmax, decim=1,
add_eeg_ref=False, proj=proj)
if len(events) != in1d(self.events[:, 2],
list(self.event_id.values())).sum():
raise ValueError('The events must only contain event numbers from '
'event_id')
for ii, e in enumerate(self._data):
# This is safe without assignment b/c there is no decim
self._detrend_offset_decim(e)
self.drop_bad()
def combine_event_ids(epochs, old_event_ids, new_event_id, copy=True):
"""Collapse event_ids from an epochs instance into a new event_id
Parameters
----------
epochs : instance of Epochs
The epochs to operate on.
old_event_ids : str, or list
Conditions to collapse together.
new_event_id : dict, or int
A one-element dict (or a single integer) for the new
condition. Note that for safety, this cannot be any
existing id (in epochs.event_id.values()).
copy : bool
Whether to return a new instance or modify in place.
Notes
-----
For example (if epochs.event_id was {'Left': 1, 'Right': 2}):
combine_event_ids(epochs, ['Left', 'Right'], {'Directional': 12})
would create a 'Directional' entry in epochs.event_id replacing
'Left' and 'Right' (combining their trials).
"""
epochs = epochs.copy() if copy else epochs
old_event_ids = np.asanyarray(old_event_ids)
if isinstance(new_event_id, int):
new_event_id = {str(new_event_id): new_event_id}
else:
if not isinstance(new_event_id, dict):
raise ValueError('new_event_id must be a dict or int')
if not len(list(new_event_id.keys())) == 1:
raise ValueError('new_event_id dict must have one entry')
new_event_num = list(new_event_id.values())[0]
if not isinstance(new_event_num, int):
raise ValueError('new_event_id value must be an integer')
if new_event_num in epochs.event_id.values():
raise ValueError('new_event_id value must not already exist')
# could use .pop() here, but if a later one doesn't exist, we're
# in trouble, so run them all here and pop() later
old_event_nums = np.array([epochs.event_id[key] for key in old_event_ids])
# find the ones to replace
inds = np.any(epochs.events[:, 2][:, np.newaxis] ==
old_event_nums[np.newaxis, :], axis=1)
# replace the event numbers in the events list
epochs.events[inds, 2] = new_event_num
# delete old entries
for key in old_event_ids:
epochs.event_id.pop(key)
# add the new entry
epochs.event_id.update(new_event_id)
return epochs
def equalize_epoch_counts(epochs_list, method='mintime'):
"""Equalize the number of trials in multiple Epoch instances
It tries to make the remaining epochs occur as close as possible in
time. This method works based on the idea that if there happened to be some
time-varying (like on the scale of minutes) noise characteristics during
a recording, they could be compensated for (to some extent) in the
equalization process. This method thus seeks to reduce any of those effects
by minimizing the differences in the times of the events in the two sets of
epochs. For example, if one had event times [1, 2, 3, 4, 120, 121] and the
other one had [3.5, 4.5, 120.5, 121.5], it would remove events at times
[1, 2] in the first epochs and not [120, 121].
Note that this operates on the Epochs instances in-place.
Example:
equalize_epoch_counts([epochs1, epochs2])
Parameters
----------
epochs_list : list of Epochs instances
The Epochs instances to equalize trial counts for.
method : str
If 'truncate', events will be truncated from the end of each event
list. If 'mintime', timing differences between each event list will be
minimized.
"""
if not all(isinstance(e, _BaseEpochs) for e in epochs_list):
raise ValueError('All inputs must be Epochs instances')
# make sure bad epochs are dropped
for e in epochs_list:
if not e._bad_dropped:
e.drop_bad()
event_times = [e.events[:, 0] for e in epochs_list]
indices = _get_drop_indices(event_times, method)
for e, inds in zip(epochs_list, indices):
e.drop(inds, reason='EQUALIZED_COUNT')
def _get_drop_indices(event_times, method):
"""Helper to get indices to drop from multiple event timing lists"""
small_idx = np.argmin([e.shape[0] for e in event_times])
small_e_times = event_times[small_idx]
if method not in ['mintime', 'truncate']:
raise ValueError('method must be either mintime or truncate, not '
'%s' % method)
indices = list()
for e in event_times:
if method == 'mintime':
mask = _minimize_time_diff(small_e_times, e)
else:
mask = np.ones(e.shape[0], dtype=bool)
mask[small_e_times.shape[0]:] = False
indices.append(np.where(np.logical_not(mask))[0])
return indices
def _fix_fill(fill):
"""Helper to fix bug on old scipy"""
if LooseVersion(scipy.__version__) < LooseVersion('0.12'):
fill = fill[:, np.newaxis]
return fill
def _minimize_time_diff(t_shorter, t_longer):
"""Find a boolean mask to minimize timing differences"""
from scipy.interpolate import interp1d
keep = np.ones((len(t_longer)), dtype=bool)
if len(t_shorter) == 0:
keep.fill(False)
return keep
scores = np.ones((len(t_longer)))
x1 = np.arange(len(t_shorter))
# The first set of keep masks to test
kwargs = dict(copy=False, bounds_error=False)
# this is a speed tweak, only exists for certain versions of scipy
if 'assume_sorted' in _get_args(interp1d.__init__):
kwargs['assume_sorted'] = True
shorter_interp = interp1d(x1, t_shorter, fill_value=t_shorter[-1],
**kwargs)
for ii in range(len(t_longer) - len(t_shorter)):
scores.fill(np.inf)
# set up the keep masks to test, eliminating any rows that are already
# gone
keep_mask = ~np.eye(len(t_longer), dtype=bool)[keep]
keep_mask[:, ~keep] = False
# Check every possible removal to see if it minimizes
x2 = np.arange(len(t_longer) - ii - 1)
t_keeps = np.array([t_longer[km] for km in keep_mask])
longer_interp = interp1d(x2, t_keeps, axis=1,
fill_value=_fix_fill(t_keeps[:, -1]),
**kwargs)
d1 = longer_interp(x1) - t_shorter
d2 = shorter_interp(x2) - t_keeps
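        # np.abs(d, d) takes the absolute value in-place (second arg is ``out``),
        # so each candidate removal is scored by its total |timing error| in
        # both interpolation directions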
scores[keep] = np.abs(d1, d1).sum(axis=1) + np.abs(d2, d2).sum(axis=1)
keep[np.argmin(scores)] = False
return keep
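# Worked example (numbers taken from the equalize_epoch_counts docstring above):
# with t_shorter = [3.5, 4.5, 120.5, 121.5] and t_longer = [1, 2, 3, 4, 120, 121],
# the greedy loop above drops the events at times 1 and 2 and keeps
# [3, 4, 120, 121], since removing the early events minimizes the interpolated
# timing mismatch between the two lists.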
@verbose
def _is_good(e, ch_names, channel_type_idx, reject, flat, full_report=False,
ignore_chs=[], verbose=None):
"""Test if data segment e is good according to the criteria
defined in reject and flat. If full_report=True, it will give
True/False as well as a list of all offending channels.
"""
bad_list = list()
has_printed = False
checkable = np.ones(len(ch_names), dtype=bool)
checkable[np.array([c in ignore_chs
for c in ch_names], dtype=bool)] = False
for refl, f, t in zip([reject, flat], [np.greater, np.less], ['', 'flat']):
if refl is not None:
for key, thresh in iteritems(refl):
idx = channel_type_idx[key]
name = key.upper()
if len(idx) > 0:
e_idx = e[idx]
deltas = np.max(e_idx, axis=1) - np.min(e_idx, axis=1)
checkable_idx = checkable[idx]
idx_deltas = np.where(np.logical_and(f(deltas, thresh),
checkable_idx))[0]
if len(idx_deltas) > 0:
ch_name = [ch_names[idx[i]] for i in idx_deltas]
if (not has_printed):
logger.info(' Rejecting %s epoch based on %s : '
'%s' % (t, name, ch_name))
has_printed = True
if not full_report:
return False
else:
bad_list.extend(ch_name)
if not full_report:
return True
else:
if bad_list == []:
return True, None
else:
return False, bad_list
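# Usage sketch (illustrative thresholds, not defaults): ``reject`` and ``flat``
# map channel types to peak-to-peak limits in the channels' own units, e.g.::
#
#     reject = dict(grad=4000e-13, mag=4e-12, eeg=40e-6, eog=150e-6)
#     flat = dict(grad=1e-13)
#
# A segment fails if any checkable channel's peak-to-peak amplitude exceeds its
# ``reject`` limit or falls below its ``flat`` limit.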
def _read_one_epoch_file(f, tree, fname, preload):
"""Helper to read a single FIF file"""
with f as fid:
# Read the measurement info
info, meas = read_meas_info(fid, tree, clean_bads=True)
info['filename'] = fname
events, mappings = _read_events_fif(fid, tree)
# Locate the data of interest
processed = dir_tree_find(meas, FIFF.FIFFB_PROCESSED_DATA)
if len(processed) == 0:
raise ValueError('Could not find processed data')
epochs_node = dir_tree_find(tree, FIFF.FIFFB_MNE_EPOCHS)
if len(epochs_node) == 0:
# before version 0.11 we errantly saved with this tag instead of
# an MNE tag
epochs_node = dir_tree_find(tree, FIFF.FIFFB_MNE_EPOCHS)
if len(epochs_node) == 0:
epochs_node = dir_tree_find(tree, 122) # 122 used before v0.11
if len(epochs_node) == 0:
raise ValueError('Could not find epochs data')
my_epochs = epochs_node[0]
# Now find the data in the block
name = None
data = None
data_tag = None
bmin, bmax = None, None
baseline = None
selection = None
drop_log = None
for k in range(my_epochs['nent']):
kind = my_epochs['directory'][k].kind
pos = my_epochs['directory'][k].pos
if kind == FIFF.FIFF_FIRST_SAMPLE:
tag = read_tag(fid, pos)
first = int(tag.data)
elif kind == FIFF.FIFF_LAST_SAMPLE:
tag = read_tag(fid, pos)
last = int(tag.data)
elif kind == FIFF.FIFF_COMMENT:
tag = read_tag(fid, pos)
name = tag.data
elif kind == FIFF.FIFF_EPOCH:
# delay reading until later
fid.seek(pos, 0)
data_tag = read_tag_info(fid)
data_tag.pos = pos
elif kind in [FIFF.FIFF_MNE_BASELINE_MIN, 304]:
# Constant 304 was used before v0.11
tag = read_tag(fid, pos)
bmin = float(tag.data)
elif kind in [FIFF.FIFF_MNE_BASELINE_MAX, 305]:
# Constant 305 was used before v0.11
tag = read_tag(fid, pos)
bmax = float(tag.data)
elif kind == FIFF.FIFFB_MNE_EPOCHS_SELECTION:
tag = read_tag(fid, pos)
selection = np.array(tag.data)
elif kind == FIFF.FIFFB_MNE_EPOCHS_DROP_LOG:
tag = read_tag(fid, pos)
drop_log = json.loads(tag.data)
if bmin is not None or bmax is not None:
baseline = (bmin, bmax)
n_samp = last - first + 1
logger.info(' Found the data of interest:')
logger.info(' t = %10.2f ... %10.2f ms (%s)'
% (1000 * first / info['sfreq'],
1000 * last / info['sfreq'], name))
if info['comps'] is not None:
logger.info(' %d CTF compensation matrices available'
% len(info['comps']))
# Inspect the data
if data_tag is None:
raise ValueError('Epochs data not found')
epoch_shape = (len(info['ch_names']), n_samp)
expected = len(events) * np.prod(epoch_shape)
if data_tag.size // 4 - 4 != expected: # 32-bit floats stored
raise ValueError('Incorrect number of samples (%d instead of %d)'
% (data_tag.size // 4, expected))
# Calibration factors
cals = np.array([[info['chs'][k]['cal'] *
info['chs'][k].get('scale', 1.0)]
for k in range(info['nchan'])], np.float64)
# Read the data
if preload:
data = read_tag(fid, data_tag.pos).data.astype(np.float64)
data *= cals[np.newaxis, :, :]
# Put it all together
tmin = first / info['sfreq']
tmax = last / info['sfreq']
event_id = (dict((str(e), e) for e in np.unique(events[:, 2]))
if mappings is None else mappings)
# In case epochs didn't have a FIFF.FIFFB_MNE_EPOCHS_SELECTION tag
# (version < 0.8):
if selection is None:
selection = np.arange(len(events))
if drop_log is None:
            drop_log = [[] for _ in range(len(events))]
return (info, data, data_tag, events, event_id, tmin, tmax, baseline, name,
selection, drop_log, epoch_shape, cals)
@verbose
def read_epochs(fname, proj=True, add_eeg_ref=False, preload=True,
verbose=None):
"""Read epochs from a fif file
Parameters
----------
fname : str
The name of the file, which should end with -epo.fif or -epo.fif.gz.
proj : bool | 'delayed'
Apply SSP projection vectors. If proj is 'delayed' and reject is not
None the single epochs will be projected before the rejection
decision, but used in unprojected state if they are kept.
This way deciding which projection vectors are good can be postponed
to the evoked stage without resulting in lower epoch counts and
without producing results different from early SSP application
given comparable parameters. Note that in this case baselining,
detrending and temporal decimation will be postponed.
If proj is False no projections will be applied which is the
recommended value if SSPs are not used for cleaning the data.
add_eeg_ref : bool
If True, an EEG average reference will be added (unless one
already exists).
preload : bool
If True, read all epochs from disk immediately. If False, epochs will
be read on demand.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to raw.verbose.
Returns
-------
epochs : instance of Epochs
The epochs
"""
return EpochsFIF(fname, proj, add_eeg_ref, preload, verbose)
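# Usage sketch (hypothetical file name): read previously saved epochs lazily and
# load the data only when it is first needed::
#
#     epochs = read_epochs('subject01-epo.fif', preload=False)
#     data = epochs.get_data()  # epochs are read from disk here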
class _RawContainer(object):
def __init__(self, fid, data_tag, event_samps, epoch_shape, cals):
self.fid = fid
self.data_tag = data_tag
self.event_samps = event_samps
self.epoch_shape = epoch_shape
self.cals = cals
self.proj = False
def __del__(self):
self.fid.close()
class EpochsFIF(_BaseEpochs):
"""Epochs read from disk
Parameters
----------
fname : str
The name of the file, which should end with -epo.fif or -epo.fif.gz.
proj : bool | 'delayed'
Apply SSP projection vectors. If proj is 'delayed' and reject is not
None the single epochs will be projected before the rejection
decision, but used in unprojected state if they are kept.
This way deciding which projection vectors are good can be postponed
to the evoked stage without resulting in lower epoch counts and
without producing results different from early SSP application
given comparable parameters. Note that in this case baselining,
detrending and temporal decimation will be postponed.
If proj is False no projections will be applied which is the
recommended value if SSPs are not used for cleaning the data.
add_eeg_ref : bool
If True, an EEG average reference will be added (unless one
already exists).
preload : bool
If True, read all epochs from disk immediately. If False, epochs will
be read on demand.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to raw.verbose.
See Also
--------
mne.Epochs
mne.epochs.combine_event_ids
mne.Epochs.equalize_event_counts
"""
@verbose
def __init__(self, fname, proj=True, add_eeg_ref=True, preload=True,
verbose=None):
check_fname(fname, 'epochs', ('-epo.fif', '-epo.fif.gz'))
fnames = [fname]
ep_list = list()
raw = list()
for fname in fnames:
logger.info('Reading %s ...' % fname)
fid, tree, _ = fiff_open(fname)
next_fname = _get_next_fname(fid, fname, tree)
(info, data, data_tag, events, event_id, tmin, tmax, baseline,
name, selection, drop_log, epoch_shape, cals) = \
_read_one_epoch_file(fid, tree, fname, preload)
# here we ignore missing events, since users should already be
# aware of missing events if they have saved data that way
epoch = _BaseEpochs(
info, data, events, event_id, tmin, tmax, baseline,
on_missing='ignore', selection=selection, drop_log=drop_log,
add_eeg_ref=False, proj=False, verbose=False)
ep_list.append(epoch)
if not preload:
# store everything we need to index back to the original data
raw.append(_RawContainer(fiff_open(fname)[0], data_tag,
events[:, 0].copy(), epoch_shape,
cals))
if next_fname is not None:
fnames.append(next_fname)
(info, data, events, event_id, tmin, tmax, baseline, selection,
drop_log, _) = _concatenate_epochs(ep_list, with_data=preload)
# we need this uniqueness for non-preloaded data to work properly
if len(np.unique(events[:, 0])) != len(events):
raise RuntimeError('Event time samples were not unique')
# correct the drop log
assert len(drop_log) % len(fnames) == 0
step = len(drop_log) // len(fnames)
offsets = np.arange(step, len(drop_log) + 1, step)
for i1, i2 in zip(offsets[:-1], offsets[1:]):
other_log = drop_log[i1:i2]
for k, (a, b) in enumerate(zip(drop_log, other_log)):
if a == ['IGNORED'] and b != ['IGNORED']:
drop_log[k] = b
drop_log = drop_log[:step]
# call _BaseEpochs constructor
super(EpochsFIF, self).__init__(
info, data, events, event_id, tmin, tmax, baseline, raw=raw,
name=name, proj=proj, add_eeg_ref=add_eeg_ref,
preload_at_end=False, on_missing='ignore', selection=selection,
drop_log=drop_log, verbose=verbose)
# use the private property instead of drop_bad so that epochs
# are not all read from disk for preload=False
self._bad_dropped = True
@verbose
def _get_epoch_from_raw(self, idx, verbose=None):
"""Load one epoch from disk"""
# Find the right file and offset to use
event_samp = self.events[idx, 0]
for raw in self._raw:
idx = np.where(raw.event_samps == event_samp)[0]
if len(idx) == 1:
idx = idx[0]
size = np.prod(raw.epoch_shape) * 4
offset = idx * size
break
else:
# read the correct subset of the data
raise RuntimeError('Correct epoch could not be found, please '
'contact mne-python developers')
# the following is equivalent to this, but faster:
#
# >>> data = read_tag(raw.fid, raw.data_tag.pos).data.astype(float)
# >>> data *= raw.cals[np.newaxis, :, :]
# >>> data = data[idx]
#
# Eventually this could be refactored in io/tag.py if other functions
# could make use of it
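        #
        # Worked example of the offset arithmetic (hypothetical shape): for an
        # epoch_shape of (376 channels, 601 samples) stored as 32-bit floats,
        # size = 376 * 601 * 4 = 903904 bytes, so epoch ``idx`` starts at
        # data_tag.pos + idx * 903904 + 16 (16 bytes of FIF tag header).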
raw.fid.seek(raw.data_tag.pos + offset + 16, 0) # 16 = Tag header
data = np.fromstring(raw.fid.read(size), '>f4').astype(np.float64)
data.shape = raw.epoch_shape
data *= raw.cals
return data
def bootstrap(epochs, random_state=None):
"""Compute epochs selected by bootstrapping
Parameters
----------
epochs : Epochs instance
epochs data to be bootstrapped
random_state : None | int | np.random.RandomState
To specify the random generator state
Returns
-------
epochs : Epochs instance
The bootstrap samples
"""
if not epochs.preload:
raise RuntimeError('Modifying data of epochs is only supported '
'when preloading is used. Use preload=True '
'in the constructor.')
rng = check_random_state(random_state)
epochs_bootstrap = epochs.copy()
n_events = len(epochs_bootstrap.events)
idx = rng.randint(0, n_events, n_events)
epochs_bootstrap = epochs_bootstrap[idx]
return epochs_bootstrap
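# Usage sketch (illustrative): draw a bootstrap resample of preloaded epochs
# with a reproducible seed::
#
#     epochs_boot = bootstrap(epochs, random_state=0)
#     assert len(epochs_boot) == len(epochs)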
def _check_merge_epochs(epochs_list):
"""Aux function"""
if len(set(tuple(epochs.event_id.items()) for epochs in epochs_list)) != 1:
raise NotImplementedError("Epochs with unequal values for event_id")
if len(set(epochs.tmin for epochs in epochs_list)) != 1:
raise NotImplementedError("Epochs with unequal values for tmin")
if len(set(epochs.tmax for epochs in epochs_list)) != 1:
raise NotImplementedError("Epochs with unequal values for tmax")
if len(set(epochs.baseline for epochs in epochs_list)) != 1:
raise NotImplementedError("Epochs with unequal values for baseline")
@verbose
def add_channels_epochs(epochs_list, name='Unknown', add_eeg_ref=True,
verbose=None):
"""Concatenate channels, info and data from two Epochs objects
Parameters
----------
epochs_list : list of Epochs
Epochs object to concatenate.
name : str
Comment that describes the Epochs data created.
add_eeg_ref : bool
If True, an EEG average reference will be added (unless there is no
EEG in the data).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to True if any of the input epochs have verbose=True.
Returns
-------
epochs : instance of Epochs
Concatenated epochs.
"""
if not all(e.preload for e in epochs_list):
raise ValueError('All epochs must be preloaded.')
info = _merge_info([epochs.info for epochs in epochs_list])
data = [epochs.get_data() for epochs in epochs_list]
_check_merge_epochs(epochs_list)
for d in data:
if len(d) != len(data[0]):
raise ValueError('all epochs must be of the same length')
data = np.concatenate(data, axis=1)
if len(info['chs']) != data.shape[1]:
err = "Data shape does not match channel number in measurement info"
raise RuntimeError(err)
events = epochs_list[0].events.copy()
all_same = all(np.array_equal(events, epochs.events)
for epochs in epochs_list[1:])
if not all_same:
raise ValueError('Events must be the same.')
proj = any(e.proj for e in epochs_list) or add_eeg_ref
if verbose is None:
verbose = any(e.verbose for e in epochs_list)
epochs = epochs_list[0].copy()
epochs.info = info
epochs.picks = None
epochs.name = name
epochs.verbose = verbose
epochs.events = events
epochs.preload = True
epochs._bad_dropped = True
epochs._data = data
epochs._projector, epochs.info = setup_proj(epochs.info, add_eeg_ref,
activate=proj)
return epochs
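# Usage sketch (hypothetical variable names): merge the channels of two Epochs
# objects recorded simultaneously (identical events and time samples)::
#
#     epochs_combined = add_channels_epochs([epochs_meg, epochs_eeg])
#     # the channel counts add up; epoch count and times must already match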
def _compare_epochs_infos(info1, info2, ind):
"""Compare infos"""
info1._check_consistency()
info2._check_consistency()
if info1['nchan'] != info2['nchan']:
raise ValueError('epochs[%d][\'info\'][\'nchan\'] must match' % ind)
if info1['bads'] != info2['bads']:
raise ValueError('epochs[%d][\'info\'][\'bads\'] must match' % ind)
if info1['sfreq'] != info2['sfreq']:
raise ValueError('epochs[%d][\'info\'][\'sfreq\'] must match' % ind)
if set(info1['ch_names']) != set(info2['ch_names']):
raise ValueError('epochs[%d][\'info\'][\'ch_names\'] must match' % ind)
if len(info2['projs']) != len(info1['projs']):
raise ValueError('SSP projectors in epochs files must be the same')
if any(not _proj_equal(p1, p2) for p1, p2 in
zip(info2['projs'], info1['projs'])):
raise ValueError('SSP projectors in epochs files must be the same')
def _concatenate_epochs(epochs_list, with_data=True):
"""Auxiliary function for concatenating epochs."""
out = epochs_list[0]
data = [out.get_data()] if with_data else None
events = [out.events]
baseline, tmin, tmax = out.baseline, out.tmin, out.tmax
info = deepcopy(out.info)
verbose = out.verbose
drop_log = deepcopy(out.drop_log)
event_id = deepcopy(out.event_id)
selection = out.selection
for ii, epochs in enumerate(epochs_list[1:]):
_compare_epochs_infos(epochs.info, info, ii)
if not np.array_equal(epochs.times, epochs_list[0].times):
raise ValueError('Epochs must have same times')
if epochs.baseline != baseline:
raise ValueError('Baseline must be same for all epochs')
if with_data:
data.append(epochs.get_data())
events.append(epochs.events)
selection = np.concatenate((selection, epochs.selection))
drop_log.extend(epochs.drop_log)
event_id.update(epochs.event_id)
events = np.concatenate(events, axis=0)
if with_data:
data = np.concatenate(data, axis=0)
return (info, data, events, event_id, tmin, tmax, baseline, selection,
drop_log, verbose)
def _finish_concat(info, data, events, event_id, tmin, tmax, baseline,
selection, drop_log, verbose):
"""Helper to finish concatenation for epochs not read from disk"""
events[:, 0] = np.arange(len(events)) # arbitrary after concat
selection = np.where([len(d) == 0 for d in drop_log])[0]
out = _BaseEpochs(info, data, events, event_id, tmin, tmax,
baseline=baseline, add_eeg_ref=False,
selection=selection, drop_log=drop_log,
proj=False, on_missing='ignore', verbose=verbose)
out.drop_bad()
return out
def concatenate_epochs(epochs_list):
"""Concatenate a list of epochs into one epochs object
Parameters
----------
epochs_list : list
list of Epochs instances to concatenate (in order).
Returns
-------
epochs : instance of Epochs
The result of the concatenation (first Epochs instance passed in).
Notes
-----
.. versionadded:: 0.9.0
"""
return _finish_concat(*_concatenate_epochs(epochs_list))
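# Usage sketch (hypothetical variable names): stack epochs from two runs with
# matching info, times and baseline into a single Epochs object::
#
#     epochs_all = concatenate_epochs([epochs_run1, epochs_run2])
#     # the result contains the epochs of both runs, concatenated in order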
@verbose
def average_movements(epochs, head_pos=None, orig_sfreq=None, picks=None,
origin='auto', weight_all=True, int_order=8, ext_order=3,
destination=None, ignore_ref=False, return_mapping=False,
verbose=None):
"""Average data using Maxwell filtering, transforming using head positions
Parameters
----------
epochs : instance of Epochs
The epochs to operate on.
head_pos : array | tuple | None
The array should be of shape ``(N, 10)``, holding the position
parameters as returned by e.g. `read_head_pos`. For backward
compatibility, this can also be a tuple of ``(trans, rot t)``
as returned by `head_pos_to_trans_rot_t`.
orig_sfreq : float | None
The original sample frequency of the data (that matches the
event sample numbers in ``epochs.events``). Can be ``None``
if data have not been decimated or resampled.
picks : array-like of int | None
        If None, only MEG, EEG, SEEG, and ECoG channels are kept;
        otherwise the channel indices in picks are kept.
origin : array-like, shape (3,) | str
Origin of internal and external multipolar moment space in head
coords and in meters. The default is ``'auto'``, which means
a head-digitization-based origin fit.
weight_all : bool
If True, all channels are weighted by the SSS basis weights.
If False, only MEG channels are weighted, other channels
receive uniform weight per epoch.
int_order : int
Order of internal component of spherical expansion.
ext_order : int
Order of external component of spherical expansion.
destination : str | array-like, shape (3,) | None
The destination location for the head. Can be ``None``, which
will not change the head position, or a string path to a FIF file
containing a MEG device<->head transformation, or a 3-element array
giving the coordinates to translate to (with no rotations).
For example, ``destination=(0, 0, 0.04)`` would translate the bases
as ``--trans default`` would in MaxFilter™ (i.e., to the default
head location).
.. versionadded:: 0.12
ignore_ref : bool
If True, do not include reference channels in compensation. This
option should be True for KIT files, since Maxwell filtering
with reference channels is not currently supported.
return_mapping : bool
If True, return the mapping matrix.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
evoked : instance of Evoked
The averaged epochs.
See Also
--------
mne.preprocessing.maxwell_filter
mne.chpi.read_head_pos
Notes
-----
The Maxwell filtering version of this algorithm is described in [1]_,
in section V.B "Virtual signals and movement correction", equations
40-44. For additional validation, see [2]_.
Regularization has not been added because in testing it appears to
decrease dipole localization accuracy relative to using all components.
Fine calibration and cross-talk cancellation, however, could be added
to this algorithm based on user demand.
.. versionadded:: 0.11
References
----------
.. [1] Taulu S. and Kajola M. "Presentation of electromagnetic
multichannel data: The signal space separation method,"
Journal of Applied Physics, vol. 97, pp. 124905 1-10, 2005.
    .. [2] Wehner DT, Hämäläinen MS, Mody M, Ahlfors SP. "Head movements
           of children in MEG: Quantification, effects on source
           estimation, and compensation." NeuroImage 40:541–550, 2008.
"""
from .preprocessing.maxwell import (_trans_sss_basis, _reset_meg_bads,
_check_usable, _col_norm_pinv,
_get_n_moments, _get_mf_picks,
_prep_mf_coils, _check_destination,
_remove_meg_projs)
if head_pos is None:
raise TypeError('head_pos must be provided and cannot be None')
from .chpi import head_pos_to_trans_rot_t
if not isinstance(epochs, _BaseEpochs):
raise TypeError('epochs must be an instance of Epochs, not %s'
% (type(epochs),))
orig_sfreq = epochs.info['sfreq'] if orig_sfreq is None else orig_sfreq
orig_sfreq = float(orig_sfreq)
if isinstance(head_pos, np.ndarray):
head_pos = head_pos_to_trans_rot_t(head_pos)
trn, rot, t = head_pos
del head_pos
_check_usable(epochs)
origin = _check_origin(origin, epochs.info, 'head')
recon_trans = _check_destination(destination, epochs.info, True)
logger.info('Aligning and averaging up to %s epochs'
% (len(epochs.events)))
if not np.array_equal(epochs.events[:, 0], np.unique(epochs.events[:, 0])):
raise RuntimeError('Epochs must have monotonically increasing events')
meg_picks, _, _, good_picks, coil_scale, _ = \
_get_mf_picks(epochs.info, int_order, ext_order, ignore_ref)
n_channels, n_times = len(epochs.ch_names), len(epochs.times)
other_picks = np.setdiff1d(np.arange(n_channels), meg_picks)
data = np.zeros((n_channels, n_times))
count = 0
# keep only MEG w/bad channels marked in "info_from"
info_from = pick_info(epochs.info, good_picks, copy=True)
all_coils_recon = _prep_mf_coils(epochs.info, ignore_ref=ignore_ref)
all_coils = _prep_mf_coils(info_from, ignore_ref=ignore_ref)
# remove MEG bads in "to" info
info_to = deepcopy(epochs.info)
_reset_meg_bads(info_to)
# set up variables
w_sum = 0.
n_in, n_out = _get_n_moments([int_order, ext_order])
S_decomp = 0. # this will end up being a weighted average
last_trans = None
decomp_coil_scale = coil_scale[good_picks]
exp = dict(int_order=int_order, ext_order=ext_order, head_frame=True,
origin=origin)
for ei, epoch in enumerate(epochs):
event_time = epochs.events[epochs._current - 1, 0] / orig_sfreq
use_idx = np.where(t <= event_time)[0]
if len(use_idx) == 0:
trans = epochs.info['dev_head_t']['trans']
else:
use_idx = use_idx[-1]
trans = np.vstack([np.hstack([rot[use_idx], trn[[use_idx]].T]),
[[0., 0., 0., 1.]]])
loc_str = ', '.join('%0.1f' % tr for tr in (trans[:3, 3] * 1000))
if last_trans is None or not np.allclose(last_trans, trans):
logger.info(' Processing epoch %s (device location: %s mm)'
% (ei + 1, loc_str))
reuse = False
last_trans = trans
else:
logger.info(' Processing epoch %s (device location: same)'
% (ei + 1,))
reuse = True
epoch = epoch.copy() # because we operate inplace
if not reuse:
S = _trans_sss_basis(exp, all_coils, trans,
coil_scale=decomp_coil_scale)
# Get the weight from the un-regularized version
weight = np.sqrt(np.sum(S * S)) # frobenius norm (eq. 44)
# XXX Eventually we could do cross-talk and fine-cal here
S *= weight
S_decomp += S # eq. 41
epoch[slice(None) if weight_all else meg_picks] *= weight
data += epoch # eq. 42
w_sum += weight
count += 1
del info_from
mapping = None
if count == 0:
data.fill(np.nan)
else:
data[meg_picks] /= w_sum
data[other_picks] /= w_sum if weight_all else count
# Finalize weighted average decomp matrix
S_decomp /= w_sum
# Get recon matrix
# (We would need to include external here for regularization to work)
exp['ext_order'] = 0
S_recon = _trans_sss_basis(exp, all_coils_recon, recon_trans)
exp['ext_order'] = ext_order
# We could determine regularization on basis of destination basis
# matrix, restricted to good channels, as regularizing individual
# matrices within the loop above does not seem to work. But in
# testing this seemed to decrease localization quality in most cases,
# so we do not provide the option here.
S_recon /= coil_scale
# Invert
pS_ave = _col_norm_pinv(S_decomp)[0][:n_in]
pS_ave *= decomp_coil_scale.T
# Get mapping matrix
mapping = np.dot(S_recon, pS_ave)
# Apply mapping
data[meg_picks] = np.dot(mapping, data[good_picks])
info_to['dev_head_t'] = recon_trans # set the reconstruction transform
evoked = epochs._evoked_from_epoch_data(data, info_to, picks,
n_events=count, kind='average')
_remove_meg_projs(evoked) # remove MEG projectors, they won't apply now
logger.info('Created Evoked dataset from %s epochs' % (count,))
return (evoked, mapping) if return_mapping else evoked
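# Usage sketch (hypothetical file names): movement-compensated averaging driven
# by continuous head-position estimates::
#
#     from mne.chpi import read_head_pos
#     head_pos = read_head_pos('subject01_raw.pos')
#     evoked = average_movements(epochs, head_pos=head_pos,
#                                destination=(0., 0., 0.04))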
| bsd-3-clause | -8,414,976,939,089,250,000 | 40.604569 | 80 | 0.568995 | false |