repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
uber/pyro | pyro/contrib/gp/likelihoods/binary.py | 1 | 1963 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
import pyro
import pyro.distributions as dist
from pyro.contrib.gp.likelihoods.likelihood import Likelihood
class Binary(Likelihood):
"""
Implementation of Binary likelihood, which is used for binary classification
problems.
Binary likelihood uses :class:`~pyro.distributions.Bernoulli` distribution,
so the output of ``response_function`` should be in range :math:`(0,1)`. By
default, we use `sigmoid` function.
:param callable response_function: A mapping to correct domain for Binary
likelihood.
"""
def __init__(self, response_function=None):
super().__init__()
self.response_function = torch.sigmoid if response_function is None else response_function
def forward(self, f_loc, f_var, y=None):
r"""
Samples :math:`y` given :math:`f_{loc}`, :math:`f_{var}` according to
.. math:: f & \sim \mathbb{Normal}(f_{loc}, f_{var}),\\
y & \sim \mathbb{Bernoulli}(f).
.. note:: The log likelihood is estimated using Monte Carlo with 1 sample of
:math:`f`.
:param torch.Tensor f_loc: Mean of latent function output.
:param torch.Tensor f_var: Variance of latent function output.
:param torch.Tensor y: Training output tensor.
:returns: a tensor sampled from likelihood
:rtype: torch.Tensor
"""
# calculates Monte Carlo estimate for E_q(f) [logp(y | f)]
f = dist.Normal(f_loc, f_var.sqrt())()
if self.response_function is torch.sigmoid:
y_dist = dist.Bernoulli(logits=f)
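            # passing logits lets Bernoulli apply the sigmoid internally, which is
            # numerically more stable than probs=torch.sigmoid(f)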
else:
f_res = self.response_function(f)
y_dist = dist.Bernoulli(f_res)
if y is not None:
y_dist = y_dist.expand_by(y.shape[:-f.dim()]).to_event(y.dim())
return pyro.sample(self._pyro_get_fullname("y"), y_dist, obs=y)
| apache-2.0 | 8,119,565,542,898,236,000 | 36.037736 | 98 | 0.62812 | false |
flexpeace/btb | printing/print_mail.py | 1 | 4166 | import os
import sys
import glob
import json
from collections import defaultdict
from utils import UnicodeReader, slugify, count_pages, combine_pdfs, parser
import addresscleaner
from click2mail import Click2MailBatch
parser.add_argument("directory", help="Path to downloaded mail batch")
def fix_lines(address):
"""
Click2Mail screws up addresses with 3 lines. If we have only one address
line, put it in "address1". If we have more, put the first in
"organization", and subsequent ones in "addressN".
"""
lines = [a for a in [
address.get('organization', None),
address.get('address1', None),
address.get('address2', None),
address.get('address3', None)] if a]
if len(lines) == 1:
address['organization'] = ''
address['address1'] = lines[0]
address['address2'] = ''
address['address3'] = ''
if len(lines) >= 2:
address['organization'] = lines[0]
address['address1'] = lines[1]
address['address2'] = ''
address['address3'] = ''
if len(lines) >= 3:
address['address2'] = lines[2]
address['address3'] = ''
if len(lines) >= 4:
address['address3'] = lines[3]
return address
def collate_letters(mailing_dir, letters, page=1):
# Sort by recipient.
recipient_letters = defaultdict(list)
for letter in letters:
recipient_letters[(letter['recipient'], letter['sender'])].append(letter)
# Assemble list of files and jobs.
files = []
jobs = {}
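    # "files" collects the per-letter PDFs in the order they will be merged;
    # each job records the inclusive page range that recipient's letters occupy
    # in the combined document.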
for (recipient, sender), letters in recipient_letters.iteritems():
count = 0
for letter in letters:
filename = os.path.join(mailing_dir, letter["file"])
files.append(filename)
count += count_pages(filename)
end = page + count
jobs[recipient] = {
"startingPage": page,
"endingPage": end - 1,
"recipients": [fix_lines(addresscleaner.parse_address(recipient))],
"sender": addresscleaner.parse_address(sender),
"type": "letter"
}
page = end
vals = jobs.values()
vals.sort(key=lambda j: j['startingPage'])
return files, vals, page
def collate_postcards(postcards, page=1):
# Collate postcards into a list per type and sender.
type_sender_postcards = defaultdict(list)
for letter in postcards:
key = (letter['type'], letter['sender'])
type_sender_postcards[key].append(letter)
files = []
jobs = []
for (postcard_type, sender), letters in type_sender_postcards.iteritems():
files.append(os.path.join(
os.path.dirname(__file__),
"postcards",
"{}.pdf".format(postcard_type)
))
jobs.append({
"startingPage": page + len(files) - 1,
"endingPage": page + len(files) - 1,
"recipients": [
fix_lines(addresscleaner.parse_address(letter['recipient'])) for letter in letters
],
"sender": addresscleaner.parse_address(sender),
"type": "postcard",
})
return files, jobs, page + len(files)
def run_batch(args, files, jobs):
filename = combine_pdfs(files)
print "Building job with", filename
batch = Click2MailBatch(
username=args.username,
password=args.password,
filename=filename,
jobs=jobs,
staging=args.staging)
if batch.run(args.dry_run):
os.remove(filename)
def main():
args = parser.parse_args()
with open(os.path.join(args.directory, "manifest.json")) as fh:
manifest = json.load(fh)
if manifest["letters"]:
lfiles, ljobs, lpage = collate_letters(args.directory, manifest["letters"], 1)
print "Found", len(ljobs), "letter jobs"
if ljobs:
run_batch(args, lfiles, ljobs)
if manifest["postcards"]:
pfiles, pjobs, ppage = collate_postcards(manifest["postcards"], 1)
print "Found", len(pjobs), "postcard jobs"
if pjobs:
run_batch(args, pfiles, pjobs)
if __name__ == "__main__":
main()
| agpl-3.0 | 2,824,114,001,440,336,400 | 31.546875 | 98 | 0.590494 | false |
Tunghsu/SAMS | SAMS/urls.py | 1 | 1355 | from django.conf.urls import patterns, include, url
import settings
from SAMS.views import root, tallview, allview, nework, profile, login, logout, checkassign, result, admin, check, submit, view, viewAssignment, download, course, classes,rate,search
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
(r'^login/$', login),
(r'^result/$', result),
(r'^admin/$', admin),
(r'^view/$', view),
(r'^check/$', check),
#(r'^class/$', student),
(r'^submit/$', submit),
(r'^detail/(\d+)/$', viewAssignment),
(r'^checkassign/(\d+)/$', checkassign),
(r'^download/(\d+)/$', download),
(r'^course/(\d+)/$', course),
(r'^class/(\d+)/$', classes),
(r'^logout/$', logout),
(r'^profile/$', profile),
(r'^add/(\d+)/$', nework),
(r'^rate/(\d+)/$', rate),
(r'^search/$', search),
(r'^allview/$', allview),
(r'^tallview/$', tallview),
(r'^$', root),
# Examples:
# url(r'^$', 'SAMS.views.home', name='home'),
# url(r'^SAMS/', include('SAMS.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
| gpl-3.0 | -7,862,490,464,631,492,000 | 34.657895 | 182 | 0.590406 | false |
mdraeger/gmapcatcher | gmapcatcher/tilesRepo/tilesRepoRMaps.py | 1 | 16125 | ## @package gmapcatcher.tilesRepo.tilesRepoRMaps
# This module provides sqlite3 tile repository functions in the format
# used by the RMaps android app.
#
# Usage:
#
# - constructor requires MapServ instance, because method
# 'get_tile_from_coord' is provided in the MapServ
#
import os
import gtk
import sys
import time
import sqlite3
import threading
import traceback
import gmapcatcher.lrucache as lrucache
import gmapcatcher.widgets.mapPixbuf as mapPixbuf
from threading import Lock, Thread
from gmapcatcher.mapConst import *
class tilesReposRMapsException(Exception):
pass
class tilesReposRMapsInvalidPathException(tilesReposRMapsException):
pass
class tileNotInRepository(Exception):
pass
SQL_IDX_X = 0
SQL_IDX_Y = 1
SQL_IDX_ZOOM = 2
SQL_IDX_LAYER = 3
SQL_IDX_TSTAMP = 4
SQL_IDX_IMG = 5
SQL_DATABASE_DDL = """
CREATE TABLE "tiles" (
"x" INTEGER NOT NULL ,
"y" INTEGER NOT NULL ,
"z" INTEGER NOT NULL ,
"s" INTEGER NOT NULL ,
"image" BLOB NOT NULL ,
PRIMARY KEY (x,y,z,s)
);
CREATE TABLE android_metadata (locale TEXT);
CREATE INDEX IND on tiles (x,y,z,s);
CREATE TABLE info(minzoom,maxzoom);
INSERT INTO android_metadata (locale) VALUES ("en_EN");
"""
"""
Another not used DDL:
CREATE TABLE maps (id INTEGER, name TEXT, zoom TEXT, type INTEGER, PRIMARY KEY(id));
CREATE TABLE regions_points (regionsid INTEGER, lat REAL,lon REAL, pos INTEGER,
PRIMARY KEY(regionsid,lat,lon));
CREATE TABLE map_regions (regionsid INTEGER, mapid INTEGER, PRIMARY KEY(regionsid));
"""
from tilesRepo import TilesRepository
class RMapsThread(Thread):
def __init__(self, url_dir, url_filenameformat):
Thread.__init__(self)
self.url_dir = url_dir
self.url_filenameformat = url_filenameformat
self.dbconns = []
self.dbcurss = []
self.dbzooms = []
self.sql_request = None
self.sql_response = None
self.finish_flag = False
self.event = threading.Event()
self.thlock = threading.Lock()
self.resplock = threading.Lock()
self.respcond = threading.Condition(self.resplock)
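        # All SQLite work is funnelled through this single worker thread:
        # sqlite3 connections may only be used from the thread that created
        # them, so callers post a request via set_sql_request()/event_set()
        # and wait on respcond for the result.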
if not os.path.isdir(url_dir):
os.makedirs(url_dir)
def run(self):
while True:
self.event.wait()
if self.finish_flag:
# Closes every DB
for conn in self.dbconns:
if not conn is None:
conn.close()
self.dbconns = []
self.event.clear()
return True
self.event.clear()
self.process_sqlrequest()
self.respcond.acquire()
self.respcond.notify()
self.respcond.release()
def event_clear(self):
self.event.clear()
def event_set(self):
self.event.set()
def finish_thread(self):
self.finish_flag = True
self.event.set()
def process_sqlrequest(self):
#print "D:process_sqlrequest: " + str(thread.get_ident())
if not self.sql_request is None:
req = self.sql_request[1]
if self.sql_request[0] == "store_tile":
self.store_tile(req[0], req[1], req[2], req[3], req[4])
elif self.sql_request[0] == "get_tile_row":
self.get_tile_row(req[0], req[1], req[2], req[3])
elif self.sql_request[0] == "delete_tile":
self.delete_tile(req[0], req[1], req[2])
self.sql_request = None
def dbconnection(self, layer):
# Extends the internal cache to hold the position for the given layer
if len(self.dbconns) <= layer:
self.dbconns.extend(None for i in range(len(self.dbconns), layer + 1))
self.dbcurss.extend(None for i in range(len(self.dbcurss), layer + 1))
self.dbzooms.extend(None for i in range(len(self.dbzooms), layer + 1))
if self.dbconns[layer] is None:
#print "D:sqlite3.connect( url ): " + str(thread.get_ident())
createTable = False
dburl = os.path.join(self.url_dir, self.url_filenameformat % LAYER_NAMES[layer])
if(not os.path.isfile(dburl)):
createTable = True
conn = sqlite3.connect(dburl)
curs = conn.cursor()
self.dbconns[layer] = conn
self.dbcurss[layer] = curs
if createTable:
#process create table
curs.executescript(SQL_DATABASE_DDL)
conn.commit()
self.dbzooms[layer] = curs.execute("SELECT minzoom, maxzoom FROM info LIMIT 1").fetchone()
return self.dbconns[layer]
def dbcoursor(self, layer):
self.dbconnection(layer)
return self.dbcurss[layer]
def update_zoom(self, layer, zoom):
if self.dbzooms[layer]:
mn, mx = self.dbzooms[layer]
if zoom < mn:
mn = zoom
if zoom > mx:
mx = zoom
res = (int(mn), int(mx))
if res != self.dbzooms[layer]:
self.dbzooms[layer] = res
self.dbcoursor(layer).execute("UPDATE info SET minzoom = ? AND maxzoom = ?", res)
else:
res = (zoom, zoom)
self.dbzooms[layer] = res
self.dbcoursor(layer).execute("INSERT INTO info (minzoom, maxzoom) VALUES (?,?)", res)
def get_tile_row(self, layer, zoom_level, coord, olderthan):
# olderthan is ignored in this format, sorry =/
qry = "SELECT x,y,z,%d,date('now'),image FROM tiles WHERE z=%i AND x=%i AND y=%i AND s=%i" % (layer, zoom_level, coord[0], coord[1], 0)
dbcursor = self.dbcoursor(layer)
dbcursor.execute(qry)
self.sql_response = dbcursor.fetchone()
def store_tile(self, layer, zoom_level, coord, tstamp, data):
try:
dbcursor = self.dbcoursor(layer)
dbcursor.execute("INSERT INTO tiles (x,y,z,s,image) VALUES(?,?,?,?,?)", (coord[0], coord[1], zoom_level, 0, sqlite3.Binary(data)))
self.update_zoom(layer, zoom_level)
self.dbconnection(layer).commit()
except sqlite3.IntegrityError:
# Known problem - one tile is downloaded more than once, when tile is:
            # - scheduled for download
# - mouse moves map in the window
            # - in such case missing tiles are again scheduled for download
            # ToDo: - solution: maintain a queue of tiles scheduled for download
ei = sys.exc_info()
print traceback.format_exception(ei[0], ei[1], ei[2])
#print "Debug: " + str(sys.exc_info()[0]) + str(sys.exc_info()[1]) + str(sys.exc_info()[2])
#pass
def delete_tile(self, layer, zoom_level, coord):
qry = "DELETE FROM tiles WHERE z=%i AND x=%i AND y=%i AND s=%i" % (zoom_level, coord[0], coord[1], 0)
dbcursor = self.dbcoursor(layer)
dbcursor.execute(qry)
self.dbconnection(layer).commit()
def set_sql_request(self, req):
self.sql_request = [req[0], req[1]]
def get_sql_response(self):
resp = self.sql_response
self.sql_response = None
return resp
class RMapsFuncs():
def __init__(self, url_dir, url_filenameformat):
self.url = os.path.join(url_dir, url_filenameformat)
self.sql_thread = None
if self.sql_thread is None:
self.sql_thread = RMapsThread(url_dir, url_filenameformat)
if not self.sql_thread.isAlive():
self.sql_thread.start()
def finish(self):
if self.sql_thread is None:
return
self.sql_thread.finish_thread()
self.sql_thread.join()
self.sql_thread = None
def restart_thread(self, url_dir, url_filenameformat):
if self.sql_thread is not None:
if self.sql_thread.isAlive():
self.sql_thread.finish_thread()
self.sql_thread.join()
self.sql_thread = None
self.sql_thread = RMapsThread(url_dir, url_filenameformat)
self.sql_thread.start()
# coord is [x,y]
def get_tile_row(self, layer, zoom_level, coord, olderthan=-1):
try:
self.sql_thread.thlock.acquire()
self.sql_thread.respcond.acquire()
req = ("get_tile_row", (layer, zoom_level, coord, olderthan))
self.sql_thread.set_sql_request(req)
self.sql_thread.event_set()
self.sql_thread.respcond.wait()
self.sql_thread.respcond.release()
resp = self.sql_thread.get_sql_response()
finally:
self.sql_thread.thlock.release()
return resp
def store_tile(self, layer, zoom_level, coord, tstamp, data):
try:
self.sql_thread.thlock.acquire()
self.sql_thread.respcond.acquire()
req = ("store_tile", (layer, zoom_level, coord, tstamp, data))
self.sql_thread.set_sql_request(req)
self.sql_thread.event_set()
self.sql_thread.respcond.wait()
self.sql_thread.respcond.release()
finally:
self.sql_thread.thlock.release()
return
def delete_tile(self, layer, zoom_level, coord):
try:
self.sql_thread.thlock.acquire()
self.sql_thread.respcond.acquire()
req = ("delete_tile", (layer, zoom_level, coord))
self.sql_thread.set_sql_request(req)
self.sql_thread.event_set()
self.sql_thread.respcond.wait()
self.sql_thread.respcond.release()
resp = self.sql_thread.get_sql_response()
finally:
self.sql_thread.thlock.release()
return resp
class TilesRepositoryRMaps(TilesRepository):
def __init__(self, MapServ_inst, conf):
TilesRepository.__init__(self, MapServ_inst, conf)
self.tile_cache = lrucache.LRUCache(1000)
self.mapServ_inst = MapServ_inst
self.conf = conf
self.configpath = os.path.join(conf.init_path, conf.map_service)
self.lock = Lock()
self.missingPixbuf = mapPixbuf.missing()
self.sqlite3func = RMapsFuncs(self.configpath, RMAPS_REPOSITORY_FILE_FORMAT)
def finish(self):
self.sqlite3func.finish()
# last command in finish
TilesRepository.finish(self)
## Sets new repository path to be used for storing tiles
def set_repository_path(self, conf):
newpath = os.path.join(conf.init_path, conf.map_service)
self.sqlite3func.restart_thread(newpath, RMAPS_REPOSITORY_FILE_FORMAT)
## Returns the PixBuf of the tile
# Uses a cache to optimise HDD read access
# PUBLIC
def load_pixbuf(self, coord, layer, force_update):
filename = self.coord_to_path(coord, layer)
if (not force_update) and (filename in self.tile_cache):
pixbuf = self.tile_cache[filename]
else:
dbrow = self.sqlite3func.get_tile_row(layer, coord[2], (coord[0], coord[1]))
if dbrow is None:
pixbuf = self.missingPixbuf
else:
try:
pixbuf = self.create_pixbuf_from_data(dbrow[SQL_IDX_IMG])
self.tile_cache[filename] = pixbuf
except:
pixbuf = self.missingPixbuf
return pixbuf
# PUBLIC
def remove_old_tile(self, coord, layer, filename=None, intSeconds=86400):
"""not used anymore?! don't know about it. But repoFS and repoMGMaps got rid of this
methods.
"""
dbrow = self.sqlite3func.get_tile_row(layer, coord[2], (coord[0], coord[1]))
# TODO: should be OK, but test properly
if dbrow[SQL_IDX_TSTAMP] >= (int(time.time()) - intSeconds):
try:
if filename is None:
filename = self.coord_to_path(coord, layer)
self.tile_cache[filename] = self.create_pixbuf_from_data(dbrow[SQL_IDX_IMG])
except:
pass
return False
try:
if filename is None:
filename = self.coord_to_path(coord, layer)
del self.tile_cache[filename]
except KeyError:
pass
return True
# PUBLIC
def is_tile_in_local_repos(self, coord, layer):
filename = self.coord_to_path(coord, layer)
if filename in self.tile_cache:
return True
dbrow = self.sqlite3func.get_tile_row(layer, coord[2], (coord[0], coord[1]))
if dbrow is None:
return False
else:
return True
def create_pixbuf_from_data(self, data):
# Default result to the "data" buffer
pixbuf = data
try:
loader = gtk.gdk.PixbufLoader()
loader.write(data)
loader.close()
pixbuf = loader.get_pixbuf()
except:
#print traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
raise
return pixbuf
## Get the png file for the given location
# Returns true if the file is successfully retrieved
# private
def get_png_file(self, coord, layer, online, force_update, conf):
# remove tile only when online
filename = self.coord_to_path(coord, layer)
if (force_update and online):
# force update = delete tile from repository and download new version
#self.remove_old_tile(coord, layer, filename)
# if in remove_old_tile tile_cache is populated if tile is not too old
if filename in self.tile_cache:
del self.tile_cache[filename]
else:
# we don't have to download tile from internet
if filename in self.tile_cache:
return True
dbrow = self.sqlite3func.get_tile_row(layer, coord[2], (coord[0], coord[1]))
if dbrow is not None:
try:
self.tile_cache[filename] = self.create_pixbuf_from_data(dbrow[5])
except:
pass
return True
if not online:
return False
# download data
try:
oa_data = self.mapServ_inst.get_tile_from_coord(coord, layer, conf)
try:
                self.tile_cache[filename] = self.create_pixbuf_from_data(oa_data)  # cache the tile just downloaded
except:
pass
self.sqlite3func.store_tile(layer, coord[2], (coord[0], coord[1]), int(time.time()), oa_data)
return True
except KeyboardInterrupt:
raise
except:
ei = sys.exc_info()
print '\tdownload failed - ' + str(traceback.format_exception(ei[0], ei[1], ei[2], None))
return False
def get_plain_tile(self, coord, layer):
dbrow = self.sqlite3func.get_tile_row(layer, coord[2], (coord[0], coord[1]))
if dbrow is not None:
return dbrow[5]
raise tileNotInRepository(str((coord, layer)))
def store_plain_tile(self, coord, layer, tiledata):
if self.is_tile_in_local_repos(coord, layer):
self.sqlite3func.delete_tile(layer, coord[2], (coord[0], coord[1]))
self.sqlite3func.store_tile(layer, coord[2], (coord[0], coord[1]), int(time.time()), tiledata)
## Return the absolute path to a tile
# only check path
# tile_coord = (tile_X, tile_Y, zoom_level)
    # sample of the naming convention:
# \.googlemaps\tiles\15\0\1\0\1.png
# We only have 2 levels for one axis
# at most 1024 files in one dir
# private
def coord_to_path(self, tile_coord, layer):
path = os.path.join(self.configpath,
self.conf.get_layer_dir(layer),
str('%d' % tile_coord[2]),
str(tile_coord[0] / 1024),
str(tile_coord[0] % 1024),
str(tile_coord[1] / 1024),
str(tile_coord[1] % 1024) + ".png"
)
return path
| gpl-2.0 | 3,090,410,139,767,253,500 | 33.235669 | 144 | 0.57631 | false |
kbase/assembly | lib/assembly/plugins/discovar.py | 1 | 1581 | import glob
import logging
import os
import subprocess
from plugins import BaseAssembler
from yapsy.IPlugin import IPlugin
logger = logging.getLogger(__name__)
class DiscovarAssembler(BaseAssembler, IPlugin):
def run(self):
"""
Build the command and run.
Return list of contig file(s)
"""
reads = self.data.readsets
self.fastq_to_bam(reads)
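        # DISCOVAR consumes BAM input, so the FASTQ read sets are converted
        # to <outpath>/sample.bam with Picard FastqToSam first.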
os.environ["MALLOC_PER_THREAD"] = "1"
cmd_args = [self.executable, 'NUM_THREADS=4', 'READS='+self.outpath+'sample.bam', 'REGIONS=all', 'TMP='+self.outpath, 'OUT_HEAD='+self.outpath+'/discovar']
self.arast_popen(cmd_args)
contigs = glob.glob(self.outpath + '/*.final.fasta')
if not contigs:
#raise Exception("No contigs")
logger.warning("No contigs")
return {'contigs': contigs}
def fastq_to_bam(self, reads):
# cmd_args = [self.picard, 'FastqToSam', 'V=Illumina', 'O='+self.outpath+'/sample.bam', 'SM=sample']
cmd_args = [self.picard, 'FastqToSam','TMP_DIR='+self.outpath,
'V=Standard', 'O='+self.outpath+'/sample.bam', 'SM=sample']
for d in reads:
if d.type == 'paired':
read1 = d.files[0]
cmd_args.append('F1=' + read1)
try:
read2 = d.files[1] # If 2 files
cmd_args.append('F2=' + read2)
except:
pass
if len(cmd_args) == 1:
raise Exception("No paired-end reads")
self.arast_popen(cmd_args)
| mit | -849,724,596,757,140,000 | 30 | 163 | 0.55408 | false |
linuxscout/arramooz | scripts/nouns/stardict.py | 1 | 3461 | #!/usr/bin/python2
# -*- coding=utf-8 -*-
#************************************************************************
# $Id: generatenoundict.py,v 0.7 2011/03/26 01:10:00 Taha Zerrouki $
#
# ------------
# Description:
# ------------
# Copyright (c) 2011, Arabtechies, Arabeyes Taha Zerrouki
#
# This file is the main file to execute the application in the command line
#
# -----------------
# Revision Details: (Updated by Revision Control System)
# -----------------
# $Date: 2009/06/02 01:10:00 $
# $Author: Taha Zerrouki $
# $Revision: 0.7 $
# $Source: arabtechies.sourceforge.net
#
#***********************************************************************/
import csvdict
import noundict_functions as ndf
class StarDict(csvdict.CsvDict):
""" a virtual converter of data from table to specific format
the data is big, then every function print string """
def __init__(self, wordtype, version="N/A", sqltype = "sqlite",):
"""
initiate the dict
"""
csvdict.CsvDict.__init__(self, wordtype, version)
self.sqltype= sqltype
def add_header(self,):
"""
add the header for new dict
"""
#~ line = "#" + "\n#".join(self.headerlines) + "\n"
line =""
return line
def add_record(self, noun_row):
"""
        Add a new record to the dict
"""
self.id +=1
fields = self.treat_tuple(noun_row)
line = ""
# to reduce the sql file size,
# doesn't work with multiple files
if fields["vocalized"]:
if fields["vocalized"] == fields["unvocalized"]:
line += fields["vocalized"]
else:
line += u"|".join([fields["vocalized"], fields["unvocalized"]])
line += "\n"
items=[];
# display
display_order=[
'wordtype',
'root',
'category',
'original',
'defined',
'gender', #جنس gender
'feminin', #مؤنث feminin
'masculin', #مذكر masculin
'mankous',
'feminable',
'number',
'single',
'masculin_plural',
'feminin_plural',
'broken_plural',
'mamnou3_sarf',
'relative',
'definition',
]
line += u"%s [%s], %s, %s"%(fields["wordtype"], fields["root"],fields["gender"], fields["number"])
if fields['feminin']:
line += u"<br>مؤنثه: %s"%fields['feminin']
if fields['masculin']:
line += u"<br>مذكره: %s"%fields['masculin']
if fields['mamnou3_sarf']:
line += u"<br>%s"%fields['mamnou3_sarf']
if fields['broken_plural']:
line += u"<br>ج:%s"%fields['broken_plural']
if fields['definition']:
line += u"<br>%s"%fields['definition']
line += "\n"
#~ for key in self.display_order[3:]:
#~ items.append(u"%s:'%s'<br>"%(key, fields[key]));
#~ line += u"\n%s\n"%u",".join(items);
return line
def add_footer(self):
"""close the data set, used for ending xml, or sql"""
return ""
| gpl-2.0 | -9,102,461,087,602,533,000 | 33.049505 | 110 | 0.443734 | false |
ActiveState/code | recipes/Python/578369_War_Game_Version_5/recipe-578369.py | 1 | 7275 | from random import randint, seed
from time import time
# region: change
# from window import *
from Zpaw import *
from cards import *
card_list = [card_0, card_1, card_2, card_3, card_4, card_5, card_6, card_7, card_8, card_9]
# endregion
def game():
print 'Welcome to WAR V5!'
print
asking = True
while asking:
try:
players = int(raw_input('How many players are there? '))
if players < 2:
print 'There must be at least two players.'
else:
asking = False
except:
print 'You must enter a number.'
print
names = []
# region: change
longest_name = 0
for name in range(players):
names.append(raw_input('What is the name of player ' + str(name + 1) + '? '))
if len(names[-1]) > longest_name:
longest_name = len(names[-1])
# endregion
deck = []
for card in range(10):
for player in range(players):
deck.append(card)
hands = []
seed(time())
for player in range(players):
hand = ([], [])
for card in range(10):
index = randint(0, len(deck) - 1)
hand[0].append(deck[index])
del deck[index]
hand[0].sort()
hands.append(hand)
for round in range(1, 11):
table = []
will_play = []
high_card = 0
for player in range(players):
will_play.append(player)
for turn in range(players):
for line in range(50):
print
index = randint(0, len(will_play) - 1)
now_play = will_play[index]
del will_play[index]
print 'Round', round
raw_input('It is ' + names[now_play] + "'s turn to play.")
print
# region: change
if len(table) == 0:
print 'There are no cards on the table.\n'
else:
table_window = window(len(table) * 6, longest_name + 13)
for card in range(len(table)):
# name_page = page(1, len(names[table[card][0]]) + 9)
# name_page.mutate(0, 0, names[table[card][0]] + ' played')
# table_window.append(name_page, [card * 6, 0])
# table_window.append(card_list[table[card][1]], [card * 6, len(names[table[card][0]]) + 8])
# table_window += struct(True, card * 6, 0, name_page)
# table_window += struct(True, card * 6, len(names[table[card][0]]) + 8, card_list[table[card][1]])
table_window += page(1, len(names[table[card][0]]) + 9) \
.mutate(0, 0, names[table[card][0]] + ' played').y(card * 6)
table_window += page(0, 0).link(card_list[table[card][1]]) \
.x(len(names[table[card][0]]) + 8).y(card * 6)
print table_window
print 'These are your playing cards:'
playing_window = window(7, len(hands[now_play][0]) * 6)
for index in range(len(hands[now_play][0])):
# playing_window.append(card_list[hands[now_play][0][index]], [1, index * 6 + 1])
# playing_window += struct(True, 1, index * 6 + 1, card_list[hands[now_play][0][index]])
playing_window += page(0, 0).link(card_list[hands[now_play][0][index]]).x(index * 6 + 1).y(1)
print playing_window
if len(hands[now_play][1]) > 0:
hands[now_play][1].sort()
print 'These are your captured cards:'
capture_window = window(7, len(hands[now_play][1]) * 6)
for index in range(len(hands[now_play][1])):
# capture_window.append(card_list[hands[now_play][1][index]], [1, index * 6 + 1])
# capture_window += struct(True, 1, index * 6 + 1, card_list[hands[now_play][1][index]])
capture_window += page(0, 0).link(card_list[hands[now_play][1][index]]).x(index * 6 + 1).y(1)
print capture_window
# endregion
asking = True
while asking:
try:
card = int(raw_input('What card do you want to play? '))
if card >= 0 and card <= 9:
try:
hands[now_play][0].remove(card)
table.append((now_play, card))
if card > high_card:
high_card = card
asking = False
except:
print 'You do not have that card.'
else:
print 'You must enter a value between -1 and 10.'
except:
print 'You must enter a number.'
for line in range(50):
print
#region: change
table_window = window(len(table) * 6, longest_name + 13)
for card in range(len(table)):
# name_page = page(1, len(names[table[card][0]]) + 9)
# name_page.mutate(0, 0, names[table[card][0]] + ' played')
# table_window.append(name_page, [card * 6, 0])
# table_window.append(card_list[table[card][1]], [card * 6, len(names[table[card][0]]) + 8])
# table_window += struct(True, card * 6, 0, name_page)
# table_window += struct(True, card * 6, len(names[table[card][0]]) + 8, card_list[table[card][1]])
table_window += page(1, len(names[table[card][0]]) + 9) \
.mutate(0, 0, names[table[card][0]] + ' played').y(card * 6)
table_window += page(0, 0).link(card_list[table[card][1]]) \
.x(len(names[table[card][0]]) + 8).y(card * 6)
print table_window
# endregion
hand_out = []
for index in range(players):
if table[index][1] == high_card:
hand_out.append(table[index][0])
while len(table) > 0:
hands[hand_out[randint(0, len(hand_out) - 1)]][1].append(table[0][1])
del table[0]
for player in range(players):
if len(hands[player][1]) > 0:
print names[player] + ' has captured ' + str(len(hands[player][1])) + ' cards.'
print
raw_input('End Of Round ' + str(round))
for line in range(50):
print
high_score = 0
scores = []
for player in range(players):
total = 0
for card in range(len(hands[player][1])):
total += hands[player][1][card]
if total > high_score:
high_score = total
if len(scores) == 0 or scores[len(scores) - 1][1] <= total:
scores.append((player, total))
else:
for index in range(len(scores)):
if total > scores[index][1]:
                    scores.insert(index, (player, total))
break
for player in range(players):
print names[scores[player][0]] + ' received ' + str(scores[player][1]) + ' points.'
print
for index in range(10):
raw_input('GAME OVER ... ' + str(9 - index))
if __name__ == '__main__':
game()
| mit | -4,348,524,934,949,283,000 | 43.090909 | 119 | 0.485636 | false |
RomainBrault/OVFM | Climate/climate2.py | 1 | 7428 | import numpy as np
from sklearn import ensemble
from sklearn import cross_validation
from sklearn import metrics
from sklearn import preprocessing
from sklearn import linear_model
import OVFM.Model as md
import OVFM.FeatureMap as fm
import OVFM.Risk as rsk
import OVFM.LearningRate as lr
import OVFM.DataGeneration as dg
import OVFM.SGD as sgd
import scipy.optimize
import matplotlib.pyplot as plt
from IPython import embed
from joblib import Parallel, delayed
# Load the training and test data sets
train = np.genfromtxt('climate.csv', delimiter=',',skip_header=1)
# test = np.genfromtxt('test.csv', delimiter=',',skip_header=1)
def TempCV( X, Y, model, optimizer, m = 12 ):
n = X.shape[ 0 ]
MSE = 0
for i in xrange( 0, n - m ):
optimizer.fit( model, X[ i:i + m, : ], Y[ i:i + m, : ] )
MSE = MSE + ( ( model( X[ i + m, : ].reshape( ( 1, X.shape[ 1 ] ) ) ) - Y[ i + m, : ] ) ** 2 ).mean( )
return MSE / ( n - m )
def gridsearch( eta0 ):
phi_l = fm.DecomposableFF( gamma, train_X.shape[ 1 ], D, L )
loss = ls.L2( 0.0 )
re = reg.L1( C )
l = lr.Constant( eta0 )
ovksgd = sgd.SGD( phi_l, l, loss, re, 10, 10, False, 0 )
return ( eta0, TempCV( train_X, train_y, ovksgd, 12 ) )
if __name__ == '__main__':
n_stations = 1
train = np.genfromtxt('climate.csv', delimiter=',',skip_header=1)
gps_random_sample = np.random.randint( 0, train.shape[ 0 ] / ( 12 * 13 ), n_stations )
imp = preprocessing.Imputer( missing_values='NaN', strategy='mean', axis=0 )
train = imp.fit_transform( train )
# scaler = preprocessing.StandardScaler( )
# train = scaler.fit_transform( train )
# reshape: station, year, month, variables
distances = train.reshape( ( train.shape[ 0 ] / ( 12 * 13 ), 13, 12, train.shape[ 1 ] ) ).astype( float )[gps_random_sample,0,0,2:4]
train = train.reshape( ( train.shape[ 0 ] / ( 12 * 13 ), 13, 12, train.shape[ 1 ] ) ).astype( float )[gps_random_sample,:,:,4:]
    meanl = train.mean( axis = ( 0, 1, 2 ) )[ np.newaxis, np.newaxis, np.newaxis, : ] # mean over stations, years and months for each variable
    std = np.sqrt( train.var( axis = ( 0, 1, 2 ) )[ np.newaxis, np.newaxis, np.newaxis, : ] ) # std over stations, years and months for each variable
std[ std == 0 ] = 1
train = ( train - meanl ) / std
# Create numpy arrays for use with scikit-learn
# transpose into year, month, variables, stations
train = train.transpose( ( 1, 2, 3, 0 ) )
train = train.reshape( ( 12 * 13, train.shape[ 2 ], train.shape[ 3 ] ) )
train_X = train[:-1,:,:]
train_y = train[1:,:,:]
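    # one-step-ahead setup: each month's variables are the input and the next
    # month's variables are the prediction target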
A = 5
np.savetxt( 'station.csv', train[:12,:,:].reshape( 12, 18 ) )
train_X_Ay = train[:12*A-1,:,:]
train_y_Ay = train[1:12*A,:,:]
train_X = train_X.reshape( ( train_X.shape[ 0 ], train_X.shape[ 1 ] * train_X.shape[ 2 ] ) )
train_y = train_y.reshape( ( train_y.shape[ 0 ], train_y.shape[ 1 ] * train_y.shape[ 2 ] ) )
train_X_Ay = train_X_Ay.reshape( ( train_X_Ay.shape[ 0 ], train_X_Ay.shape[ 1 ] * train_X_Ay.shape[ 2 ] ) )
train_y_Ay = train_y_Ay.reshape( ( train_y_Ay.shape[ 0 ], train_y_Ay.shape[ 1 ] * train_y_Ay.shape[ 2 ] ) )
# gff = fm.GaussianFF( gamma, train_X.shape[ 1 ], D )
# Kex = gff.kernel_exact( train_X )
# Kap = gff.kernel_approx( train_X )
# fig, axes = plt.subplots( nrows=1, ncols=2, sharex=False, sharey=False )
# im = axes[ 0 ].imshow( Kex, origin = 'lower' )
# im.set_cmap( 'hot' )
# axes[ 0 ].set_title( 'Kernel exact' )
# im = axes[ 1 ].imshow( Kap, origin = 'lower' )
# im.set_cmap( 'hot' )
# axes[ 1 ].set_title( 'Kernel approximation' )
# plt.show( )
# print 'Kernel approximation MSE:', np.linalg.norm( Kex - Kap ) ** 2 / train_X.size
# embed( )
L = np.eye( train_y.shape[ 1 ] )
# distances = 0.99 * np.eye( distances.shape[ 0 ] ) + 0.01 * np.ones( ( distances.shape[ 0 ], distances.shape[ 0 ] ) )
# distances = np.exp( - 1.0 / 125 * scipy.spatial.distance.squareform( scipy.spatial.distance.pdist( distances ) ) ** 2 )
# print distances
# L = np.kron( distances, np.eye( 18 ) ) + 10e-10 * np.eye( train_y.shape[ 1 ] )
D = 1000
gamma = 0.5 / train_X.shape[ 1 ]
C = 0.001
eta0 = 0.075
# L = np.eye( train_y.shape[ 1 ] )
# L = np.kron( np.eye( 18 ), 0.1 * np.ones( ( n_stations, n_stations ) ) + 0.9 * np.eye( n_stations ) )
risk = rsk.GroupLasso( C, train_X.shape[ 1 ] )
L = np.eye( train_y.shape[ 1 ] )
model = md.Model( fm.DecomposableFF( gamma, train_X.shape[ 1 ], D, L ) )
ovksgd = sgd.SGD( risk, lr.Constant( eta0 ), 10, 10, 0 )
print 'MT id : ', TempCV( train_X, train_y, model, ovksgd, 12 )
# L = np.kron( np.eye( 18 ), 0.1 * np.ones( ( n_stations, n_stations ) ) + 0.9 * np.eye( n_stations ) )
# model = md.Model( fm.DecomposableFF( gamma, train_X.shape[ 1 ], D, L ) )
# ovksgd = sgd.SGD( risk, lr.Constant( eta0 ), 10, 10, 0 )
# print 'MT id ones : ', TempCV( train_X, train_y, model, ovksgd, 12 )
# distances = np.exp( - 10.0 / 125 * scipy.spatial.distance.squareform( scipy.spatial.distance.pdist( distances ) ) ** 2 )
# L = np.kron( np.eye( 18 ), distances )
# model = md.Model( fm.DecomposableFF( gamma, train_X.shape[ 1 ], D, L ) )
# ovksgd = sgd.SGD( risk, lr.Constant( eta0 ), 10, 10, 0 )
# print 'MT id distances: ', TempCV( train_X, train_y, model, ovksgd, 12 )
# X,X_,y,y_ = cross_validation.train_test_split(train_X,train_y,test_size=0.2)
# res = Parallel(n_jobs=4, backend="threading")(delayed(gridsearch)( i ) for i in np.logspace( -1, 1, 10 ) )
# print res
# ovksgd.fit( train_X_Ay, train_y_Ay )
# y_rf = sgd.predict( X_ )
# print sgd.predict( X ) - y
# print phi_l.grad( X[ 0, : ].reshape( ( 1, X.shape[ 1 ] ) ) ).shape
# J = np.mean( phi_l.Jacobian( X[np.random.randint(0,X.shape[0],2000),:], sgd.coefs ), axis = 0 )
# def check_Jacobian( x ):
# check = numpy.empty( ( 20, 20 ) )
# for i in xrange( 0, 20 ):
# def func( x ):
# return sgd.predict( x.reshape( ( 1, 20 ) ) ).ravel( )[ i ]
# def grad( x ):
# return phi_l.Jacobian( x.reshape( ( 1, 20 ) ), sgd.coefs )[ 0, i, : ].ravel( )
# check[ i, : ] = scipy.optimize.check_grad( func, grad, X[0,:] )
# return check
# print sgd.score( X_, y_ )
# A = np.dot( np.array( phi_l.B ), np.array( phi_l.B ).T )
# J = np.mean( phi_l.Jacobian( train_X[:12*13-1,:], ovksgd.coefs ), axis = 0 )
# J = ( J[ :, train_X.shape[ 1 ] / 2: ] - J[ :, :train_X.shape[ 1 ] / 2 ] ) > 0
ovksgd.fit( model, train_X_Ay, train_X_Ay )
J = np.mean( model.Jacobian( train_X_Ay[:,:] ), axis = 0 )
p_serie = np.empty( ( 12 * 13 - 1, train_X.shape[ 1 ] ) )
p_serie[ :12 * A, : ] = train_X[ :12 * A, : ]
for i in xrange( 12 * A, 12 * 13 - 1 ):
p_serie[ i, : ] = model( p_serie[ i - 1, : ].reshape( ( 1, train_X.shape[ 1 ] ) ) )
fig, axes = plt.subplots( nrows=2, ncols=1, sharex=False, sharey=False )
im = axes[ 1 ].pcolor( J )
im.set_cmap( 'hot' )
plt.colorbar( im )
axes[ 1 ].set_xlim([0,J.shape[ 0 ]])
axes[ 1 ].set_ylim([0,J.shape[ 1 ]])
axes[ 1 ].set_xticks(np.arange(J.shape[1])+0.5, minor=False)
axes[ 1 ].set_yticks(np.arange(J.shape[0])+0.5, minor=False)
axes[ 1 ].set_xticklabels([], minor=False)
axes[ 1 ].set_yticklabels([], minor=False)
axes[ 0 ].plot( np.arange( 0, 12 * 13 - 1 ), p_serie[ :, 0 ], color='r' )
axes[ 0 ].plot( np.arange( 0, 12 * 13 - 1 ), train_X[ :12 * 13 - 1, 0 ], color='k' )
axes[ 0 ].plot( np.arange( 0, 12 * 13 - 1 ), np.concatenate( ( [ train_X[ 0, 0 ] ], model( train_X[ :12 * 13 - 2, : ] )[ :, 0 ] ) ), color='b' )
axes[ 0 ].axvline( x = 12 * A - 1, color = 'r' )
plt.show( )
| mit | -1,827,408,120,212,465,400 | 38.094737 | 145 | 0.593296 | false |
amorphic/sparkcc-formulapi | tests/test_formula.py | 1 | 2017 | from mock import patch
from race_code import race_code_globals
from . import FormulaPiTestCase
class TestFormula(FormulaPiTestCase):
def setUp(self):
# Capture all of the global defaults and reset them after we modify them with this code
# to keep sanity when running tests.
self.original_race_code_globals = {}
globals_used = [
'capture',
'processor_pool',
'controller',
'running',
'display_frame',
'display_predator',
'frame_lock',
]
for global_used in globals_used:
self.original_race_code_globals[global_used] = getattr(race_code_globals, global_used)
# Patch these as we don't have a device connected to it.
self.patch_1 = patch('smbus.smbus.SMBus')
self.patch_1.start()
self.patch_2 = patch('race_code.zero_borg.ZeroBorg')
self.patch_2.start()
self.patch_3 = patch('cv2.VideoCapture')
self.patch_3.start()
# We don't want to call sudo each time during tests (we don't want to have to do that at
# all really)!
self.patch_4 = patch('os.system') # Patch a sudo call during testing.
self.patch_4.start()
# Setting the running mode to false for consistency.
race_code_globals.running = False
# Lets not run all those threads in the tests.
self.patch_5 = patch('threading.Thread')
self.patch_5.start()
# Patch the `formula` file as it loads `SMBus` automatically then import it.
from race_code import Formula
self.formula = Formula
def tearDown(self):
self.patch_1.stop()
self.patch_2.stop()
self.patch_3.stop()
self.patch_4.stop()
self.patch_5.stop()
for global_used in self.original_race_code_globals.keys():
setattr(race_code_globals, global_used, self.original_race_code_globals[global_used])
def test_yeti_motors(self):
pass
| mit | 6,386,742,852,895,019,000 | 34.385965 | 98 | 0.608825 | false |
ContextLab/hypertools | hypertools/tools/df2mat.py | 1 | 1267 | #!/usr/bin/env python
import pandas as pd
def df2mat(data, return_labels=False):
"""
Transforms a Pandas DataFrame into a Numpy array with binarized text columns
This function transforms single-level df to an array so it can be plotted
with HyperTools. Additionally, it uses the Pandas.Dataframe.get_dummies
function to transform text columns into binary vectors, or
'dummy variables'.
Parameters
----------
data : A single-level Pandas DataFrame
The df that you want to convert. Note that this currently only works
with single-level (not Multi-level indices).
Returns
----------
plot_data : Numpy array
A Numpy array where text columns are turned into binary vectors.
labels : list (optional)
A list of column labels for the numpy array. To return this, set
return_labels=True.
"""
df_str = data.select_dtypes(include=['object'])
df_num = data.select_dtypes(exclude=['object'])
for colname in df_str.columns:
df_num = df_num.join(pd.get_dummies(data[colname], prefix=colname))
plot_data = df_num.values
labels=list(df_num.columns.values)
if return_labels:
return plot_data,labels
else:
return plot_data
| mit | 7,591,116,103,792,897,000 | 27.155556 | 80 | 0.665351 | false |
sebatyler/django_tdd | django_tdd/settings.py | 1 | 3108 | """
Django settings for django_tdd project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'eacd**55=tf0g+44$&hkun9kd6jo2ix1qsz29!fr%p0m83!%9+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_tdd.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_tdd.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| mit | 6,978,576,545,869,449,000 | 24.9 | 91 | 0.68758 | false |
cargocult/rowan-python | rowan/controllers/base.py | 1 | 2016 | """
Some base classes for common styles of controller.
"""
import logging
class LoggingMixin(object):
"""
We generally want to be able to log the behavior of
controllers. This mixin makes a logging object available.
"""
@classmethod
def get_logger(cls):
if not hasattr(cls, "_logger"):
cls._logger = logging.getLogger("controller.%s" % cls.__name__)
return cls._logger
class BaseController(LoggingMixin):
"""
A base class for most types of controller.
"""
@classmethod
def import_dependencies(cls):
"""
        This is called only if the controller is instantiated. It is used
        so we can define libraries with import dependencies that never
        need to be installed if the associated subclasses are not
        instantiated.
"""
pass
def __new__(cls, *args, **kws):
if cls.import_dependencies:
cls.import_dependencies()
cls.import_dependencies = None
instance = super(BaseController, cls).__new__(cls)
instance.__init__(*args, **kws)
return instance
def __call__(self, request):
raise NotImplementedError(
"Controllers should implement a __call__ method."
)
def get_children(self):
"""
Base controllers have no children. Single or multiple children
are managed by `Wrapper` and `Selector` base classes,
respectively.
"""
return []
class Wrapper(BaseController):
"""
A wrapper is a controller with a single child that it may or may
not choose to call.
"""
def __init__(self, controller):
self.controller = controller
def get_children(self):
return [self.controller]
class Selector(BaseController):
"""
A controller base class that manages many children in a list.
"""
def __init__(self, *controllers):
self.controllers = controllers
def get_children(self):
return self.controllers
| mit | 1,128,868,552,093,589,100 | 26.616438 | 75 | 0.614583 | false |
sony/nnabla | python/test/utils/test_graph_converters/test_identity.py | 1 | 2169 | # Copyright 2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import pytest
import numpy as np
import nnabla as nn
import nnabla.experimental.graph_converters as GC
from .ref_graphs.resnets import small_cf_resnet, small_id_resnet
from .ref_graphs.lenets import lenet, id_lenet
batch_size = 1
lenet_ref = id_lenet
resnet_ref = small_id_resnet
@pytest.mark.parametrize('seed', [313])
@pytest.mark.parametrize('test', [False, True])
@pytest.mark.parametrize('diff_batchsize', [True, False])
@pytest.mark.parametrize('graph_ref, graph_act', [(lenet_ref, lenet),
(resnet_ref, small_cf_resnet)])
def test_identity(seed, test, diff_batchsize, graph_ref, graph_act):
from .graph_converter_test_utils import structure_tester, value_tester
# Random number
np.random.seed(seed)
rng = np.random.RandomState(seed)
# Graph
x_data = rng.randn(batch_size, 3, 32, 32)
x = nn.Variable.from_numpy_array(x_data)
x1_data = rng.randn(128, 3, 32, 32)
x1 = nn.Variable.from_numpy_array(x1_data)
# Alter value and copy option
inp_x = x
cp_val = True
if diff_batchsize:
inp_x = x1
cp_val = False
y_tgt = graph_act(x, test=test)
# FunctionModifier
modifiers = []
modifiers.append(GC.IdentityModifier({x: inp_x}, copy_value=cp_val))
y_act = GC.GraphConverter(modifiers).convert(y_tgt)
# Ref Graph
y_ref = graph_ref(inp_x, test=test)
# Test
structure_tester(y_ref, y_act)
if diff_batchsize == False:
value_tester(y_tgt, y_act, rtol=6e-02, atol=5e-02)
| apache-2.0 | -1,049,547,621,589,168,800 | 28.712329 | 81 | 0.68142 | false |
TE-ToshiakiTanaka/alize | project/blue/testrunner.py | 1 | 1428 | import os
import sys
import alize
PATH = os.path.abspath(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
if not PATH in sys.path:
sys.path.insert(0, PATH)
if alize.__version__ < "0.1.0":
sys.exit("alize version over 0.1.0. : %s " % (alize.__version__))
from alize.application import AlizeRunner
from alize.workspace import Workspace
from blue.utility import *
from blue.script.testcase_base import TestCase_Unit
class TestRunner(object):
def __init__(self):
self.runner = AlizeRunner()
self.workspace = Workspace(WORK_DIR)
self.lib = self.workspace.mkdir("lib")
self.tmp = self.workspace.mkdir("tmp")
self.log = self.workspace.mkdir("log")
self.report = self.workspace.mkdir("report")
self.tmp_video = self.workspace.mkdir(os.path.join("tmp", "video"))
self.workspace.rmdir(os.path.join("tmp", "evidence"))
self.tmp_evidence = self.workspace.mkdir(os.path.join("tmp","evidence"))
TestCase_Unit.register(LIB_DIR)
def execute(self, script):
self.runner.execute(script, SCRIPT_DIR)
def execute_with_report(self, script):
self.runner.execute_with_report(script, SCRIPT_DIR, REPORT_DIR)
if __name__ == "__main__":
if len(sys.argv[1:]) < 1:
sys.exit("Usage: %s <filename>" % sys.argv[0])
testcase = sys.argv[1]
runner = TestRunner()
runner.execute_with_report(testcase)
| mit | -1,571,979,897,884,462,000 | 30.733333 | 83 | 0.651961 | false |
JamHsu/Pocketmap | script/crawler/model/tabelog.py | 1 | 1390 | import json
from urlparse import urlparse
class Restaurant(object):
def __init__(self, data):
self.url = data.get('@id', '')
self.tabelog_id = self.parse_id_from_url(self.url)
self.name = data.get('name', '').encode('utf8')
self.img_url = data.get('image', '')
geo = data.get('geo', {})
self.lat = geo.get('latitude', 0)
self.lng = geo.get('longitude', 0)
agg_rating = data.get('aggregateRating', {})
self.rating = agg_rating.get('ratingValue',0)
self.note = None
address = data.get('address')
self.streetAddress = address.get('streetAddress').encode('utf8')
self.address_locality = address.get('addressLocality').encode('utf8')
self.address_region = address.get('addressRegion').encode('utf8')
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
def parse_id_from_url(self, url):
url = url.encode('utf8')
url_obj = urlparse(url)
path = url_obj.path
path_split = path.split('/')
tabelog_id = ''
index = -1
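        # walk the path segments from the end: a trailing slash yields an empty
        # last segment, so step back until a non-empty one (the id) is found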
if path_split:
            while tabelog_id == '':
tabelog_id = path_split[index]
index -= 1
return tabelog_id
@classmethod
def hook(cls, data):
return cls(data) | mit | 4,636,327,346,200,734,000 | 31.348837 | 77 | 0.553957 | false |
pietromarchesi/pidpy | setup.py | 1 | 1173 |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import numpy as np
from distutils.extension import Extension
def readme():
with open('README.md') as f:
return f.read()
setup(name='pidpy',
version='0.1',
description='Partial Information Decomposition in Python',
long_description=readme(),
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Topic :: Information Theory',
],
keywords='partial information decomposition synergy '
'redundancy unique',
#url='http://github.com/',
author='Pietro Marchesi',
author_email='[email protected]',
license='new BSD',
packages=['pidpy'],
install_requires=[
'numpy',
'joblib',
'pymorton'
],
include_package_data=True,
zip_safe=False,
test_suite = 'nose.collector',
tests_require = ['nose'],
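      # build the accelerated helpers from the pre-generated C source
      # (pidpy/utilsc.c); NumPy headers are supplied via include_dirs below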
ext_modules=[Extension('pidpy.utilsc', ['pidpy/utilsc.c'])],
include_dirs=[np.get_include()]
) | gpl-3.0 | -1,308,581,805,493,007,000 | 25.681818 | 66 | 0.595908 | false |
angr/angr | angr/storage/memory_mixins/paged_memory/pages/list_page.py | 1 | 12194 | # pylint:disable=abstract-method
import logging
from typing import Optional, List, Set, Tuple
from sortedcontainers import SortedSet
from . import PageBase
from angr.storage.memory_object import SimMemoryObject
from .cooperation import MemoryObjectMixin
l = logging.getLogger(name=__name__)
class ListPage(MemoryObjectMixin, PageBase):
def __init__(self, memory=None, content=None, sinkhole=None, mo_cmp=None, **kwargs):
super().__init__(**kwargs)
self.content: List[Optional[SimMemoryObject]] = content
self.stored_offset = SortedSet()
if content is None:
if memory is not None:
self.content: List[Optional[SimMemoryObject]] = [None] * memory.page_size # TODO: this isn't the best
self._mo_cmp = mo_cmp
self.sinkhole: Optional[SimMemoryObject] = sinkhole
def copy(self, memo):
o = super().copy(memo)
o.content = list(self.content)
o.stored_offset = self.stored_offset.copy()
o.sinkhole = self.sinkhole
o._mo_cmp = self._mo_cmp
return o
def load(self, addr, size=None, endness=None, page_addr=None, memory=None, cooperate=False, **kwargs):
result = []
last_seen = ... # ;)
# loop over the loading range. accumulate a result for each byte, but collapse results from adjacent bytes
# using the same memory object
for subaddr in range(addr, addr+size):
item = self.content[subaddr]
if item is None:
item = self.sinkhole
if item is not last_seen:
if last_seen is None:
self._fill(result, subaddr, page_addr, endness, memory, **kwargs)
result.append((subaddr + page_addr, item))
last_seen = item
if last_seen is None:
self._fill(result, addr + size, page_addr, endness, memory, **kwargs)
if not cooperate:
result = self._force_load_cooperation(result, size, endness, memory=memory, **kwargs)
return result
def _fill(self, result, addr, page_addr, endness, memory, **kwargs):
"""
Small utility function for behavior which is duplicated in load
mutates result to generate a new memory object and replace the last entry in it, which is None. Then, it will
insert the new memory object into self.content.
"""
global_end_addr = addr + page_addr
global_start_addr = result[-1][0]
size = global_end_addr - global_start_addr
new_ast = self._default_value(global_start_addr, size, name='%s_%x' % (memory.id, global_start_addr), key=(self.category, global_start_addr), memory=memory, **kwargs)
new_item = SimMemoryObject(new_ast, global_start_addr, endness=endness, byte_width=memory.state.arch.byte_width if memory is not None else 8)
subaddr_start = global_start_addr - page_addr
for subaddr in range(subaddr_start, addr):
self.content[subaddr] = new_item
self.stored_offset.add(subaddr)
result[-1] = (global_start_addr, new_item)
def store(self, addr, data, size=None, endness=None, memory=None, cooperate=False, **kwargs):
if not cooperate:
data = self._force_store_cooperation(addr, data, size, endness, memory=memory, **kwargs)
if size == len(self.content) and addr == 0:
self.sinkhole = data
self.content = [None]*len(self.content)
else:
max_addr = min(addr + size, len(self.content))
for subaddr in range(addr, max_addr):
self.content[subaddr] = data
self.stored_offset.add(subaddr)
def merge(self, others: List['ListPage'], merge_conditions, common_ancestor=None, page_addr: int=None,
memory=None, changed_offsets: Optional[Set[int]]=None):
if changed_offsets is None:
changed_offsets = set()
for other in others:
changed_offsets |= self.changed_bytes(other, page_addr)
all_pages: List['ListPage'] = [self] + others
if merge_conditions is None:
merge_conditions = [None] * len(all_pages)
merged_to = None
merged_objects = set()
merged_offsets = set()
for b in sorted(changed_offsets):
if merged_to is not None and not b >= merged_to:
l.info("merged_to = %d ... already merged byte 0x%x", merged_to, b)
continue
l.debug("... on byte 0x%x", b)
memory_objects = []
unconstrained_in = []
# first get a list of all memory objects at that location, and
# all memories that don't have those bytes
for sm, fv in zip(all_pages, merge_conditions):
if sm._contains(b, page_addr):
l.info("... present in %s", fv)
memory_objects.append((sm.content[b], fv))
else:
l.info("... not present in %s", fv)
unconstrained_in.append((sm, fv))
mos = set(mo for mo, _ in memory_objects)
mo_bases = set(mo.base for mo, _ in memory_objects)
mo_lengths = set(mo.length for mo, _ in memory_objects)
endnesses = set(mo.endness for mo in mos)
if not unconstrained_in and not (mos - merged_objects):
continue
# first, optimize the case where we are dealing with the same-sized memory objects
if len(mo_bases) == 1 and len(mo_lengths) == 1 and not unconstrained_in and len(endnesses) == 1:
the_endness = next(iter(endnesses))
to_merge = [(mo.object, fv) for mo, fv in memory_objects]
# Update `merged_to`
mo_base = list(mo_bases)[0]
mo_length = memory_objects[0][0].length
size = mo_length - (page_addr + b - mo_base)
merged_to = b + size
merged_val = self._merge_values(to_merge, mo_length, memory=memory)
if merged_val is None:
# merge_values() determines that we should not attempt to merge this value
continue
# do the replacement
# TODO: Implement in-place replacement instead of calling store()
# new_object = self._replace_memory_object(our_mo, merged_val, page_addr, memory.page_size)
self.store(b,
SimMemoryObject(merged_val, mo_base, endness=the_endness),
size=size,
cooperate=True
)
# merged_objects.add(new_object)
# merged_objects.update(mos)
merged_offsets.add(b)
else:
# get the size that we can merge easily. This is the minimum of
# the size of all memory objects and unallocated spaces.
min_size = min([mo.length - (b + page_addr - mo.base) for mo, _ in memory_objects])
for um, _ in unconstrained_in:
for i in range(0, min_size):
if um._contains(b + i, page_addr):
min_size = i
break
merged_to = b + min_size
l.info("... determined minimum size of %d", min_size)
# Now, we have the minimum size. We'll extract/create expressions of that
# size and merge them
extracted = [(mo.bytes_at(page_addr+b, min_size), fv) for mo, fv in memory_objects] if min_size != 0 else []
created = [
(self._default_value(None, min_size, name="merge_uc_%s_%x" % (uc.id, b), memory=memory),
fv) for
uc, fv in unconstrained_in
]
to_merge = extracted + created
merged_val = self._merge_values(to_merge, min_size, memory=memory)
if merged_val is None:
continue
self.store(b,
SimMemoryObject(merged_val, page_addr+b, endness='Iend_BE'),
size=min_size,
endness='Iend_BE', cooperate=True
) # do not convert endianness again
merged_offsets.add(b)
self.stored_offset |= merged_offsets
return merged_offsets
def changed_bytes(self, other: 'ListPage', page_addr: int=None):
candidates: Set[int] = set()
if self.sinkhole is None:
candidates |= self.stored_offset
else:
for i in range(len(self.content)):
if self._contains(i, page_addr):
candidates.add(i)
if other.sinkhole is None:
candidates |= other.stored_offset
else:
for i in range(len(other.content)):
if other._contains(i, page_addr):
candidates.add(i)
byte_width = 8 # TODO: Introduce self.state if we want to use self.state.arch.byte_width
differences: Set[int] = set()
for c in candidates:
s_contains = self._contains(c, page_addr)
o_contains = other._contains(c, page_addr)
if not s_contains and o_contains:
differences.add(c)
elif s_contains and not o_contains:
differences.add(c)
else:
if self.content[c] is None:
self.content[c] = SimMemoryObject(self.sinkhole.bytes_at(page_addr+c, 1), page_addr + c,
byte_width=byte_width, endness='Iend_BE')
if other.content[c] is None:
other.content[c] = SimMemoryObject(other.sinkhole.bytes_at(page_addr+c, 1), page_addr + c,
byte_width=byte_width, endness='Iend_BE')
if s_contains and self.content[c] != other.content[c]:
same = None
if self._mo_cmp is not None:
same = self._mo_cmp(self.content[c], other.content[c], page_addr + c, 1)
if same is None:
# Try to see if the bytes are equal
self_byte = self.content[c].bytes_at(page_addr + c, 1)
other_byte = other.content[c].bytes_at(page_addr + c, 1)
same = self_byte is other_byte
if same is False:
differences.add(c)
else:
# this means the byte is in neither memory
pass
return differences
def _contains(self, off: int, page_addr: int):
if off >= len(self.content):
return False
if self.content[off] is not None:
return True
if self.sinkhole is None:
return False
return self.sinkhole.includes(page_addr + off)
def _replace_mo(self, old_mo: SimMemoryObject, new_mo: SimMemoryObject, page_addr: int,
page_size: int) -> SimMemoryObject:
if self.sinkhole is old_mo:
self.sinkhole = new_mo
else:
start, end = self._resolve_range(old_mo, page_addr, page_size)
for i in range(start, end):
if self.content[i-page_addr] is old_mo:
self.content[i-page_addr] = new_mo
return new_mo
@staticmethod
def _resolve_range(mo: SimMemoryObject, page_addr: int, page_size) -> Tuple[int,int]:
start = max(mo.base, page_addr)
end = min(mo.last_addr + 1, page_addr + page_size)
if end <= start:
l.warning("Nothing left of the memory object to store in SimPage.")
return start, end
def _get_object(self, start: int, page_addr: int) -> Optional[SimMemoryObject]:
mo = self.content[start]
if mo is None:
return None
if mo.includes(start + page_addr):
return mo
return None
| bsd-2-clause | -4,268,543,217,243,824,600 | 42.241135 | 174 | 0.542316 | false |
pombredanne/PyGithub | github/Authorization.py | 1 | 7655 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.AuthorizationApplication
class Authorization(github.GithubObject.CompletableGithubObject):
"""
This class represents Authorizations as returned for example by http://developer.github.com/v3/todo
"""
@property
def app(self):
"""
:type: :class:`github.AuthorizationApplication.AuthorizationApplication`
"""
self._completeIfNotSet(self._app)
return self._app.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def note(self):
"""
:type: string
"""
self._completeIfNotSet(self._note)
return self._note.value
@property
def note_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._note_url)
return self._note_url.value
@property
def scopes(self):
"""
:type: list of string
"""
self._completeIfNotSet(self._scopes)
return self._scopes.value
@property
def token(self):
"""
:type: string
"""
self._completeIfNotSet(self._token)
return self._token.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def delete(self):
"""
:calls: `DELETE /authorizations/:id <http://developer.github.com/v3/oauth>`_
:rtype: None
"""
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url
)
def edit(self, scopes=github.GithubObject.NotSet, add_scopes=github.GithubObject.NotSet, remove_scopes=github.GithubObject.NotSet, note=github.GithubObject.NotSet, note_url=github.GithubObject.NotSet):
"""
:calls: `PATCH /authorizations/:id <http://developer.github.com/v3/oauth>`_
:param scopes: list of string
:param add_scopes: list of string
:param remove_scopes: list of string
:param note: string
:param note_url: string
:rtype: None
"""
assert scopes is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in scopes), scopes
assert add_scopes is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in add_scopes), add_scopes
assert remove_scopes is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in remove_scopes), remove_scopes
assert note is github.GithubObject.NotSet or isinstance(note, (str, unicode)), note
assert note_url is github.GithubObject.NotSet or isinstance(note_url, (str, unicode)), note_url
post_parameters = dict()
if scopes is not github.GithubObject.NotSet:
post_parameters["scopes"] = scopes
if add_scopes is not github.GithubObject.NotSet:
post_parameters["add_scopes"] = add_scopes
if remove_scopes is not github.GithubObject.NotSet:
post_parameters["remove_scopes"] = remove_scopes
if note is not github.GithubObject.NotSet:
post_parameters["note"] = note
if note_url is not github.GithubObject.NotSet:
post_parameters["note_url"] = note_url
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data)
def _initAttributes(self):
self._app = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._note = github.GithubObject.NotSet
self._note_url = github.GithubObject.NotSet
self._scopes = github.GithubObject.NotSet
self._token = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "app" in attributes: # pragma no branch
self._app = self._makeClassAttribute(github.AuthorizationApplication.AuthorizationApplication, attributes["app"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "note" in attributes: # pragma no branch
self._note = self._makeStringAttribute(attributes["note"])
if "note_url" in attributes: # pragma no branch
self._note_url = self._makeStringAttribute(attributes["note_url"])
if "scopes" in attributes: # pragma no branch
self._scopes = self._makeListOfStringsAttribute(attributes["scopes"])
if "token" in attributes: # pragma no branch
self._token = self._makeStringAttribute(attributes["token"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
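# Illustrative usage sketch (assumption, not part of PyGithub itself); the
# method names follow this class and the authenticated-user API, but the
# credentials, scopes and URLs below are made up:
#
#   from github import Github
#   g = Github("user", "password")
#   auth = g.get_user().create_authorization(scopes=["repo"], note="demo")
#   auth.edit(add_scopes=["gist"], note_url="http://example.com/demo")
#   auth.delete()
#
# edit() issues the PATCH request documented above and refreshes the local
# attributes from the response.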
| gpl-3.0 | -3,392,938,758,577,682,400 | 41.06044 | 205 | 0.55885 | false |
andras-tim/StoreKeeper | server/test/e2e/views/test_work.py | 1 | 17825 | from app.modules.example_data import ExampleWorkItems as WorkItems, ExampleWorks as Works, \
ExampleItems as Items, ExampleVendors as Vendors, ExampleUnits as Units, ExampleCustomers as Customers, \
ExampleUsers as Users
from test.e2e.base_api_test import CommonApiTest, append_mandatory_field_tests
from test.e2e.base_session_test import CommonSessionTest
from test.e2e.base_session_test_w_mutable_item import CommonSessionTestWithItemManipulation
class TestWorkWithBrandNewDb(CommonApiTest):
ENDPOINT = '/works'
INIT_PUSH = [
('/customers', [Customers.CUSTOMER1, Customers.CUSTOMER2]),
]
def test_new_db(self):
self.assertApiGet(expected_data=[])
self.assertApiGet(1, expected_status_codes=404)
def test_adding_new_works(self):
self.assertApiPost(data=Works.WORK1, expected_data=Works.WORK1)
self.assertApiPost(data=Works.WORK2, expected_data=Works.WORK2)
def test_can_add_work_with_same_comment(self):
self.assertApiPost(data=Works.WORK1)
self.assertApiPost(data=Works.WORK2.set(
change={'comment': Works.WORK1['comment']}))
class TestWorkWithPreFilledDb(CommonApiTest):
ENDPOINT = '/works'
INIT_PUSH = [
('/customers', [Customers.CUSTOMER1, Customers.CUSTOMER2]),
(ENDPOINT, [Works.WORK1, Works.WORK2]),
]
def test_list_works(self):
self.assertApiGet(expected_data=[Works.WORK1,
Works.WORK2])
def test_get_work(self):
self.assertApiGet(2, expected_data=Works.WORK2)
self.assertApiGet(1, expected_data=Works.WORK1)
def test_remove_work(self):
self.assertApiDelete(1)
self.assertApiGet(expected_data=[Works.WORK2.get()])
def test_can_not_remove_non_existed_work(self):
self.assertApiDelete(3, expected_status_codes=404)
self.assertApiGet(expected_data=[Works.WORK1,
Works.WORK2])
def test_update_work(self):
request = Works.WORK2.set(change={'comment': 'Something are not finished'})
response = Works.WORK2.get(change={'comment': request['comment']})
self.assertApiPut(Works.WORK2['id'], data=request, expected_data=response)
self.assertApiGet(expected_data=[Works.WORK1,
response])
class TestCloseOutboundOfWorkWithoutWorkItems(CommonSessionTest):
ENDPOINT = '/works'
INIT_PUSH = [
('/users', [Users.USER1]),
('/customers', [Customers.CUSTOMER1]),
]
def setUp(self):
super().setUp()
self.assertApiLogin(Users.USER1)
def test_can_not_close_outbound_of_non_existed_work(self):
self.assertApiPut(1, url_suffix='/close-outbound',
expected_status_codes=404)
def test_can_close_outbound_once(self):
self.assertApiPost(data=Works.WORK1, expected_data=Works.WORK1)
self.assertApiPut(1, url_suffix='/close-outbound',
expected_data=Works.WORK1_OUTBOUND_CLOSED)
def test_can_not_close_outbound_twice(self):
self.assertApiPost(data=Works.WORK1, expected_data=Works.WORK1)
self.assertApiPut(1, url_suffix='/close-outbound')
self.assertApiPut(1, url_suffix='/close-outbound',
expected_data={'message': 'Outbound items have been closed.'},
expected_status_codes=422)
class TestCloseReturnedOfWorkWithoutWorkItems(CommonSessionTest):
ENDPOINT = '/works'
INIT_PUSH = [
('/users', [Users.USER1]),
('/customers', [Customers.CUSTOMER1]),
]
def setUp(self):
super().setUp()
self.assertApiLogin(Users.USER1)
def test_can_not_close_returned_of_non_existed_work(self):
self.assertApiPut(1, url_suffix='/close-returned',
expected_status_codes=404)
def test_can_not_close_returned_before_not_closed_outbound_of_work(self):
self.assertApiPost(data=Works.WORK1, expected_data=Works.WORK1)
self.assertApiPut(1, url_suffix='/close-returned',
expected_data={'message': 'Outbound items have not been closed.'},
expected_status_codes=422)
def test_can_close_returned_once(self):
self.assertApiPost(data=Works.WORK1, expected_data=Works.WORK1)
self.assertApiPut(1, url_suffix='/close-outbound')
self.assertApiPut(1, url_suffix='/close-returned',
expected_data=Works.WORK1_RETURNED_CLOSED)
def test_can_not_close_returned_twice(self):
self.assertApiPost(data=Works.WORK1, expected_data=Works.WORK1)
self.assertApiPut(1, url_suffix='/close-outbound')
self.assertApiPut(1, url_suffix='/close-returned')
self.assertApiPut(1, url_suffix='/close-returned',
expected_data={'message': 'Returned items have been closed.'},
expected_status_codes=422)
@append_mandatory_field_tests(item_name='work_item', base_item=WorkItems.ITEM1,
mandatory_fields=['item', 'outbound_quantity'])
class TestWorkItemWithBrandNewDb(CommonApiTest):
ENDPOINT = '/works/1/items'
BAD_ENDPOINT = '/works/2/items'
INIT_PUSH = [
('/customers', [Customers.CUSTOMER1, Customers.CUSTOMER2]),
('/works', [Works.WORK1]),
('/vendors', [Vendors.VENDOR1, Vendors.VENDOR2]),
('/units', [Units.UNIT1, Units.UNIT2]),
('/items', [Items.ITEM1, Items.ITEM2]),
]
def test_new_db(self):
self.assertApiGet(expected_data=[])
self.assertApiGet(1, expected_status_codes=404)
self.assertApiGet(endpoint=self.BAD_ENDPOINT,
expected_status_codes=404)
def test_adding_new_work_items(self):
self.assertApiPost(data=WorkItems.ITEM1, expected_data=WorkItems.ITEM1)
self.assertApiPost(data=WorkItems.ITEM2, expected_data=WorkItems.ITEM2)
def test_can_not_adding_new_item_to_a_non_existed_work(self):
self.assertApiPost(data=WorkItems.ITEM1, endpoint=self.BAD_ENDPOINT,
expected_status_codes=404)
def test_can_add_work_item_with_minimal_quantities(self):
self.assertApiPost(data=WorkItems.ITEM1.set(change={'outbound_quantity': 1}))
self.assertApiPost(data=WorkItems.ITEM2.set(change={'returned_quantity': 0}))
def test_can_not_add_work_item_with_zero_outbound_quantity(self):
self.assertApiPost(data=WorkItems.ITEM1.set(change={'outbound_quantity': 0}), expected_status_codes=422)
def test_can_not_add_work_item_with_lower_than_zero_returned_quantity(self):
self.assertApiPost(data=WorkItems.ITEM1.set(change={'returned_quantity': -1}), expected_status_codes=422)
def test_can_add_work_item_with_more_returned_quantity_than_outbound_quantity(self):
self.assertApiPost(data=WorkItems.ITEM1.set(change={'outbound_quantity': 1, 'returned_quantity': 3}))
def test_can_not_add_more_than_once_an_item_to_a_work(self):
self.assertApiPost(data=WorkItems.ITEM1)
self.assertApiPost(data=WorkItems.ITEM2.set(change={'item': WorkItems.ITEM1['item']}),
expected_data={'message': {'item_id, work_id': ['Already exists.']}},
expected_status_codes=422)
class TestWorkItemWithPreFilledDb(CommonApiTest):
ENDPOINT = '/works/1/items'
BAD_ENDPOINT = '/works/2/items'
INIT_PUSH = [
('/customers', [Customers.CUSTOMER1, Customers.CUSTOMER2]),
('/works', [Works.WORK1]),
('/vendors', [Vendors.VENDOR1, Vendors.VENDOR2]),
('/units', [Units.UNIT1, Units.UNIT2]),
('/items', [Items.ITEM1, Items.ITEM2, Items.ITEM3]),
(ENDPOINT, [WorkItems.ITEM1, WorkItems.ITEM2]),
]
def test_list_work_items(self):
self.assertApiGet(expected_data=[WorkItems.ITEM1,
WorkItems.ITEM2])
def test_can_not_list_work_items_of_a_non_existed_work(self):
self.assertApiGet(endpoint=self.BAD_ENDPOINT,
expected_status_codes=404)
def test_get_work_item(self):
self.assertApiGet(2, expected_data=WorkItems.ITEM2)
self.assertApiGet(1, expected_data=WorkItems.ITEM1)
def test_can_not_get_work_item_of_a_non_existed_work(self):
self.assertApiGet(1, endpoint=self.BAD_ENDPOINT,
expected_status_codes=404)
def test_remove_work_item(self):
self.assertApiDelete(1)
self.assertApiGet(expected_data=[WorkItems.ITEM2])
def test_can_not_remove_work_item_of_a_non_existed_work(self):
self.assertApiDelete(1, endpoint=self.BAD_ENDPOINT,
expected_status_codes=404)
def test_can_not_remove_non_existed_work_item(self):
self.assertApiDelete(4, expected_status_codes=404)
self.assertApiGet(expected_data=[WorkItems.ITEM1,
WorkItems.ITEM2])
def test_update_work_item(self):
request = WorkItems.ITEM2.set(change={'item': Items.ITEM3.get(),
'outbound_quantity': 8,
'returned_quantity': 8})
response = WorkItems.ITEM2.get(change={'item': request['item'],
'outbound_quantity': request['outbound_quantity'],
'returned_quantity': request['returned_quantity']})
self.assertApiPut(WorkItems.ITEM2['id'], data=request, expected_data=response)
self.assertApiGet(expected_data=[WorkItems.ITEM1, response])
def test_can_not_update_work_item_of_a_non_existed_work(self):
self.assertApiPut(WorkItems.ITEM1['id'], data=WorkItems.ITEM1, endpoint=self.BAD_ENDPOINT,
expected_status_codes=404)
class TestCloseOutboundOfWorkWithWorkItems(CommonSessionTestWithItemManipulation):
ENDPOINT = '/works/1/items'
INIT_PUSH = [
('/users', [Users.USER1]),
('/customers', [Customers.CUSTOMER1, Customers.CUSTOMER2]),
('/works', [Works.WORK1]),
('/vendors', [Vendors.VENDOR1, Vendors.VENDOR2]),
('/units', [Units.UNIT1, Units.UNIT2]),
('/items', [Items.ITEM1, Items.ITEM2]),
(ENDPOINT, [WorkItems.ITEM1, WorkItems.ITEM2]),
]
def setUp(self):
super().setUp()
self.assertApiLogin(Users.USER1)
def test_can_not_close_outbound_with_insufficient_item_quantities(self):
self.assertApiPut(Works.WORK1['id'], endpoint='/works', url_suffix='/close-outbound',
expected_data={'message': 'insufficient quantities for close the outbound work items: '
'\'Spray\': 0.0 - 41.2, \'Pipe\': 0.0 - 132.8'
},
expected_status_codes=422)
self.assertApiGet(Works.WORK1['id'], endpoint='/works', expected_data=Works.WORK1)
self.assertApiGet(expected_data=[
WorkItems.ITEM1,
WorkItems.ITEM2,
])
def test_can_not_close_outbound_with_one_insufficient_item_quantity(self):
self._set_item_quantity({'item_id': WorkItems.ITEM1['item']['id'], 'quantity': 1000.0})
self.assertApiPut(Works.WORK1['id'], endpoint='/works', url_suffix='/close-outbound',
expected_data={'message': 'insufficient quantities for close the outbound work items: '
'\'Spray\': 0.0 - 41.2'
},
expected_status_codes=422)
self.assertApiGet(Works.WORK1['id'], endpoint='/works', expected_data=Works.WORK1)
self.assertApiGet(expected_data=[
WorkItems.ITEM1.get(change={'item': {'quantity': 1000.0}}),
WorkItems.ITEM2,
])
def test_can_close_outbound_with_enough_item_quantities(self):
self._set_item_quantity(
{'item_id': WorkItems.ITEM1['item']['id'], 'quantity': 1000.0},
{'item_id': WorkItems.ITEM2['item']['id'], 'quantity': 1000.0},
)
self.assertApiPut(Works.WORK1['id'], endpoint='/works', url_suffix='/close-outbound')
self.assertApiGet(Works.WORK1['id'], endpoint='/works', expected_data=Works.WORK1_OUTBOUND_CLOSED)
self.assertApiGet(expected_data=[
WorkItems.ITEM1.get(change={'item': {'quantity': 1000.0 - WorkItems.ITEM1['outbound_quantity']}}),
WorkItems.ITEM2.get(change={'item': {'quantity': 1000.0 - WorkItems.ITEM2['outbound_quantity']}}),
])
def test_can_close_outbound_with_just_enough_item_quantities(self):
self._set_item_quantity(
{'item_id': WorkItems.ITEM1['item']['id'], 'quantity': WorkItems.ITEM1['outbound_quantity']},
{'item_id': WorkItems.ITEM2['item']['id'], 'quantity': WorkItems.ITEM2['outbound_quantity']},
)
self.assertApiPut(Works.WORK1['id'], endpoint='/works', url_suffix='/close-outbound')
self.assertApiGet(Works.WORK1['id'], endpoint='/works', expected_data=Works.WORK1_OUTBOUND_CLOSED)
self.assertApiGet(expected_data=[
WorkItems.ITEM1.get(change={'item': {'quantity': 0.0}}),
WorkItems.ITEM2.get(change={'item': {'quantity': 0.0}}),
])
class TestCloseReturnedOfWorkWithWorkItems(CommonSessionTestWithItemManipulation):
ENDPOINT = '/works/1/items'
INIT_PUSH = [
('/users', [Users.USER1]),
('/customers', [Customers.CUSTOMER1, Customers.CUSTOMER2]),
('/works', [Works.WORK1]),
('/vendors', [Vendors.VENDOR1, Vendors.VENDOR2]),
('/units', [Units.UNIT1, Units.UNIT2]),
('/items', [Items.ITEM1, Items.ITEM2]),
(ENDPOINT, [WorkItems.ITEM1, WorkItems.ITEM2]),
]
def setUp(self):
super().setUp()
# set just enough item quantity for closing outbound
self._set_item_quantity(
{'item_id': WorkItems.ITEM1['item']['id'], 'quantity': WorkItems.ITEM1['outbound_quantity']},
{'item_id': WorkItems.ITEM2['item']['id'], 'quantity': WorkItems.ITEM2['outbound_quantity']},
)
self.assertApiLogin(Users.USER1)
self.assertApiPut(Works.WORK1['id'], endpoint='/works', url_suffix='/close-outbound')
def test_can_close_returned_item_quantities(self):
self.assertApiPut(WorkItems.ITEM1['id'], data=WorkItems.ITEM1.set(change={'returned_quantity': 4.4}))
self.assertApiPut(WorkItems.ITEM2['id'], data=WorkItems.ITEM2.set(change={'returned_quantity': 9.9}))
self.assertApiPut(Works.WORK1_OUTBOUND_CLOSED['id'], endpoint='/works', url_suffix='/close-returned')
self.assertApiGet(Works.WORK1_OUTBOUND_CLOSED['id'], endpoint='/works',
expected_data=Works.WORK1_RETURNED_CLOSED)
self.assertApiGet(expected_data=[
WorkItems.ITEM1.get(change={'item': {'quantity': 4.4}, 'returned_quantity': 4.4}),
WorkItems.ITEM2.get(change={'item': {'quantity': 9.9}, 'returned_quantity': 9.9}),
])
def test_can_close_returned_with_zero_and_none_item_quantities(self):
# WorkItems.ITEM1 has returned_quantity=None
self.assertApiPut(WorkItems.ITEM2['id'], data=WorkItems.ITEM2.set(change={'returned_quantity': 0.0}))
self.assertApiPut(Works.WORK1_OUTBOUND_CLOSED['id'], endpoint='/works', url_suffix='/close-returned')
self.assertApiGet(Works.WORK1_OUTBOUND_CLOSED['id'], endpoint='/works',
expected_data=Works.WORK1_RETURNED_CLOSED)
self.assertApiGet(expected_data=[
WorkItems.ITEM1.get(change={'item': {'quantity': 0.0}, 'returned_quantity': 0.0}),
WorkItems.ITEM2.get(change={'item': {'quantity': 0.0}, 'returned_quantity': 0.0}),
])
class TestWorkItemWithClosedOutbound(CommonSessionTestWithItemManipulation):
ENDPOINT = '/works/1/items'
BAD_ENDPOINT = '/works/2/items'
INIT_PUSH = [
('/users', [Users.USER1]),
('/customers', [Customers.CUSTOMER1, Customers.CUSTOMER2]),
('/works', [Works.WORK1]),
('/vendors', [Vendors.VENDOR1, Vendors.VENDOR2]),
('/units', [Units.UNIT1, Units.UNIT2]),
('/items', [Items.ITEM1, Items.ITEM2]),
(ENDPOINT, [WorkItems.ITEM1]),
]
def setUp(self):
super().setUp()
self._set_item_quantity({'item_id': WorkItems.ITEM1.get()['item']['id'], 'quantity': 1000.0})
self.assertApiLogin(Users.USER1)
self.assertApiPut(Works.WORK1['id'], endpoint='/works', url_suffix='/close-outbound')
def test_can_not_add_new_work_item_after_outbound_items_are_closed(self):
self.assertApiPost(data=WorkItems.ITEM2.set(change={'work': Works.WORK1.get()}),
expected_data={'message': 'Can not add new item.'}, expected_status_codes=403)
def test_can_not_change_outbound_work_item_after_outbound_items_are_closed(self):
request = WorkItems.ITEM1.set(change={'item': Items.ITEM1.get()})
self.assertApiPut(1, data=request,
expected_data={'message': 'Work item was closed.'}, expected_status_codes=403)
request = WorkItems.ITEM1.set(change={'outbound_quantity': WorkItems.ITEM1['outbound_quantity'] + 1})
self.assertApiPut(1, data=request,
expected_data={'message': 'Work item was closed.'}, expected_status_codes=403)
def test_can_not_delete_work_item_after_outbound_items_are_closed(self):
self.assertApiDelete(1,
expected_data={'message': 'Can not delete item.'}, expected_status_codes=403)
| gpl-2.0 | 7,240,236,778,862,567,000 | 45.298701 | 113 | 0.620084 | false |
andres-hurtado-lopez/naranjaverdeprod | app/functionality_access/__init__.py | 1 | 3538 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from bottle import template, redirect, request
import utils, json
from passlib.hash import bcrypt
def GET(**params):
table = utils.RenderTable(\
"""SELECT * FROM users""",\
(),\
u"""
<tr>
<th><input type="checkbox"></th>
<th>Usuario</th>
<th>Nombre Completo</th>
<th>Departamento</th>
<th>Tipo</th>
</tr>
""",\
u"""
<tr>
<td scope="row"><input type="checkbox" data-docmat="{user}"></td>
<td><a href="/web/functionality_access/user_functionality_access?id={user}">{user}</a></td>
<td>{full_name}</td>
<td>{department}</td>
<td>{type}</td>
</tr>""",\
'table table-bordered',\
10,\
int(params.get('table_table-bordered_page','1'))\
)
return template('functionality_access_index.html',table=table)
def user_functionality_access_GET(**params):
db = utils.ConnectDB()
secobj_to_name = lambda x: utils.secobjs.get(x,{'name':''})['name']
table_asignado = utils.RenderTable(\
"""SELECT secobj FROM users_secobjs WHERE user = %s""",\
(params.get('id',''),),\
u"""
<tr>
<th><input type="checkbox"></th>
<th># Objeto</th>
<th>Descripcion</th>
</tr>
""",\
u"""
<tr>
<td scope="row"><input type="checkbox" data-asignado="{secobj}"></td>
<td>{secobj}</td>
<td>{description}</td>
</tr>""",\
'table table-bordered',\
5000,\
int(params.get('table_table-bordered_page','1')),\
{
'secobj':
{
'out_col':'description',
'conv_func': secobj_to_name
}
}
)
db.execute("SELECT secobj FROM users_secobjs WHERE user = %s",(params.get('id',''),))
available = db.fetchall()
not_available = [{'secobj':psecobj, 'description': secobj_to_name(psecobj)} for psecobj in utils.secobjs if not psecobj in [prow['secobj'] for prow in available]]
table_no_asignado = u"""\
<div class="table-responsive">
<table class="table table-bordered">
<thead>
<tr>
<th><input type="checkbox"></th>
<th># Objeto</th>
<th>Descripcion</th>
</tr>
</thead>\n\
<tbody>"""
for row in not_available:
table_no_asignado += u"""
<tr>
<td scope="row"><input type="checkbox" data-no_asignado="{secobj}"></td>
<td>{secobj}</td>
<td>{description}</td>
</tr>""".format(**row)
table_no_asignado += """\n\
</tbody>\n\
</table>\n\
</div>\n\
"""
return template('functionality_access_editor.html',user=params.get('id',''),table_asignado=table_asignado, table_no_asignado=table_no_asignado)
def user_functionality_access_POST(**params):
db = utils.ConnectDB()
params = json.loads(request.body.read())
secobjs = params['secobjs']
user = params['user']
for secobj in secobjs:
db.execute('REPLACE INTO users_secobjs (`user`, secobj) VALUES (%s, %s)',(user, secobj))
if len(secobjs) > 0:
db.execute('COMMIT WORK')
return {}
def user_functionality_access_DELETE(**params):
db = utils.ConnectDB()
params = json.loads(request.body.read())
secobjs = params['secobjs']
user = params['user']
for secobj in secobjs:
db.execute('DELETE FROM users_secobjs WHERE `user` = %s AND secobj = %s',(user, secobj))
if len(secobjs) > 0:
db.execute('COMMIT WORK')
return {}
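# Illustrative request sketch (assumption, not part of the original module):
# both the POST and DELETE handlers above expect a JSON body such as
#   {"user": "jdoe", "secobjs": ["sales_report", "user_admin"]}
# POST grants the listed security objects to the user (REPLACE INTO), DELETE
# revokes them, and a COMMIT is issued only when at least one secobj is given.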
| mit | -3,747,365,128,313,200,000 | 25.601504 | 166 | 0.552289 | false |
geodynamics/lithomop | lithomop3d/tests/lithomop3dapp.py | 1 | 1631 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Lithomop3d by Charles A. Williams
# Copyright (c) 2003-2005 Rensselaer Polytechnic Institute
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#
# main
if __name__ == "__main__":
from lithomop3d.Application import Application
app = Application()
app.main()
# version
__id__ = "$Id: lithomop3dapp.py,v 1.1 2004/04/14 21:26:20 willic3 Exp $"
# End of file
| mit | -2,781,755,995,276,597,000 | 35.244444 | 80 | 0.647456 | false |
lorensen/VTKExamples | src/Python/Widgets/EmbedInPyQt.py | 1 | 1287 | #!/usr/bin/env python
import sys
import vtk
from PyQt4 import QtGui
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
class MainWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.frame = QtGui.QFrame()
self.vl = QtGui.QVBoxLayout()
self.vtkWidget = QVTKRenderWindowInteractor(self.frame)
self.vl.addWidget(self.vtkWidget)
self.ren = vtk.vtkRenderer()
self.vtkWidget.GetRenderWindow().AddRenderer(self.ren)
self.iren = self.vtkWidget.GetRenderWindow().GetInteractor()
# Create source
source = vtk.vtkSphereSource()
source.SetCenter(0, 0, 0)
source.SetRadius(5.0)
# Create a mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
# Create an actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
self.ren.AddActor(actor)
self.ren.ResetCamera()
self.frame.setLayout(self.vl)
self.setCentralWidget(self.frame)
self.show()
self.iren.Initialize()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
window = MainWindow()
sys.exit(app.exec_())
| apache-2.0 | 2,054,659,394,894,716,700 | 22.833333 | 73 | 0.639472 | false |
Passaudage/PLD | simulateur/jacky.py | 1 | 5825 | import GenerateurEntrees
import Intersection
import SimulationManager
import Troncon
import Coordonnees
import Vehicule
def charger_simulateur():
longueur_troncon = 5000
sm = SimulationManager.SimulationManager(5)
gen_sud = GenerateurEntrees.GenerateurEntrees([[1 , 20]])
sm.add_listener(gen_sud)
gen_ouest = GenerateurEntrees.GenerateurEntrees([[1 , 3]])
sm.add_listener(gen_ouest)
gen_est = GenerateurEntrees.GenerateurEntrees([[1 , 5]])
sm.add_listener(gen_est)
gen_nord = GenerateurEntrees.GenerateurEntrees([[1 , 3]])
sm.add_listener(gen_nord)
i = Intersection.Intersection(sm, Coordonnees.Coordonnees(6050, 6050), 2100, 2100)
sm.add_listener(i)
t_sud = Troncon.Troncon(i,
None,
Coordonnees.Coordonnees(6050, 0),
Coordonnees.Coordonnees(6050, longueur_troncon),
{"G" : 0.2 , "TD" : 0.5 , "D": 0.3},
{"G": 0.3, "TD": 0.2, "D": 0.5})
t_sud.ajouter_generateur("sens1",gen_sud)
t_est = Troncon.Troncon(None, i, Coordonnees.Coordonnees(7100, 6050), Coordonnees.Coordonnees(7100+longueur_troncon, 6050),
{"G": 0.2, "TD": 0.5, "D": 0.3},
{"G": 0.3, "TD": 0.2, "D": 0.5})
t_est.ajouter_generateur("sens2",gen_est)
t_ouest = Troncon.Troncon(i, None, Coordonnees.Coordonnees(0, 6050), Coordonnees.Coordonnees(longueur_troncon, 6050),
{"G": 0.5, "TD": 0.2, "D": 0.3},
{"G": 0.1, "TD": 0.7, "D": 0.2})
t_ouest.ajouter_generateur("sens1",gen_ouest)
t_nord = Troncon.Troncon(None, i, Coordonnees.Coordonnees(6050, 7100), Coordonnees.Coordonnees(6050, longueur_troncon+7100),
{"G": 0.2, "TD": 0.4, "D": 0.4},
{"G": 0.3, "TD": 0.5, "D": 0.2})
t_nord.ajouter_generateur("sens2",gen_nord)
t_sud.creer_voie(["G"], "sens1", 1388)
t_sud.creer_voie(["TD"], "sens1", 1388)
t_sud.creer_voie(["D"], "sens1", 1388)
t_sud.creer_voie(["G"], "sens2", 1388)
t_sud.creer_voie(["TD"], "sens2", 1388)
t_sud.creer_voie(["D"], "sens2", 1388)
t_nord.creer_voie(["G"], "sens1", 1388)
t_nord.creer_voie(["TD"], "sens1", 1388)
t_nord.creer_voie(["D"], "sens1", 1388)
t_nord.creer_voie(["G"], "sens2", 1388)
t_nord.creer_voie(["TD"], "sens2", 1388)
t_nord.creer_voie(["D"], "sens2", 1388)
t_est.creer_voie(["G"], "sens1", 1388)
t_est.creer_voie(["TD"], "sens1", 1388)
t_est.creer_voie(["D"], "sens1", 1388)
t_est.creer_voie(["G"], "sens2", 1388)
t_est.creer_voie(["TD"], "sens2", 1388)
t_est.creer_voie(["D"], "sens2", 1388)
t_ouest.creer_voie(["G"], "sens1", 1388)
t_ouest.creer_voie(["TD"], "sens1", 1388)
t_ouest.creer_voie(["D"], "sens1", 1388)
t_ouest.creer_voie(["G"], "sens2", 1388)
t_ouest.creer_voie(["TD"], "sens2", 1388)
t_ouest.creer_voie(["D"], "sens2", 1388)
t_ouest.donner_voies_intersections()
t_est.donner_voies_intersections()
t_nord.donner_voies_intersections()
t_sud.donner_voies_intersections()
i.creer_feux()
#~ gen_sud.ajoute_voie_entrante(t_sud.voies_sens2)
#~ gen_est.ajoute_voie_entrante(t_est.voies_sens1)
#~ gen_ouest.ajoute_voie_entrante(t_ouest.voies_sens2)
#~ gen_nord.ajoute_voie_entrante(t_nord.voies_sens1)
#~ gen_sud.ajoute_voie_sortante(t_sud.voies_sens1)
#~ gen_est.ajoute_voie_sortante(t_est.voies_sens2)
#~ gen_ouest.ajoute_voie_sortante(t_ouest.voies_sens1)
#~ gen_nord.ajoute_voie_sortante(t_nord.voies_sens2)
#~ t_est.voies_sens2[0].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[1].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[2].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[0].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[1].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[2].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[0].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[1].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[2].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[0].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[1].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[2].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[0].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[1].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[2].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[0].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[1].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[2].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[0].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[1].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[2].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[0].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[1].creer_vehicule(sm, 0, 500)
#~ t_est.voies_sens2[2].creer_vehicule(sm, 0, 500)
#~
i.trouver_configurations_feux()
#~ print(i.combinaisons)
#for k,v in i.combinaisons.items():
# print(str(k)+" "+str(v))
# for f in v:
# print("feu " + str(f[0]))
#raise Exception("Bonfante")
return sm
def main():
a = Coordonnees.Coordonnees(0,1)
b = a
a = a + a
print(a + b)
sm = charger_simulateur()
liste_v = Vehicule.Vehicule.liste_voitures
#print(liste_v)
toto = liste_v[0]
#print(toto.origine)
#~ toto.notifie_temps(5,sm)
#~ toto.notifie_temps(5,sm)
#~ toto.notifie_temps(5,sm)
#~ toto.notifie_temps(5,sm)
#~ toto.notifie_temps(5,sm)
#~ for i in range(10000):
#~ sm.avance_temps()
toto = liste_v[0]
#print(toto.intersection)
for v in liste_v:
print(v.coordonnees)
pass
if __name__ == '__main__':
main()
| mit | -3,497,025,016,034,291,000 | 34.090361 | 128 | 0.586266 | false |
dennisss/sympy | sympy/core/evalf.py | 1 | 47559 | """
Adaptive numerical evaluation of SymPy expressions, using mpmath
for mathematical functions.
"""
from __future__ import print_function, division
import math
import sympy.mpmath.libmp as libmp
from sympy.mpmath import make_mpc, make_mpf, mp, mpc, mpf, nsum, quadts, quadosc
from sympy.mpmath import inf as mpmath_inf
from sympy.mpmath.libmp import (from_int, from_man_exp, from_rational, fhalf,
fnan, fnone, fone, fzero, mpf_abs, mpf_add,
mpf_atan, mpf_atan2, mpf_cmp, mpf_cos, mpf_e, mpf_exp, mpf_log, mpf_lt,
mpf_mul, mpf_neg, mpf_pi, mpf_pow, mpf_pow_int, mpf_shift, mpf_sin,
mpf_sqrt, normalize, round_nearest, to_int, to_str)
from sympy.mpmath.libmp import bitcount as mpmath_bitcount
from sympy.mpmath.libmp.backend import MPZ
from sympy.mpmath.libmp.libmpc import _infs_nan
from sympy.mpmath.libmp.libmpf import dps_to_prec
from sympy.mpmath.libmp.gammazeta import mpf_bernoulli
from .compatibility import SYMPY_INTS
from .sympify import sympify
from .core import C
from .singleton import S
from .containers import Tuple
from sympy.utilities.iterables import is_sequence
LG10 = math.log(10, 2)
rnd = round_nearest
def bitcount(n):
return mpmath_bitcount(int(n))
# Used in a few places as placeholder values to denote exponents and
# precision levels, e.g. of exact numbers. Must be careful to avoid
# passing these to mpmath functions or returning them in final results.
INF = float(mpmath_inf)
MINUS_INF = float(-mpmath_inf)
# ~= 100 digits. Real men set this to INF.
DEFAULT_MAXPREC = 333
class PrecisionExhausted(ArithmeticError):
pass
#----------------------------------------------------------------------------#
# #
# Helper functions for arithmetic and complex parts #
# #
#----------------------------------------------------------------------------#
"""
An mpf value tuple is a tuple of integers (sign, man, exp, bc)
representing a floating-point number: [1, -1][sign]*man*2**exp where
sign is 0 or 1 and bc should correspond to the number of bits used to
represent the mantissa (man) in binary notation, e.g.
>>> from sympy.core.evalf import bitcount
>>> sign, man, exp, bc = 0, 5, 1, 3
>>> n = [1, -1][sign]*man*2**exp
>>> n, bitcount(man)
(10, 3)
A temporary result is a tuple (re, im, re_acc, im_acc) where
re and im are nonzero mpf value tuples representing approximate
numbers, or None to denote exact zeros.
re_acc, im_acc are integers denoting log2(e) where e is the estimated
relative accuracy of the respective complex part, but may be anything
if the corresponding complex part is None.
"""
def fastlog(x):
"""Fast approximation of log2(x) for an mpf value tuple x.
Notes: Calculated as exponent + width of mantissa. This is an
approximation for two reasons: 1) it gives the ceil(log2(abs(x)))
value and 2) it is too high by 1 in the case that x is an exact
power of 2. Although this is easy to remedy by testing to see if
the odd mpf mantissa is 1 (indicating that one was dealing with
an exact power of 2) that would decrease the speed and is not
necessary as this is only being used as an approximation for the
number of bits in x. The correct return value could be written as
"x[2] + (x[3] if x[1] != 1 else 0)".
Since mpf tuples always have an odd mantissa, no check is done
to see if the mantissa is a multiple of 2 (in which case the
result would be too large by 1).
Examples
========
>>> from sympy import log
>>> from sympy.core.evalf import fastlog, bitcount
>>> s, m, e = 0, 5, 1
>>> bc = bitcount(m)
>>> n = [1, -1][s]*m*2**e
>>> n, (log(n)/log(2)).evalf(2), fastlog((s, m, e, bc))
(10, 3.3, 4)
"""
if not x or x == fzero:
return MINUS_INF
return x[2] + x[3]
def pure_complex(v):
"""Return a and b if v matches a + I*b where b is not zero and
a and b are Numbers, else None.
>>> from sympy.core.evalf import pure_complex
>>> from sympy import Tuple, I
>>> a, b = Tuple(2, 3)
>>> pure_complex(a)
>>> pure_complex(a + b*I)
(2, 3)
>>> pure_complex(I)
(0, 1)
"""
h, t = v.as_coeff_Add()
c, i = t.as_coeff_Mul()
if i is S.ImaginaryUnit:
return h, c
def scaled_zero(mag, sign=1):
"""Return an mpf representing a power of two with magnitude ``mag``
and -1 for precision. Or, if ``mag`` is a scaled_zero tuple, then just
remove the sign from within the list that it was initially wrapped
in.
Examples
========
>>> from sympy.core.evalf import scaled_zero
>>> from sympy import Float
>>> z, p = scaled_zero(100)
>>> z, p
(([0], 1, 100, 1), -1)
>>> ok = scaled_zero(z)
>>> ok
(0, 1, 100, 1)
>>> Float(ok)
1.26765060022823e+30
>>> Float(ok, p)
0.e+30
>>> ok, p = scaled_zero(100, -1)
>>> Float(scaled_zero(ok), p)
-0.e+30
"""
if type(mag) is tuple and len(mag) == 4 and iszero(mag, scaled=True):
return (mag[0][0],) + mag[1:]
elif isinstance(mag, SYMPY_INTS):
if sign not in [-1, 1]:
raise ValueError('sign must be +/-1')
rv, p = mpf_shift(fone, mag), -1
s = 0 if sign == 1 else 1
rv = ([s],) + rv[1:]
return rv, p
else:
raise ValueError('scaled zero expects int or scaled_zero tuple.')
def iszero(mpf, scaled=False):
if not scaled:
return not mpf or not mpf[1] and not mpf[-1]
return mpf and type(mpf[0]) is list and mpf[1] == mpf[-1] == 1
def complex_accuracy(result):
"""
Returns relative accuracy of a complex number with given accuracies
for the real and imaginary parts. The relative accuracy is defined
in the complex norm sense as ||z|+|error|| / |z| where error
is equal to (real absolute error) + (imag absolute error)*i.
The full expression for the (logarithmic) error can be approximated
easily by using the max norm to approximate the complex norm.
In the worst case (re and im equal), this is wrong by a factor
sqrt(2), or by log2(sqrt(2)) = 0.5 bit.
"""
re, im, re_acc, im_acc = result
if not im:
if not re:
return INF
return re_acc
if not re:
return im_acc
re_size = fastlog(re)
im_size = fastlog(im)
absolute_error = max(re_size - re_acc, im_size - im_acc)
relative_error = absolute_error - max(re_size, im_size)
return -relative_error
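# Illustrative sketch (assumption, not original code): for a result whose real
# part is about 2**10 and accurate to 53 bits while the imaginary part is
# about 2**0 and accurate to only 20 bits, the absolute error is dominated by
# the imaginary part (~2**-19) and the norm by the real part (~2**11), so
# complex_accuracy reports roughly 30 bits of relative accuracy.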
def get_abs(expr, prec, options):
re, im, re_acc, im_acc = evalf(expr, prec + 2, options)
if not re:
re, re_acc, im, im_acc = im, im_acc, re, re_acc
if im:
return libmp.mpc_abs((re, im), prec), None, re_acc, None
elif re:
return mpf_abs(re), None, re_acc, None
else:
return None, None, None, None
def get_complex_part(expr, no, prec, options):
"""no = 0 for real part, no = 1 for imaginary part"""
workprec = prec
i = 0
while 1:
res = evalf(expr, workprec, options)
value, accuracy = res[no::2]
# XXX is the last one correct? Consider re((1+I)**2).n()
if (not value) or accuracy >= prec or -value[2] > prec:
return value, None, accuracy, None
workprec += max(30, 2**i)
i += 1
def evalf_abs(expr, prec, options):
return get_abs(expr.args[0], prec, options)
def evalf_re(expr, prec, options):
return get_complex_part(expr.args[0], 0, prec, options)
def evalf_im(expr, prec, options):
return get_complex_part(expr.args[0], 1, prec, options)
def finalize_complex(re, im, prec):
if re == fzero and im == fzero:
raise ValueError("got complex zero with unknown accuracy")
elif re == fzero:
return None, im, None, prec
elif im == fzero:
return re, None, prec, None
size_re = fastlog(re)
size_im = fastlog(im)
if size_re > size_im:
re_acc = prec
im_acc = prec + min(-(size_re - size_im), 0)
else:
im_acc = prec
re_acc = prec + min(-(size_im - size_re), 0)
return re, im, re_acc, im_acc
def chop_parts(value, prec):
"""
Chop off tiny real or complex parts.
"""
re, im, re_acc, im_acc = value
# Method 1: chop based on absolute value
if re and re not in _infs_nan and (fastlog(re) < -prec + 4):
re, re_acc = None, None
if im and im not in _infs_nan and (fastlog(im) < -prec + 4):
im, im_acc = None, None
# Method 2: chop if inaccurate and relatively small
if re and im:
delta = fastlog(re) - fastlog(im)
if re_acc < 2 and (delta - re_acc <= -prec + 4):
re, re_acc = None, None
if im_acc < 2 and (delta - im_acc >= prec - 4):
im, im_acc = None, None
return re, im, re_acc, im_acc
def check_target(expr, result, prec):
a = complex_accuracy(result)
if a < prec:
raise PrecisionExhausted("Failed to distinguish the expression: \n\n%s\n\n"
"from zero. Try simplifying the input, using chop=True, or providing "
"a higher maxn for evalf" % (expr))
def get_integer_part(expr, no, options, return_ints=False):
"""
With no = 1, computes ceiling(expr)
With no = -1, computes floor(expr)
Note: this function either gives the exact result or signals failure.
"""
# The expression is likely less than 2^30 or so
assumed_size = 30
ire, iim, ire_acc, iim_acc = evalf(expr, assumed_size, options)
# We now know the size, so we can calculate how much extra precision
# (if any) is needed to get within the nearest integer
if ire and iim:
gap = max(fastlog(ire) - ire_acc, fastlog(iim) - iim_acc)
elif ire:
gap = fastlog(ire) - ire_acc
elif iim:
gap = fastlog(iim) - iim_acc
else:
# ... or maybe the expression was exactly zero
return None, None, None, None
margin = 10
if gap >= -margin:
ire, iim, ire_acc, iim_acc = \
evalf(expr, margin + assumed_size + gap, options)
# We can now easily find the nearest integer, but to find floor/ceil, we
# must also calculate whether the difference to the nearest integer is
# positive or negative (which may fail if very close).
def calc_part(expr, nexpr):
nint = int(to_int(nexpr, rnd))
n, c, p, b = nexpr
if c != 1 and p != 0:
expr = C.Add(expr, -nint, evaluate=False)
x, _, x_acc, _ = evalf(expr, 10, options)
try:
check_target(expr, (x, None, x_acc, None), 3)
except PrecisionExhausted:
if not expr.equals(0):
raise PrecisionExhausted
x = fzero
nint += int(no*(mpf_cmp(x or fzero, fzero) == no))
nint = from_int(nint)
return nint, fastlog(nint) + 10
re, im, re_acc, im_acc = None, None, None, None
if ire:
re, re_acc = calc_part(C.re(expr, evaluate=False), ire)
if iim:
im, im_acc = calc_part(C.im(expr, evaluate=False), iim)
if return_ints:
return int(to_int(re or fzero)), int(to_int(im or fzero))
return re, im, re_acc, im_acc
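# Illustrative sketch (assumption, not part of the original file): this is the
# machinery behind evalf_ceiling/evalf_floor below, e.g.
#   >>> from sympy import ceiling, sqrt
#   >>> ceiling(sqrt(2)*10**20).evalf()   # doctest: +SKIP
# which silently raises the working precision until the nearest integer and
# the sign of the remaining difference are both known exactly.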
def evalf_ceiling(expr, prec, options):
return get_integer_part(expr.args[0], 1, options)
def evalf_floor(expr, prec, options):
return get_integer_part(expr.args[0], -1, options)
#----------------------------------------------------------------------------#
# #
# Arithmetic operations #
# #
#----------------------------------------------------------------------------#
def add_terms(terms, prec, target_prec):
"""
Helper for evalf_add. Adds a list of (mpfval, accuracy) terms.
Returns
-------
- None, None if there are no non-zero terms;
- terms[0] if there is only 1 term;
- scaled_zero if the sum of the terms produces a zero by cancellation
e.g. mpfs representing 1 and -1 would produce a scaled zero which need
special handling since they are not actually zero and they are purposely
malformed to ensure that they can't be used in anything but accuracy
calculations;
- a tuple that is scaled to target_prec that corresponds to the
sum of the terms.
The returned mpf tuple will be normalized to target_prec; the input
prec is used to define the working precision.
XXX explain why this is needed and why one can't just loop using mpf_add
"""
from sympy.core.core import C
terms = [t for t in terms if not iszero(t)]
if not terms:
return None, None
elif len(terms) == 1:
return terms[0]
# see if any argument is NaN or oo and thus warrants a special return
special = []
for t in terms:
arg = C.Float._new(t[0], 1)
if arg is S.NaN or arg.is_unbounded:
special.append(arg)
if special:
from sympy.core.add import Add
rv = evalf(Add(*special), prec + 4, {})
return rv[0], rv[2]
working_prec = 2*prec
sum_man, sum_exp, absolute_error = 0, 0, MINUS_INF
for x, accuracy in terms:
sign, man, exp, bc = x
if sign:
man = -man
absolute_error = max(absolute_error, bc + exp - accuracy)
delta = exp - sum_exp
if exp >= sum_exp:
# x much larger than existing sum?
# first: quick test
if ((delta > working_prec) and
((not sum_man) or
delta - bitcount(abs(sum_man)) > working_prec)):
sum_man = man
sum_exp = exp
else:
sum_man += (man << delta)
else:
delta = -delta
# x much smaller than existing sum?
if delta - bc > working_prec:
if not sum_man:
sum_man, sum_exp = man, exp
else:
sum_man = (sum_man << delta) + man
sum_exp = exp
if not sum_man:
return scaled_zero(absolute_error)
if sum_man < 0:
sum_sign = 1
sum_man = -sum_man
else:
sum_sign = 0
sum_bc = bitcount(sum_man)
sum_accuracy = sum_exp + sum_bc - absolute_error
r = normalize(sum_sign, sum_man, sum_exp, sum_bc, target_prec,
rnd), sum_accuracy
#print "returning", to_str(r[0],50), r[1]
return r
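# Illustrative sketch (assumption, not original code): adding the terms for
# 1 and 2**-80, both claiming 53 accurate bits, at prec = target_prec = 53
#   >>> from sympy.mpmath.libmp import from_int, from_man_exp
#   >>> add_terms([(from_int(1), 53), (from_man_exp(1, -80), 53)], 53, 53)  # doctest: +SKIP
#   ((0, 1, 0, 1), 53)
# the tiny term is absorbed by rounding, and the reported accuracy stays at
# about 53 bits instead of silently pretending the sum is exact.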
def evalf_add(v, prec, options):
res = pure_complex(v)
if res:
h, c = res
re, _, re_acc, _ = evalf(h, prec, options)
im, _, im_acc, _ = evalf(c, prec, options)
return re, im, re_acc, im_acc
oldmaxprec = options.get('maxprec', DEFAULT_MAXPREC)
i = 0
target_prec = prec
while 1:
options['maxprec'] = min(oldmaxprec, 2*prec)
terms = [evalf(arg, prec + 10, options) for arg in v.args]
re, re_acc = add_terms(
[a[0::2] for a in terms if a[0]], prec, target_prec)
im, im_acc = add_terms(
[a[1::2] for a in terms if a[1]], prec, target_prec)
acc = complex_accuracy((re, im, re_acc, im_acc))
if acc >= target_prec:
if options.get('verbose'):
print("ADD: wanted", target_prec, "accurate bits, got", re_acc, im_acc)
break
else:
if (prec - target_prec) > options['maxprec']:
break
prec = prec + max(10 + 2**i, target_prec - acc)
i += 1
if options.get('verbose'):
print("ADD: restarting with prec", prec)
options['maxprec'] = oldmaxprec
if iszero(re, scaled=True):
re = scaled_zero(re)
if iszero(im, scaled=True):
im = scaled_zero(im)
return re, im, re_acc, im_acc
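# Illustrative sketch (assumption, not original code): the restart loop above
# is what makes cancellation-heavy sums come out right, e.g.
#   >>> from sympy import exp, pi, sqrt
#   >>> (exp(pi*sqrt(163)) - 640320**3 - 744).evalf()   # doctest: +SKIP
# the first pass loses essentially all of its bits to cancellation, so prec is
# raised (up to 'maxprec') until the tiny difference survives at target_prec.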
def evalf_mul(v, prec, options):
from sympy.core.core import C
res = pure_complex(v)
if res:
# the only pure complex that is a mul is h*I
_, h = res
im, _, im_acc, _ = evalf(h, prec, options)
return None, im, None, im_acc
args = list(v.args)
# see if any argument is NaN or oo and thus warrants a special return
special = []
for arg in args:
arg = evalf(arg, prec, options)
if arg[0] is None:
continue
arg = C.Float._new(arg[0], 1)
if arg is S.NaN or arg.is_unbounded:
special.append(arg)
if special:
from sympy.core.mul import Mul
special = Mul(*special)
return evalf(special, prec + 4, {})
# With guard digits, multiplication in the real case does not destroy
# accuracy. This is also true in the complex case when considering the
# total accuracy; however accuracy for the real or imaginary parts
# separately may be lower.
acc = prec
# XXX: big overestimate
working_prec = prec + len(args) + 5
# Empty product is 1
start = man, exp, bc = MPZ(1), 0, 1
# First, we multiply all pure real or pure imaginary numbers.
# direction tells us that the result should be multiplied by
# I**direction; all other numbers get put into complex_factors
# to be multiplied out after the first phase.
last = len(args)
direction = 0
args.append(S.One)
complex_factors = []
for i, arg in enumerate(args):
if i != last and pure_complex(arg):
args[-1] = (args[-1]*arg).expand()
continue
elif i == last and arg is S.One:
continue
re, im, re_acc, im_acc = evalf(arg, working_prec, options)
if re and im:
complex_factors.append((re, im, re_acc, im_acc))
continue
elif re:
(s, m, e, b), w_acc = re, re_acc
elif im:
(s, m, e, b), w_acc = im, im_acc
direction += 1
else:
return None, None, None, None
direction += 2*s
man *= m
exp += e
bc += b
if bc > 3*working_prec:
man >>= working_prec
exp += working_prec
acc = min(acc, w_acc)
sign = (direction & 2) >> 1
if not complex_factors:
v = normalize(sign, man, exp, bitcount(man), prec, rnd)
# multiply by i
if direction & 1:
return None, v, None, acc
else:
return v, None, acc, None
else:
# initialize with the first term
if (man, exp, bc) != start:
# there was a real part; give it an imaginary part
re, im = (sign, man, exp, bitcount(man)), (0, MPZ(0), 0, 0)
i0 = 0
else:
# there is no real part to start (other than the starting 1)
wre, wim, wre_acc, wim_acc = complex_factors[0]
acc = min(acc,
complex_accuracy((wre, wim, wre_acc, wim_acc)))
re = wre
im = wim
i0 = 1
for wre, wim, wre_acc, wim_acc in complex_factors[i0:]:
# acc is the overall accuracy of the product; we aren't
# computing exact accuracies of the product.
acc = min(acc,
complex_accuracy((wre, wim, wre_acc, wim_acc)))
use_prec = working_prec
A = mpf_mul(re, wre, use_prec)
B = mpf_mul(mpf_neg(im), wim, use_prec)
C = mpf_mul(re, wim, use_prec)
D = mpf_mul(im, wre, use_prec)
re = mpf_add(A, B, use_prec)
im = mpf_add(C, D, use_prec)
if options.get('verbose'):
print("MUL: wanted", prec, "accurate bits, got", acc)
# multiply by I
if direction & 1:
re, im = mpf_neg(im), re
return re, im, acc, acc
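# Worked illustration of the I**direction bookkeeping above (assumption, not
# original code): for factors evaluating to 2, sqrt(2)*I and -sqrt(3)*I,
# direction gains 1 for each purely imaginary factor plus 2 for the one
# negative sign, giving direction = 4; hence sign = (4 & 2) >> 1 = 0 and
# (4 & 1) = 0, so the product of the magnitudes, 2*sqrt(6), is returned as a
# positive real number -- matching 2 * (sqrt(2)*I) * (-sqrt(3)*I) = 2*sqrt(6).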
def evalf_pow(v, prec, options):
target_prec = prec
base, exp = v.args
# We handle x**n separately. This has two purposes: 1) it is much
# faster, because we avoid calling evalf on the exponent, and 2) it
# allows better handling of real/imaginary parts that are exactly zero
if exp.is_Integer:
p = exp.p
# Exact
if not p:
return fone, None, prec, None
# Exponentiation by p magnifies relative error by |p|, so the
# base must be evaluated with increased precision if p is large
prec += int(math.log(abs(p), 2))
re, im, re_acc, im_acc = evalf(base, prec + 5, options)
# Real to integer power
if re and not im:
return mpf_pow_int(re, p, target_prec), None, target_prec, None
# (x*I)**n = I**n * x**n
if im and not re:
z = mpf_pow_int(im, p, target_prec)
case = p % 4
if case == 0:
return z, None, target_prec, None
if case == 1:
return None, z, None, target_prec
if case == 2:
return mpf_neg(z), None, target_prec, None
if case == 3:
return None, mpf_neg(z), None, target_prec
# Zero raised to an integer power
if not re:
return None, None, None, None
# General complex number to arbitrary integer power
re, im = libmp.mpc_pow_int((re, im), p, prec)
# Assumes full accuracy in input
return finalize_complex(re, im, target_prec)
# Pure square root
if exp is S.Half:
xre, xim, _, _ = evalf(base, prec + 5, options)
# General complex square root
if xim:
re, im = libmp.mpc_sqrt((xre or fzero, xim), prec)
return finalize_complex(re, im, prec)
if not xre:
return None, None, None, None
# Square root of a negative real number
if mpf_lt(xre, fzero):
return None, mpf_sqrt(mpf_neg(xre), prec), None, prec
# Positive square root
return mpf_sqrt(xre, prec), None, prec, None
# We first evaluate the exponent to find its magnitude
# This determines the working precision that must be used
prec += 10
yre, yim, _, _ = evalf(exp, prec, options)
# Special cases: x**0
if not (yre or yim):
return fone, None, prec, None
ysize = fastlog(yre)
# Restart if too big
# XXX: prec + ysize might exceed maxprec
if ysize > 5:
prec += ysize
yre, yim, _, _ = evalf(exp, prec, options)
# Pure exponential function; no need to evalf the base
if base is S.Exp1:
if yim:
re, im = libmp.mpc_exp((yre or fzero, yim), prec)
return finalize_complex(re, im, target_prec)
return mpf_exp(yre, target_prec), None, target_prec, None
xre, xim, _, _ = evalf(base, prec + 5, options)
# 0**y
if not (xre or xim):
return None, None, None, None
# (real ** complex) or (complex ** complex)
if yim:
re, im = libmp.mpc_pow(
(xre or fzero, xim or fzero), (yre or fzero, yim),
target_prec)
return finalize_complex(re, im, target_prec)
# complex ** real
if xim:
re, im = libmp.mpc_pow_mpf((xre or fzero, xim), yre, target_prec)
return finalize_complex(re, im, target_prec)
# negative ** real
elif mpf_lt(xre, fzero):
re, im = libmp.mpc_pow_mpf((xre, fzero), yre, target_prec)
return finalize_complex(re, im, target_prec)
# positive ** real
else:
return mpf_pow(xre, yre, target_prec), None, target_prec, None
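# Illustrative sketch (assumption, not original code): an integer exponent
# such as sqrt(2)**(10**6) never evaluates the exponent numerically and only
# widens the base precision by about log2(10**6) ~ 20 bits, whereas something
# like 2**pi falls through to the general real/complex power branches above.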
#----------------------------------------------------------------------------#
# #
# Special functions #
# #
#----------------------------------------------------------------------------#
def evalf_trig(v, prec, options):
"""
This function handles sin and cos of complex arguments.
TODO: should also handle tan of complex arguments.
"""
if v.func is C.cos:
func = mpf_cos
elif v.func is C.sin:
func = mpf_sin
else:
raise NotImplementedError
arg = v.args[0]
# 20 extra bits is possibly overkill. It does make the need
# to restart very unlikely
xprec = prec + 20
re, im, re_acc, im_acc = evalf(arg, xprec, options)
if im:
if 'subs' in options:
v = v.subs(options['subs'])
return evalf(v._eval_evalf(prec), prec, options)
if not re:
if v.func is C.cos:
return fone, None, prec, None
elif v.func is C.sin:
return None, None, None, None
else:
raise NotImplementedError
# For trigonometric functions, we are interested in the
# fixed-point (absolute) accuracy of the argument.
xsize = fastlog(re)
# Magnitude <= 1.0. OK to compute directly, because there is no
# danger of hitting the first root of cos (with sin, magnitude
# <= 2.0 would actually be ok)
if xsize < 1:
return func(re, prec, rnd), None, prec, None
# Very large
if xsize >= 10:
xprec = prec + xsize
re, im, re_acc, im_acc = evalf(arg, xprec, options)
# Need to repeat in case the argument is very close to a
# multiple of pi (or pi/2), hitting close to a root
while 1:
y = func(re, prec, rnd)
ysize = fastlog(y)
gap = -ysize
accuracy = (xprec - xsize) - gap
if accuracy < prec:
if options.get('verbose'):
print("SIN/COS", accuracy, "wanted", prec, "gap", gap)
print(to_str(y, 10))
if xprec > options.get('maxprec', DEFAULT_MAXPREC):
return y, None, accuracy, None
xprec += gap
re, im, re_acc, im_acc = evalf(arg, xprec, options)
continue
else:
return y, None, prec, None
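# Illustrative sketch (assumption, not original code): the restart above kicks
# in when the argument sits near a root, e.g. sin(3.14159265358979) is about
# 3e-15, so the argument has to be re-evaluated with roughly 50 extra bits of
# fixed-point accuracy before the cancellation inside sin() is compensated.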
def evalf_log(expr, prec, options):
arg = expr.args[0]
workprec = prec + 10
xre, xim, xacc, _ = evalf(arg, workprec, options)
if xim:
# XXX: use get_abs etc instead
re = evalf_log(
C.log(C.Abs(arg, evaluate=False), evaluate=False), prec, options)
im = mpf_atan2(xim, xre or fzero, prec)
return re[0], im, re[2], prec
imaginary_term = (mpf_cmp(xre, fzero) < 0)
re = mpf_log(mpf_abs(xre), prec, rnd)
size = fastlog(re)
if prec - size > workprec:
# We actually need to compute 1+x accurately, not x
arg = C.Add(S.NegativeOne, arg, evaluate=False)
xre, xim, _, _ = evalf_add(arg, prec, options)
prec2 = workprec - fastlog(xre)
# xre is now x - 1 so we add 1 back here to calculate x
re = mpf_log(mpf_abs(mpf_add(xre, fone, prec2)), prec, rnd)
re_acc = prec
if imaginary_term:
return re, mpf_pi(prec), re_acc, prec
else:
return re, None, re_acc, None
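# Illustrative sketch (assumption, not original code): the "compute 1+x
# accurately" branch above is what keeps log(1 + 10**-20) from collapsing to
# zero -- the argument minus one is re-evaluated at higher precision and the
# one is added back, so the result of roughly 1e-20 retains full accuracy.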
def evalf_atan(v, prec, options):
arg = v.args[0]
xre, xim, reacc, imacc = evalf(arg, prec + 5, options)
if xre is xim is None:
return (None,)*4
if xim:
raise NotImplementedError
return mpf_atan(xre, prec, rnd), None, prec, None
def evalf_subs(prec, subs):
""" Change all Float entries in `subs` to have precision prec. """
newsubs = {}
for a, b in subs.items():
b = S(b)
if b.is_Float:
b = b._eval_evalf(prec)
newsubs[a] = b
return newsubs
def evalf_piecewise(expr, prec, options):
if 'subs' in options:
expr = expr.subs(evalf_subs(prec, options['subs']))
newopts = options.copy()
del newopts['subs']
if hasattr(expr, 'func'):
return evalf(expr, prec, newopts)
if type(expr) == float:
return evalf(C.Float(expr), prec, newopts)
if type(expr) == int:
return evalf(C.Integer(expr), prec, newopts)
# We still have undefined symbols
raise NotImplementedError
def evalf_bernoulli(expr, prec, options):
arg = expr.args[0]
if not arg.is_Integer:
raise ValueError("Bernoulli number index must be an integer")
n = int(arg)
b = mpf_bernoulli(n, prec, rnd)
if b == fzero:
return None, None, None, None
return b, None, prec, None
#----------------------------------------------------------------------------#
# #
# High-level operations #
# #
#----------------------------------------------------------------------------#
def as_mpmath(x, prec, options):
x = sympify(x)
if isinstance(x, C.Zero):
return mpf(0)
if isinstance(x, C.Infinity):
return mpf('inf')
if isinstance(x, C.NegativeInfinity):
return mpf('-inf')
# XXX
re, im, _, _ = evalf(x, prec, options)
if im:
return mpc(re or fzero, im)
return mpf(re)
def do_integral(expr, prec, options):
func = expr.args[0]
x, xlow, xhigh = expr.args[1]
if xlow == xhigh:
xlow = xhigh = 0
elif x not in func.free_symbols:
# only the difference in limits matters in this case
# so if there is a symbol in common that will cancel
# out when taking the difference, then use that
# difference
if xhigh.free_symbols & xlow.free_symbols:
diff = xhigh - xlow
if not diff.free_symbols:
xlow, xhigh = 0, diff
orig = mp.prec
oldmaxprec = options.get('maxprec', DEFAULT_MAXPREC)
options['maxprec'] = min(oldmaxprec, 2*prec)
try:
mp.prec = prec + 5
xlow = as_mpmath(xlow, prec + 15, options)
xhigh = as_mpmath(xhigh, prec + 15, options)
# Integration is like summation, and we can phone home from
# the integrand function to update accuracy summation style
# Note that this accuracy is inaccurate, since it fails
# to account for the variable quadrature weights,
# but it is better than nothing
have_part = [False, False]
max_real_term = [MINUS_INF]
max_imag_term = [MINUS_INF]
def f(t):
re, im, re_acc, im_acc = evalf(func, mp.prec, {'subs': {x: t}})
have_part[0] = re or have_part[0]
have_part[1] = im or have_part[1]
max_real_term[0] = max(max_real_term[0], fastlog(re))
max_imag_term[0] = max(max_imag_term[0], fastlog(im))
if im:
return mpc(re or fzero, im)
return mpf(re or fzero)
if options.get('quad') == 'osc':
A = C.Wild('A', exclude=[x])
B = C.Wild('B', exclude=[x])
D = C.Wild('D')
m = func.match(C.cos(A*x + B)*D)
if not m:
m = func.match(C.sin(A*x + B)*D)
if not m:
raise ValueError("An integrand of the form sin(A*x+B)*f(x) "
"or cos(A*x+B)*f(x) is required for oscillatory quadrature")
period = as_mpmath(2*S.Pi/m[A], prec + 15, options)
result = quadosc(f, [xlow, xhigh], period=period)
# XXX: quadosc does not do error detection yet
quadrature_error = MINUS_INF
else:
result, quadrature_error = quadts(f, [xlow, xhigh], error=1)
quadrature_error = fastlog(quadrature_error._mpf_)
finally:
options['maxprec'] = oldmaxprec
mp.prec = orig
if have_part[0]:
re = result.real._mpf_
if re == fzero:
re, re_acc = scaled_zero(
min(-prec, -max_real_term[0], -quadrature_error))
re = scaled_zero(re) # handled ok in evalf_integral
else:
re_acc = -max(max_real_term[0] - fastlog(re) -
prec, quadrature_error)
else:
re, re_acc = None, None
if have_part[1]:
im = result.imag._mpf_
if im == fzero:
im, im_acc = scaled_zero(
min(-prec, -max_imag_term[0], -quadrature_error))
im = scaled_zero(im) # handled ok in evalf_integral
else:
im_acc = -max(max_imag_term[0] - fastlog(im) -
prec, quadrature_error)
else:
im, im_acc = None, None
result = re, im, re_acc, im_acc
return result
def evalf_integral(expr, prec, options):
limits = expr.limits
if len(limits) != 1 or len(limits[0]) != 3:
raise NotImplementedError
workprec = prec
i = 0
maxprec = options.get('maxprec', INF)
while 1:
result = do_integral(expr, workprec, options)
accuracy = complex_accuracy(result)
if accuracy >= prec: # achieved desired precision
break
if workprec >= maxprec: # can't increase accuracy any more
break
if accuracy == -1:
# maybe the answer really is zero and maybe we just haven't increased
# the precision enough. So increase by doubling to not take too long
# to get to maxprec.
workprec *= 2
else:
workprec += max(prec, 2**i)
workprec = min(workprec, maxprec)
i += 1
return result
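# Illustrative sketch (not part of the original module): quadrature as driven by
# evalf_integral/do_integral.  The default is tanh-sinh quadrature; an infinite
# oscillatory integral needs quad='osc' and an integrand matching the pattern
# sin(A*x + B)*f(x) or cos(A*x + B)*f(x):
#
#     >>> from sympy import Integral, exp, sin, oo
#     >>> from sympy.abc import x
#     >>> Integral(exp(-x**2), (x, -oo, oo)).evalf()        # sqrt(pi)
#     1.77245385090552
#     >>> Integral(sin(x)/x, (x, 1, oo)).evalf(quad='osc')  # pi/2 - Si(1)
#     0.624713256427714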
def check_convergence(numer, denom, n):
"""
Returns (h, g, p) where
-- h is:
> 0 for convergence of rate 1/factorial(n)**h
< 0 for divergence of rate factorial(n)**(-h)
= 0 for geometric or polynomial convergence or divergence
-- abs(g) is:
> 1 for geometric convergence of rate 1/h**n
< 1 for geometric divergence of rate h**n
= 1 for polynomial convergence or divergence
(g < 0 indicates an alternating series)
-- p is:
> 1 for polynomial convergence of rate 1/n**h
<= 1 for polynomial divergence of rate n**(-h)
"""
npol = C.Poly(numer, n)
dpol = C.Poly(denom, n)
p = npol.degree()
q = dpol.degree()
rate = q - p
if rate:
return rate, None, None
constant = dpol.LC() / npol.LC()
if abs(constant) != 1:
return rate, constant, None
if npol.degree() == dpol.degree() == 0:
return rate, constant, 0
pc = npol.all_coeffs()[1]
qc = dpol.all_coeffs()[1]
return rate, constant, (qc - pc)/dpol.LC()
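# Illustrative example (not part of the original module): for Sum(1/factorial(n)),
# hypersimp gives the successive-term quotient 1/(n + 1), i.e. numer = 1 and
# denom = n + 1.  The denominator degree exceeds the numerator degree by one, so
# the series converges like 1/factorial(n)**1:
#
#     >>> from sympy import S
#     >>> from sympy.abc import n
#     >>> check_convergence(S.One, n + 1, n)
#     (1, None, None)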
def hypsum(expr, n, start, prec):
"""
Sum a rapidly convergent infinite hypergeometric series with
given general term, e.g. e = hypsum(1/factorial(n), n). The
quotient between successive terms must be a quotient of integer
polynomials.
"""
from sympy import hypersimp, lambdify
# TODO: This should be removed for the release of 0.7.7, see issue #7853
from functools import partial
lambdify = partial(lambdify, default_array=True)
if start:
expr = expr.subs(n, n + start)
hs = hypersimp(expr, n)
if hs is None:
raise NotImplementedError("a hypergeometric series is required")
num, den = hs.as_numer_denom()
func1 = lambdify(n, num)
func2 = lambdify(n, den)
h, g, p = check_convergence(num, den, n)
if h < 0:
raise ValueError("Sum diverges like (n!)^%i" % (-h))
# Direct summation if geometric or faster
if h > 0 or (h == 0 and abs(g) > 1):
term = expr.subs(n, 0)
term = (MPZ(term.p) << prec) // term.q
s = term
k = 1
while abs(term) > 5:
term *= MPZ(func1(k - 1))
term //= MPZ(func2(k - 1))
s += term
k += 1
return from_man_exp(s, -prec)
else:
alt = g < 0
if abs(g) < 1:
raise ValueError("Sum diverges like (%i)^n" % abs(1/g))
if p < 1 or (p == 1 and not alt):
raise ValueError("Sum diverges like n^%i" % (-p))
# We have polynomial convergence: use Richardson extrapolation
# Need to use at least quad precision because a lot of cancellation
# might occur in the extrapolation process
prec2 = 4*prec
term = expr.subs(n, 0)
term = (MPZ(term.p) << prec2) // term.q
def summand(k, _term=[term]):
if k:
k = int(k)
_term[0] *= MPZ(func1(k - 1))
_term[0] //= MPZ(func2(k - 1))
return make_mpf(from_man_exp(_term[0], -prec2))
orig = mp.prec
try:
mp.prec = prec
v = nsum(summand, [0, mpmath_inf], method='richardson')
finally:
mp.prec = orig
return v._mpf_
def evalf_prod(expr, prec, options):
if all((l[1] - l[2]).is_Integer for l in expr.limits):
re, im, re_acc, im_acc = evalf(expr.doit(), prec=prec, options=options)
else:
re, im, re_acc, im_acc = evalf(expr.rewrite(C.Sum), prec=prec, options=options)
return re, im, re_acc, im_acc
def evalf_sum(expr, prec, options):
if 'subs' in options:
expr = expr.subs(options['subs'])
func = expr.function
limits = expr.limits
if len(limits) != 1 or len(limits[0]) != 3:
raise NotImplementedError
if func is S.Zero:
return mpf(0), None, None, None
prec2 = prec + 10
try:
n, a, b = limits[0]
if b != S.Infinity or a != int(a):
raise NotImplementedError
# Use fast hypergeometric summation if possible
v = hypsum(func, n, int(a), prec2)
delta = prec - fastlog(v)
if fastlog(v) < -10:
v = hypsum(func, n, int(a), delta)
return v, None, min(prec, delta), None
except NotImplementedError:
# Euler-Maclaurin summation for general series
eps = C.Float(2.0)**(-prec)
for i in range(1, 5):
m = n = 2**i * prec
s, err = expr.euler_maclaurin(m=m, n=n, eps=eps,
eval_integral=False)
err = err.evalf()
if err <= eps:
break
err = fastlog(evalf(abs(err), 20, options)[0])
re, im, re_acc, im_acc = evalf(s, prec2, options)
if re_acc is None:
re_acc = -err
if im_acc is None:
im_acc = -err
return re, im, re_acc, im_acc
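# Illustrative sketch (not part of the original module): evalf_sum first tries the
# fast hypergeometric path (hypsum) and falls back to Euler-Maclaurin summation
# when no polynomial term ratio is available:
#
#     >>> from sympy import Sum, factorial, oo
#     >>> from sympy.abc import n
#     >>> Sum(1/factorial(n), (n, 0, oo)).evalf()   # hypergeometric path -> E
#     2.71828182845905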
#----------------------------------------------------------------------------#
# #
# Symbolic interface #
# #
#----------------------------------------------------------------------------#
def evalf_symbol(x, prec, options):
val = options['subs'][x]
if isinstance(val, mpf):
if not val:
return None, None, None, None
return val._mpf_, None, prec, None
else:
if not '_cache' in options:
options['_cache'] = {}
cache = options['_cache']
cached, cached_prec = cache.get(x.name, (None, MINUS_INF))
if cached_prec >= prec:
return cached
v = evalf(sympify(val), prec, options)
cache[x.name] = (v, prec)
return v
evalf_table = None
def _create_evalf_table():
global evalf_table
evalf_table = {
C.Symbol: evalf_symbol,
C.Dummy: evalf_symbol,
C.Float: lambda x, prec, options: (x._mpf_, None, prec, None),
C.Rational: lambda x, prec, options: (from_rational(x.p, x.q, prec), None, prec, None),
C.Integer: lambda x, prec, options: (from_int(x.p, prec), None, prec, None),
C.Zero: lambda x, prec, options: (None, None, prec, None),
C.One: lambda x, prec, options: (fone, None, prec, None),
C.Half: lambda x, prec, options: (fhalf, None, prec, None),
C.Pi: lambda x, prec, options: (mpf_pi(prec), None, prec, None),
C.Exp1: lambda x, prec, options: (mpf_e(prec), None, prec, None),
C.ImaginaryUnit: lambda x, prec, options: (None, fone, None, prec),
C.NegativeOne: lambda x, prec, options: (fnone, None, prec, None),
C.NaN : lambda x, prec, options: (fnan, None, prec, None),
C.exp: lambda x, prec, options: evalf_pow(C.Pow(S.Exp1, x.args[0],
evaluate=False), prec, options),
C.cos: evalf_trig,
C.sin: evalf_trig,
C.Add: evalf_add,
C.Mul: evalf_mul,
C.Pow: evalf_pow,
C.log: evalf_log,
C.atan: evalf_atan,
C.Abs: evalf_abs,
C.re: evalf_re,
C.im: evalf_im,
C.floor: evalf_floor,
C.ceiling: evalf_ceiling,
C.Integral: evalf_integral,
C.Sum: evalf_sum,
C.Product: evalf_prod,
C.Piecewise: evalf_piecewise,
C.bernoulli: evalf_bernoulli,
}
def evalf(x, prec, options):
from sympy import re as re_, im as im_
try:
rf = evalf_table[x.func]
r = rf(x, prec, options)
except KeyError:
try:
# Fall back to ordinary evalf if possible
if 'subs' in options:
x = x.subs(evalf_subs(prec, options['subs']))
re, im = x._eval_evalf(prec).as_real_imag()
if re.has(re_) or im.has(im_):
raise NotImplementedError
if re == 0:
re = None
reprec = None
else:
re = re._to_mpmath(prec, allow_ints=False)._mpf_
reprec = prec
if im == 0:
im = None
imprec = None
else:
im = im._to_mpmath(prec, allow_ints=False)._mpf_
imprec = prec
r = re, im, reprec, imprec
except AttributeError:
raise NotImplementedError
if options.get("verbose"):
print("### input", x)
print("### output", to_str(r[0] or fzero, 50))
print("### raw", r ) # r[0], r[2]
print()
chop = options.get('chop', False)
if chop:
if chop is True:
chop_prec = prec
else:
# convert (approximately) from given tolerance;
            # the formula here will make 1e-i round to 0 for
# i in the range +/-27 while 2e-i will not be chopped
chop_prec = int(round(-3.321*math.log10(chop) + 2.5))
if chop_prec == 3:
chop_prec -= 1
r = chop_parts(r, chop_prec)
if options.get("strict"):
check_target(x, r, prec)
return r
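# Illustrative note (not part of the original module): the low-level evalf()
# returns a raw 4-tuple (re, im, re_acc, im_acc) of mpf value tuples and binary
# accuracies; evalf_table must already have been built by a prior .evalf() call:
#
#     >>> from sympy import Float
#     >>> Float(1.5).evalf()            # ensures _create_evalf_table() has run
#     1.50000000000000
#     >>> evalf(Float(1.5), 53, {})
#     ((0, 3, -1, 2), None, 53, None)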
class EvalfMixin(object):
"""Mixin class adding evalf capabililty."""
__slots__ = []
def evalf(self, n=15, subs=None, maxn=100, chop=False, strict=False, quad=None, verbose=False):
"""
Evaluate the given formula to an accuracy of n digits.
Optional keyword arguments:
subs=<dict>
Substitute numerical values for symbols, e.g.
subs={x:3, y:1+pi}. The substitutions must be given as a
dictionary.
maxn=<integer>
Allow a maximum temporary working precision of maxn digits
(default=100)
chop=<bool>
Replace tiny real or imaginary parts in subresults
by exact zeros (default=False)
strict=<bool>
Raise PrecisionExhausted if any subresult fails to evaluate
to full accuracy, given the available maxprec
(default=False)
quad=<str>
Choose algorithm for numerical quadrature. By default,
tanh-sinh quadrature is used. For oscillatory
integrals on an infinite interval, try quad='osc'.
verbose=<bool>
Print debug information (default=False)
"""
n = n if n is not None else 15
if subs and is_sequence(subs):
raise TypeError('subs must be given as a dictionary')
# for sake of sage that doesn't like evalf(1)
if n == 1 and isinstance(self, C.Number):
from sympy.core.expr import _mag
rv = self.evalf(2, subs, maxn, chop, strict, quad, verbose)
m = _mag(rv)
rv = rv.round(1 - m)
return rv
if not evalf_table:
_create_evalf_table()
prec = dps_to_prec(n)
options = {'maxprec': max(prec, int(maxn*LG10)), 'chop': chop,
'strict': strict, 'verbose': verbose}
if subs is not None:
options['subs'] = subs
if quad is not None:
options['quad'] = quad
try:
result = evalf(self, prec + 4, options)
except NotImplementedError:
# Fall back to the ordinary evalf
v = self._eval_evalf(prec)
if v is None:
return self
try:
# If the result is numerical, normalize it
result = evalf(v, prec, options)
except NotImplementedError:
# Probably contains symbols or unknown functions
return v
re, im, re_acc, im_acc = result
if re:
p = max(min(prec, re_acc), 1)
#re = mpf_pos(re, p, rnd)
re = C.Float._new(re, p)
else:
re = S.Zero
if im:
p = max(min(prec, im_acc), 1)
#im = mpf_pos(im, p, rnd)
im = C.Float._new(im, p)
return re + im*S.ImaginaryUnit
else:
return re
n = evalf
def _evalf(self, prec):
"""Helper for evalf. Does the same thing but takes binary precision"""
r = self._eval_evalf(prec)
if r is None:
r = self
return r
def _eval_evalf(self, prec):
return
def _to_mpmath(self, prec, allow_ints=True):
# mpmath functions accept ints as input
errmsg = "cannot convert to mpmath number"
if allow_ints and self.is_Integer:
return self.p
if hasattr(self, '_as_mpf_val'):
return make_mpf(self._as_mpf_val(prec))
try:
re, im, _, _ = evalf(self, prec, {})
if im:
if not re:
re = fzero
return make_mpc((re, im))
elif re:
return make_mpf(re)
else:
return make_mpf(fzero)
except NotImplementedError:
v = self._eval_evalf(prec)
if v is None:
raise ValueError(errmsg)
if v.is_Float:
return make_mpf(v._mpf_)
# Number + Number*I is also fine
re, im = v.as_real_imag()
if allow_ints and re.is_Integer:
re = from_int(re.p)
elif re.is_Float:
re = re._mpf_
else:
raise ValueError(errmsg)
if allow_ints and im.is_Integer:
im = from_int(im.p)
elif im.is_Float:
im = im._mpf_
else:
raise ValueError(errmsg)
return make_mpc((re, im))
def N(x, n=15, **options):
"""
Calls x.evalf(n, \*\*options).
Both .n() and N() are equivalent to .evalf(); use the one that you like better.
See also the docstring of .evalf() for information on the options.
Examples
========
>>> from sympy import Sum, oo, N
>>> from sympy.abc import k
>>> Sum(1/k**k, (k, 1, oo))
Sum(k**(-k), (k, 1, oo))
>>> N(_, 4)
1.291
"""
return sympify(x).evalf(n, **options)
| bsd-3-clause | 528,281,993,822,744,960 | 32.421644 | 99 | 0.536449 | false |
auduny/chains | lib/chains/common/cusb.py | 1 | 4584 | import usb.core
import usb.util
usb_iclass_map = {
usb.CLASS_PER_INTERFACE: 'PerInterface', # 0
usb.CLASS_AUDIO: 'Audio', # 1
usb.CLASS_COMM: 'Comm', # 2
usb.CLASS_HID: 'HID', # 3
usb.CLASS_PRINTER: 'Printer', # 7
usb.CLASS_MASS_STORAGE: 'MassStorage', # 8
usb.CLASS_HUB: 'Hub', # 9
usb.CLASS_DATA: 'Data',
usb.CLASS_VENDOR_SPEC: 'Vendor',
}
usb_iproto_map = {
'PerInterface': {
},
'Audio': {
},
'Comm': {
},
'HID': {
0: 'raw',
1: 'keyboard',
2: 'mouse',
},
'Printer': {
},
'MassStorage': {
},
'Hub': {
0: 'full_speed',
},
'Data': {
},
'Vendor': {
},
}
usb_services = {
(0x16c0, 0x05df): [
{'type': 'relay', 'name': 'usbrelay'},
],
(0x045e, 0x028e): [
{'type': 'joystick', 'name': 'xbox360'},
],
(0x046d, 0xc21f): [
{'type': 'joystick', 'name': 'f710'}, # Logitech F710 controller
],
(0x08ff, 0x0009): [
{'type': 'rfid', 'name': 'rfid_reader'},
],
}
def find_types(**kwargs):
global usb_iclass_map
global usb_iproto_map
dev = usb.core.find(find_all=True, **kwargs)
all_info = "All USB services:\n"
types = {}
dev_desc = {}
for service in dev:
#all_info += str(service) + '\n'
for cindex, configuration in enumerate(service):
# print 'Configuration: %d' % cindex
for interface in configuration:
# print interface.__dict__
bclass = 'unknown'
bproto = 'unknown'
if interface.bInterfaceClass in usb_iclass_map:
bclass = usb_iclass_map[interface.bInterfaceClass]
if interface.bInterfaceProtocol in usb_iproto_map[bclass]:
bproto = usb_iproto_map[bclass][interface.bInterfaceProtocol]
#else:
# bproto = 'unknown'
dev_desc = {
'bus': service.bus,
'class': bclass,
'address': service.address,
'product_id': service.idProduct,
'vendor_id': service.idVendor,
'configuration': cindex,
'interface': interface.bInterfaceNumber
}
# Add service strings to interface description
dev_desc.update(service_strings(service.bus, service.address))
types.setdefault(bclass, {})
# Adding interface to full list
# print dev_desc
types[bclass].setdefault(bproto, []).append(dev_desc)
#print all_info
return types
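# Illustrative sketch (not part of the original module): find_types() returns a
# nested dict keyed by interface class and protocol; the exact entries depend on
# the attached hardware, e.g.
#
#     {'HID': {'mouse':    [{'bus': 1, 'address': 4, 'class': 'HID', ...}],
#              'keyboard': [...]},
#      'Hub': {'full_speed': [...]}}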
def service_strings(bus, address):
""" Returns dictionary with service strings:
usb_str = {
'manufacturer_name': 'unknown',
'product_name': 'unknown',
        'serialnumber': 'unknown',
}
"""
dev = usb.core.find(bus=bus, address=address)
usb_str = {
'manufacturer_name': 'unknown',
'product_name': 'unknown',
        'serialnumber': 'unknown',
}
try:
if dev._manufacturer is None:
dev._manufacturer = usb.util.get_string(dev, dev.iManufacturer)
usb_str['manufacturer_name'] = dev._manufacturer
if dev._product is None:
dev._product = usb.util.get_string(dev, dev.iProduct)
usb_str['product_name'] = dev._product
if dev._serial_number is None:
dev._serial_number = usb.util.get_string(dev, dev.iSerialNumber)
usb_str['serialnumber'] = dev._serial_number
except:
pass
return usb_str
def find_mouse(**kwargs):
types = find_types(**kwargs)
if 'HID' in types:
if 'mouse' in types['HID']:
return types['HID']['mouse']
return False
def find_keyboard(**kwargs):
types = find_types(**kwargs)
if 'HID' in types:
if 'keyboard' in types['HID']:
return types['HID']['keyboard']
return False
def find_relay(**kwargs):
types = find_types(**kwargs)
if 'HID' in types:
if 'relay' in types['HID']:
return types['HID']['relay']
return False
def find_joystick(**kwargs):
types = find_types(**kwargs)
if 'HID' in types:
if 'joystick' in types['HID']:
return types['HID']['joystick']
return False
if __name__ == '__main__':
from pprint import pprint
import sys
# in_img = sys.argv[1]
# act = sys.argv[2]
pprint(find_types())
pprint(find_mouse())
| gpl-2.0 | -2,259,089,511,217,608,400 | 26.781818 | 85 | 0.52356 | false |
sahat/Wappalyzer | drivers/python/wappalyzer.py | 1 | 1417 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import PyV8
import urllib
from urlparse import urlparse
try:
import json
except ImportError:
import simplejson as json
class Wappalyzer(object):
def __init__(self, url):
self.file_dir = os.path.dirname(__file__)
f = open(os.path.join(self.file_dir, '../../share/apps.json'))
data = json.loads(f.read())
f.close()
self.categories = data['categories']
self.apps = data['apps']
self.url = url
def analyze(self):
ctxt = PyV8.JSContext()
ctxt.enter()
f1 = open(os.path.join(self.file_dir, '../php/js/wappalyzer.js'))
f2 = open(os.path.join(self.file_dir, '../php/js/driver.js'))
ctxt.eval(f1.read())
ctxt.eval(f2.read())
f1.close()
f2.close()
host = urlparse(self.url).hostname
html = urllib.urlopen(self.url).read()
data = {'host': host, 'url': self.url, 'html': html, 'headers': {}}
apps = json.dumps(self.apps)
categories = json.dumps(self.categories)
return ctxt.eval("w.apps = %s; w.categories = %s; w.driver.data = %s; w.driver.init();" % (apps, categories, json.dumps(data)))
if __name__ == '__main__':
try:
w = Wappalyzer(sys.argv[1])
print w.analyze()
except IndexError:
print ('Usage: python %s <url>' % sys.argv[0])
| gpl-3.0 | 1,305,981,902,608,662,500 | 25.735849 | 135 | 0.568102 | false |
ittner/sked | libsked/options.py | 1 | 2982 | # -*- coding: utf-8 -*-
# Sked - a wikish scheduler with Python and PyGTK
# (c) 2006-10 Alexandre Erwin Ittner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston,
# MA 02111-1307, USA.
"""
Options and configuration management module.
"""
from gtk import gdk
class OptionManager:
"""
Handles application options, including persistence.
"""
DB_KEY = "options"
def __init__(self, db, defaults = {}):
self._db = db
self._opts = {}
self.set_defaults(defaults)
self.load()
def set_defaults(self, defaults):
self._defs = {}
for k in defaults:
self._defs[k] = defaults[k]
def save(self):
self._db.set_key(OptionManager.DB_KEY, self._opts)
def load(self):
s = self._db.get_key(OptionManager.DB_KEY)
if s:
self._opts = s
def get_str(self, key):
if self._opts.has_key(key):
return self._opts[key]
elif self._defs.has_key(key):
return self._defs[key]
else:
return None
def set_str(self, key, value):
self._opts[key] = value
def get_int(self, key):
s = self.get_str(key)
if s != None:
return int(s)
return None
def set_int(self, key, value):
self.set_str(key, "%d" % value)
def get_bool(self, key):
v = self.get_int(key)
if v != None:
if v != 0:
return True
else:
return False
else:
return None
def set_bool(self, key, value):
if value == True:
self.set_int(key, 1)
else:
self.set_int(key, 0)
def get_color(self, key):
try:
s = self.get_str(key)
if s == None and self._defs.has_key(key):
s = self._defs[key]
if s == None:
return None
return gdk.color_parse(s)
except ValueError:
return None
def set_color(self, key, color):
self.set_str(key, "#%.2X%.2X%.2X" %
(color.red/256, color.green/256, color.blue/256))
def iterate(self):
keys = sorted(self._opts.keys())
for key in keys:
yield str(key), str(self._opts[key])
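# Illustrative usage sketch (not part of the original module); `db` stands for any
# object providing the get_key/set_key persistence interface used above, and the
# option names are hypothetical:
#
#     opts = OptionManager(db, {"window_width": "640", "show_calendar": "1"})
#     opts.set_int("window_width", 800)
#     opts.get_bool("show_calendar")      # -> True
#     opts.save()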
| gpl-2.0 | -8,001,215,012,241,772,000 | 26.611111 | 70 | 0.560698 | false |
Suwings/Yeinw | src/Suwings/security/oth_pxfilter.py | 1 | 7109 | # -*- coding: utf-8 -*-
"""
Python rich-text XSS filter class
@package XssHtml
@version 0.1
@link http://phith0n.github.io/python-xss-filter
@since 20150407
@copyright (c) Phithon All Rights Reserved
Based on the native Python HTMLParser module; purifies HTML and strips all JavaScript.
Can be used with any Python web framework.
Written by Phithon <[email protected]> on 2015-04-07 and placed in the public domain.
From: XDSEC <www.xdsec.org> & Leavesongs <www.leavesongs.com>
GitHub Pages: https://github.com/phith0n/python-xss-filter
Usage:
parser = XssHtml()
parser.feed('<html code>')
parser.close()
html = parser.getHtml()
print html
Requirements
Python 2.6+ or 3.2+
Cannot defend against XSS in browsers below IE7
Browser support: IE7+ or other modern browsers; XSS in IE6 and earlier cannot be prevented
"""
import re
try:
from html.parser import HTMLParser
except ImportError:
from HTMLParser import HTMLParser
class XssHtml(HTMLParser):
allow_tags = ['a', 'img', 'br', 'strong', 'b', 'code', 'pre',
'p', 'div', 'em', 'span', 'h1', 'h2', 'h3', 'h4',
'h5', 'h6', 'blockquote', 'ul', 'ol', 'tr', 'th', 'td',
'hr', 'li', 'u', 'embed', 's', 'table', 'thead', 'tbody',
'caption', 'small', 'q', 'sup', 'sub','small']
common_attrs = ["style", "class", "name"]
nonend_tags = ["img", "hr", "br", "embed"]
tags_own_attrs = {
"img": ["src", "width", "height", "alt", "align"],
"a": ["href", "target", "rel", "title"],
"embed": ["src", "width", "height", "type", "allowfullscreen", "loop", "play", "wmode", "menu"],
"table": ["border", "cellpadding", "cellspacing"],
}
def __init__(self, allows = []):
HTMLParser.__init__(self)
self.allow_tags = allows if allows else self.allow_tags
self.result = []
self.start = []
self.data = []
def getHtml(self):
"""
Get the safe html code
"""
for i in range(0, len(self.result)):
self.data.append(self.result[i])
v = ''.join(self.data)
        # second pass (added later): strip blacklisted javascript fragments
black_list = ['javascript:',"<script>","</script>"]
for z in black_list:
while v.find(z) != -1:
v = v.replace(z,"")
return v
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
def handle_starttag(self, tag, attrs):
if tag not in self.allow_tags:
return
end_diagonal = ' /' if tag in self.nonend_tags else ''
if not end_diagonal:
self.start.append(tag)
attdict = {}
for attr in attrs:
attdict[attr[0]] = attr[1]
attdict = self._wash_attr(attdict, tag)
if hasattr(self, "node_%s" % tag):
attdict = getattr(self, "node_%s" % tag)(attdict)
else:
attdict = self.node_default(attdict)
attrs = []
for (key, value) in attdict.items():
attrs.append('%s="%s"' % (key, self._htmlspecialchars(value)))
attrs = (' ' + ' '.join(attrs)) if attrs else ''
self.result.append('<' + tag + attrs + end_diagonal + '>')
def handle_endtag(self, tag):
if self.start and tag == self.start[len(self.start) - 1]:
self.result.append('</' + tag + '>')
self.start.pop()
def handle_data(self, data):
self.result.append(self._htmlspecialchars(data))
def handle_entityref(self, name):
if name.isalpha():
self.result.append("&%s;" % name)
def handle_charref(self, name):
if name.isdigit():
self.result.append("&#%s;" % name)
def node_default(self, attrs):
attrs = self._common_attr(attrs)
return attrs
def node_a(self, attrs):
attrs = self._common_attr(attrs)
attrs = self._get_link(attrs, "href")
attrs = self._set_attr_default(attrs, "target", "_blank")
attrs = self._limit_attr(attrs, {
"target": ["_blank", "_self"]
})
return attrs
def node_embed(self, attrs):
attrs = self._common_attr(attrs)
attrs = self._get_link(attrs, "src")
attrs = self._limit_attr(attrs, {
"type": ["application/x-shockwave-flash"],
"wmode": ["transparent", "window", "opaque"],
"play": ["true", "false"],
"loop": ["true", "false"],
"menu": ["true", "false"],
"allowfullscreen": ["true", "false"]
})
attrs["allowscriptaccess"] = "never"
attrs["allownetworking"] = "none"
return attrs
def _true_url(self, url):
prog = re.compile(r"^(http|https|ftp)://.+", re.I | re.S)
if prog.match(url):
return url
else:
return "http://%s" % url
def _true_style(self, style):
if style:
style = re.sub(r"(\\|&#|/\*|\*/)", "_", style)
style = re.sub(r"e.*x.*p.*r.*e.*s.*s.*i.*o.*n", "_", style)
return style
def _get_style(self, attrs):
if "style" in attrs:
attrs["style"] = self._true_style(attrs.get("style"))
return attrs
def _get_link(self, attrs, name):
if name in attrs:
attrs[name] = self._true_url(attrs[name])
return attrs
def _wash_attr(self, attrs, tag):
if tag in self.tags_own_attrs:
other = self.tags_own_attrs.get(tag)
else:
other = []
_attrs = {}
if attrs:
for (key, value) in attrs.items():
if key in self.common_attrs + other:
_attrs[key] = value
return _attrs
def _common_attr(self, attrs):
attrs = self._get_style(attrs)
return attrs
def _set_attr_default(self, attrs, name, default = ''):
if name not in attrs:
attrs[name] = default
return attrs
def _limit_attr(self, attrs, limit = {}):
for (key, value) in limit.items():
if key in attrs and attrs[key] not in value:
del attrs[key]
return attrs
def _htmlspecialchars(self, html):
return html.replace("<", "<")\
.replace(">", ">")\
.replace('"', """)\
.replace("'", "'")
if "__main__" == __name__:
parser = XssHtml()
parser.feed("""
<p><img src=1 onerror=alert(/xss/)></p><div class="left">
<a href='javascript:prompt(1)'><br />hehe</a></div>
<p id="test" onmouseover="alert(1)">><<<><><><<///||\\ds\r\n<br><>>>M<svg>
<a href="https://www.baidu.com" target="self">MM</a></p>
<embed src='javascript:alert(/hehe/)' allowscriptaccess=always />
<a scr='javascript:alert(1)' href='ftp://www.baidu.com:8080/' style='color:red;'>
<p id='java' onload='javasjavascript:cript:alert(1)' >><script<script>>>>Aw ]我好<><<>></script> </p>
<sdsw id='jjj'>9999</sdsw>
""")
parser.close()
print(parser.getHtml()) | gpl-3.0 | 8,411,199,126,742,387,000 | 32.428571 | 107 | 0.529563 | false |
felipenaselva/felipe.repository | script.module.universalscrapers/lib/universalscrapers/scraperplugins/housemovies.py | 1 | 3241 | import requests
import re, base64
import xbmc
from ..scraper import Scraper
from ..common import clean_title,clean_search,random_agent
class housemovie(Scraper):
domains = ['https://housemovie.to/']
name = "Housemovies"
sources = []
def __init__(self):
self.base_link = 'https://housemovie.to'
def scrape_movie(self, title, year, imdb, debrid = False):
try:
search_id = clean_search(title.lower()) # use 'clean_search' to get clean title
#(movie name keeping spaces removing excess characters)
start_url = '%s/search?q=%s' %(self.base_link,search_id.replace(' ','+')) # construct search url attributes using site url
# print '::::::::::::: START URL '+start_url # print to log to confirm
headers={'User-Agent':random_agent()}
html = requests.get(start_url,headers=headers,timeout=5).content # open start_url
match = re.compile('<span class="is_dislike">.+?<a href="(.+?)" class="fig_holder">.+?<div class="cover-label">.+?<spa.+?>(.+?)</span>.+?<span class="item_name">(.+?)</span>.+?<span class="item_ganre">(.+?),',re.DOTALL).findall(html)
for url, quality, name, year_check in match:
if clean_title(search_id).lower() == clean_title(name).lower(): # confirm name use 'clean_title' this will remove all unwanted
if year_check == year:
url = self.base_link+url
if not 'coming soon' in quality.lower():
self.get_source(url, quality) # send url to next stage
return self.sources
except Exception as e:
print e
pass
        return []
def get_source(self, item_url, quality):
try:
headers={'User-Agent':random_agent()}
html = requests.get(item_url,headers=headers,timeout=5).content # open page passed
try:
                mainlink = re.findall('<iframe src="(.+?)"', html)[0]
                source = re.findall('//(.+?)/', str(mainlink))[0]
                self.sources.append({'source': source, 'quality': quality, 'scraper': self.name, 'url': mainlink, 'direct': False})  # this line will depend on what is sent
except:
pass
match_links = re.findall('data-link="(.+?)" rel="nofollow">(.+?)</a>',html)
for link,name in match_links:
link = base64.decodestring(link)
link = re.findall('"link":"(.+?)"',str(link.replace('\\','')))[0]
source = re.findall('//(.+?)/',str(link))[0]
self.sources.append({'source': source, 'quality': quality, 'scraper': self.name, 'url': link,'direct': False}) #this line will depend what sent
except Exception as e:
print e
#housemovie().scrape_movie('logan','2017','')
# you will need to regex/split or rename to get host name if required from link unless available on page it self
| gpl-2.0 | 5,268,606,886,261,176,000 | 53.016667 | 245 | 0.523604 | false |
pylint-bot/astroid-unofficial | astroid/protocols.py | 1 | 21725 | # copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""this module contains a set of functions to handle python protocols for nodes
where it makes sense.
"""
import collections
import operator
import sys
import six
from astroid import arguments
from astroid import bases
from astroid import context as contextmod
from astroid import exceptions
from astroid import decorators
from astroid import node_classes
from astroid import helpers
from astroid import nodes
from astroid import util
def _reflected_name(name):
return "__r" + name[2:]
def _augmented_name(name):
return "__i" + name[2:]
_CONTEXTLIB_MGR = 'contextlib.contextmanager'
BIN_OP_METHOD = {'+': '__add__',
'-': '__sub__',
'/': '__div__' if six.PY2 else '__truediv__',
'//': '__floordiv__',
'*': '__mul__',
'**': '__pow__',
'%': '__mod__',
'&': '__and__',
'|': '__or__',
'^': '__xor__',
'<<': '__lshift__',
'>>': '__rshift__',
'@': '__matmul__'
}
REFLECTED_BIN_OP_METHOD = {
key: _reflected_name(value)
for (key, value) in BIN_OP_METHOD.items()
}
AUGMENTED_OP_METHOD = {
key + "=": _augmented_name(value)
for (key, value) in BIN_OP_METHOD.items()
}
UNARY_OP_METHOD = {'+': '__pos__',
'-': '__neg__',
'~': '__invert__',
'not': None, # XXX not '__nonzero__'
}
_UNARY_OPERATORS = {
'+': operator.pos,
'-': operator.neg,
'~': operator.invert,
'not': operator.not_,
}
def _infer_unary_op(obj, op):
func = _UNARY_OPERATORS[op]
value = func(obj)
return nodes.const_factory(value)
nodes.Tuple.infer_unary_op = lambda self, op: _infer_unary_op(tuple(self.elts), op)
nodes.List.infer_unary_op = lambda self, op: _infer_unary_op(self.elts, op)
nodes.Set.infer_unary_op = lambda self, op: _infer_unary_op(set(self.elts), op)
nodes.Const.infer_unary_op = lambda self, op: _infer_unary_op(self.value, op)
nodes.Dict.infer_unary_op = lambda self, op: _infer_unary_op(dict(self.items), op)
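# Illustrative example (not part of the original module): inferring a unary
# operation on a constant node.
#
#     >>> nodes.Const(3).infer_unary_op('-').value
#     -3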
# Binary operations
BIN_OP_IMPL = {'+': lambda a, b: a + b,
'-': lambda a, b: a - b,
'/': lambda a, b: a / b,
'//': lambda a, b: a // b,
'*': lambda a, b: a * b,
'**': lambda a, b: a ** b,
'%': lambda a, b: a % b,
'&': lambda a, b: a & b,
'|': lambda a, b: a | b,
'^': lambda a, b: a ^ b,
'<<': lambda a, b: a << b,
'>>': lambda a, b: a >> b,
}
if sys.version_info >= (3, 5):
# MatMult is available since Python 3.5+.
BIN_OP_IMPL['@'] = operator.matmul
for _KEY, _IMPL in list(BIN_OP_IMPL.items()):
BIN_OP_IMPL[_KEY + '='] = _IMPL
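# Illustrative note (not part of the original module): the loop above makes the
# table answer augmented operators as well, e.g. BIN_OP_IMPL['+='](2, 3) == 5 and
# BIN_OP_IMPL['*='](2, 3) == 6.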
@decorators.yes_if_nothing_inferred
def const_infer_binary_op(self, operator, other, context, _):
not_implemented = nodes.Const(NotImplemented)
if isinstance(other, nodes.Const):
try:
impl = BIN_OP_IMPL[operator]
try:
yield nodes.const_factory(impl(self.value, other.value))
except TypeError:
# ArithmeticError is not enough: float >> float is a TypeError
yield not_implemented
except Exception: # pylint: disable=broad-except
yield util.Uninferable
except TypeError:
yield not_implemented
elif isinstance(self.value, six.string_types) and operator == '%':
# TODO(cpopa): implement string interpolation later on.
yield util.Uninferable
else:
yield not_implemented
nodes.Const.infer_binary_op = const_infer_binary_op
def _multiply_seq_by_int(self, other, context):
node = self.__class__()
elts = []
for elt in self.elts:
infered = helpers.safe_infer(elt, context)
if infered is None:
infered = util.Uninferable
elts.append(infered)
node.elts = elts * other.value
return node
def _filter_uninferable_nodes(elts, context):
for elt in elts:
if elt is util.Uninferable:
yield elt
else:
for inferred in elt.infer(context):
yield inferred
@decorators.yes_if_nothing_inferred
def tl_infer_binary_op(self, operator, other, context, method):
not_implemented = nodes.Const(NotImplemented)
if isinstance(other, self.__class__) and operator == '+':
node = self.__class__()
elts = list(_filter_uninferable_nodes(self.elts, context))
elts += list(_filter_uninferable_nodes(other.elts, context))
node.elts = elts
yield node
elif isinstance(other, nodes.Const) and operator == '*':
if not isinstance(other.value, int):
yield not_implemented
return
yield _multiply_seq_by_int(self, other, context)
elif isinstance(other, bases.Instance) and operator == '*':
# Verify if the instance supports __index__.
as_index = helpers.class_instance_as_index(other)
if not as_index:
yield util.Uninferable
else:
yield _multiply_seq_by_int(self, as_index, context)
else:
yield not_implemented
nodes.Tuple.infer_binary_op = tl_infer_binary_op
nodes.List.infer_binary_op = tl_infer_binary_op
@decorators.yes_if_nothing_inferred
def instance_infer_binary_op(self, operator, other, context, method):
return method.infer_call_result(self, context)
bases.Instance.infer_binary_op = instance_infer_binary_op
# assignment ##################################################################
"""the assigned_stmts method is responsible to return the assigned statement
(e.g. not inferred) according to the assignment type.
The `asspath` argument is used to record the lhs path of the original node.
For instance if we want assigned statements for 'c' in 'a, (b,c)', asspath
will be [1, 1] once arrived to the Assign node.
The `context` argument is the current inference context which should be given
to any intermediary inference necessary.
"""
def _resolve_looppart(parts, asspath, context):
"""recursive function to resolve multiple assignments on loops"""
asspath = asspath[:]
index = asspath.pop(0)
for part in parts:
if part is util.Uninferable:
continue
# XXX handle __iter__ and log potentially detected errors
if not hasattr(part, 'itered'):
continue
try:
itered = part.itered()
except TypeError:
continue # XXX log error
for stmt in itered:
try:
assigned = stmt.getitem(index, context)
except (AttributeError, IndexError):
continue
except TypeError: # stmt is unsubscriptable Const
continue
if not asspath:
# we achieved to resolved the assignment path,
# don't infer the last part
yield assigned
elif assigned is util.Uninferable:
break
else:
# we are not yet on the last part of the path
# search on each possibly inferred value
try:
for inferred in _resolve_looppart(assigned.infer(context),
asspath, context):
yield inferred
except exceptions.InferenceError:
break
@decorators.raise_if_nothing_inferred
def for_assigned_stmts(self, node=None, context=None, asspath=None):
if asspath is None:
for lst in self.iter.infer(context):
if isinstance(lst, (nodes.Tuple, nodes.List)):
for item in lst.elts:
yield item
else:
for inferred in _resolve_looppart(self.iter.infer(context),
asspath, context):
yield inferred
# Explicit StopIteration to return error information, see comment
# in raise_if_nothing_inferred.
raise StopIteration(dict(node=self, unknown=node,
assign_path=asspath, context=context))
nodes.For.assigned_stmts = for_assigned_stmts
nodes.Comprehension.assigned_stmts = for_assigned_stmts
def sequence_assigned_stmts(self, node=None, context=None, asspath=None):
if asspath is None:
asspath = []
try:
index = self.elts.index(node)
except ValueError:
util.reraise(exceptions.InferenceError(
'Tried to retrieve a node {node!r} which does not exist',
node=self, assign_path=asspath, context=context))
asspath.insert(0, index)
return self.parent.assigned_stmts(node=self, context=context, asspath=asspath)
nodes.Tuple.assigned_stmts = sequence_assigned_stmts
nodes.List.assigned_stmts = sequence_assigned_stmts
def assend_assigned_stmts(self, node=None, context=None, asspath=None):
return self.parent.assigned_stmts(node=self, context=context)
nodes.AssignName.assigned_stmts = assend_assigned_stmts
nodes.AssignAttr.assigned_stmts = assend_assigned_stmts
def _arguments_infer_argname(self, name, context):
# arguments information may be missing, in which case we can't do anything
# more
if not (self.args or self.vararg or self.kwarg):
yield util.Uninferable
return
# first argument of instance/class method
if self.args and getattr(self.args[0], 'name', None) == name:
functype = self.parent.type
cls = self.parent.parent.scope()
is_metaclass = isinstance(cls, nodes.ClassDef) and cls.type == 'metaclass'
# If this is a metaclass, then the first argument will always
# be the class, not an instance.
if is_metaclass or functype == 'classmethod':
yield cls
return
if functype == 'method':
yield bases.Instance(self.parent.parent.frame())
return
if context and context.callcontext:
call_site = arguments.CallSite(context.callcontext)
for value in call_site.infer_argument(self.parent, name, context):
yield value
return
# TODO: just provide the type here, no need to have an empty Dict.
if name == self.vararg:
vararg = nodes.const_factory(())
vararg.parent = self
yield vararg
return
if name == self.kwarg:
kwarg = nodes.const_factory({})
kwarg.parent = self
yield kwarg
return
# if there is a default value, yield it. And then yield Uninferable to reflect
# we can't guess given argument value
try:
context = contextmod.copy_context(context)
for inferred in self.default_value(name).infer(context):
yield inferred
yield util.Uninferable
except exceptions.NoDefault:
yield util.Uninferable
def arguments_assigned_stmts(self, node=None, context=None, asspath=None):
if context.callcontext:
# reset call context/name
callcontext = context.callcontext
context = contextmod.copy_context(context)
context.callcontext = None
args = arguments.CallSite(callcontext)
return args.infer_argument(self.parent, node.name, context)
return _arguments_infer_argname(self, node.name, context)
nodes.Arguments.assigned_stmts = arguments_assigned_stmts
@decorators.raise_if_nothing_inferred
def assign_assigned_stmts(self, node=None, context=None, asspath=None):
if not asspath:
yield self.value
return
for inferred in _resolve_asspart(self.value.infer(context), asspath, context):
yield inferred
# Explicit StopIteration to return error information, see comment
# in raise_if_nothing_inferred.
raise StopIteration(dict(node=self, unknown=node,
assign_path=asspath, context=context))
nodes.Assign.assigned_stmts = assign_assigned_stmts
nodes.AugAssign.assigned_stmts = assign_assigned_stmts
def _resolve_asspart(parts, asspath, context):
"""recursive function to resolve multiple assignments"""
asspath = asspath[:]
index = asspath.pop(0)
for part in parts:
if hasattr(part, 'getitem'):
try:
assigned = part.getitem(index, context)
# XXX raise a specific exception to avoid potential hiding of
# unexpected exception ?
except (TypeError, IndexError):
return
if not asspath:
# we achieved to resolved the assignment path, don't infer the
# last part
yield assigned
elif assigned is util.Uninferable:
return
else:
# we are not yet on the last part of the path search on each
# possibly inferred value
try:
for inferred in _resolve_asspart(assigned.infer(context),
asspath, context):
yield inferred
except exceptions.InferenceError:
return
@decorators.raise_if_nothing_inferred
def excepthandler_assigned_stmts(self, node=None, context=None, asspath=None):
for assigned in node_classes.unpack_infer(self.type):
if isinstance(assigned, nodes.ClassDef):
assigned = bases.Instance(assigned)
yield assigned
# Explicit StopIteration to return error information, see comment
# in raise_if_nothing_inferred.
raise StopIteration(dict(node=self, unknown=node,
assign_path=asspath, context=context))
nodes.ExceptHandler.assigned_stmts = excepthandler_assigned_stmts
def _infer_context_manager(self, mgr, context):
try:
inferred = next(mgr.infer(context=context))
except exceptions.InferenceError:
return
if isinstance(inferred, bases.Generator):
# Check if it is decorated with contextlib.contextmanager.
func = inferred.parent
if not func.decorators:
return
for decorator_node in func.decorators.nodes:
decorator = next(decorator_node.infer(context))
if isinstance(decorator, nodes.FunctionDef):
if decorator.qname() == _CONTEXTLIB_MGR:
break
else:
# It doesn't interest us.
return
# Get the first yield point. If it has multiple yields,
# then a RuntimeError will be raised.
# TODO(cpopa): Handle flows.
yield_point = next(func.nodes_of_class(nodes.Yield), None)
if yield_point:
if not yield_point.value:
# TODO(cpopa): an empty yield. Should be wrapped to Const.
const = nodes.Const(None)
const.parent = yield_point
const.lineno = yield_point.lineno
yield const
else:
for inferred in yield_point.value.infer(context=context):
yield inferred
elif isinstance(inferred, bases.Instance):
try:
enter = next(inferred.igetattr('__enter__', context=context))
except (exceptions.InferenceError, exceptions.AttributeInferenceError):
return
if not isinstance(enter, bases.BoundMethod):
return
if not context.callcontext:
context.callcontext = contextmod.CallContext(args=[inferred])
for result in enter.infer_call_result(self, context):
yield result
@decorators.raise_if_nothing_inferred
def with_assigned_stmts(self, node=None, context=None, asspath=None):
"""Infer names and other nodes from a *with* statement.
This enables only inference for name binding in a *with* statement.
For instance, in the following code, inferring `func` will return
the `ContextManager` class, not whatever ``__enter__`` returns.
We are doing this intentionally, because we consider that the context
manager result is whatever __enter__ returns and what it is binded
using the ``as`` keyword.
class ContextManager(object):
def __enter__(self):
return 42
with ContextManager() as f:
pass
# ContextManager().infer() will return ContextManager
# f.infer() will return 42.
Arguments:
self: nodes.With
node: The target of the assignment, `as (a, b)` in `with foo as (a, b)`.
context: TODO
asspath: TODO
"""
mgr = next(mgr for (mgr, vars) in self.items if vars == node)
if asspath is None:
for result in _infer_context_manager(self, mgr, context):
yield result
else:
for result in _infer_context_manager(self, mgr, context):
# Walk the asspath and get the item at the final index.
obj = result
for index in asspath:
if not hasattr(obj, 'elts'):
raise exceptions.InferenceError(
'Wrong type ({targets!r}) for {node!r} assignment',
node=self, targets=node, assign_path=asspath,
context=context)
try:
obj = obj.elts[index]
except IndexError:
util.reraise(exceptions.InferenceError(
'Tried to infer a nonexistent target with index {index} '
'in {node!r}.', node=self, targets=node,
assign_path=asspath, context=context))
yield obj
# Explicit StopIteration to return error information, see comment
# in raise_if_nothing_inferred.
raise StopIteration(dict(node=self, unknown=node,
assign_path=asspath, context=context))
nodes.With.assigned_stmts = with_assigned_stmts
@decorators.yes_if_nothing_inferred
def starred_assigned_stmts(self, node=None, context=None, asspath=None):
"""
Arguments:
self: nodes.Starred
node: TODO
context: TODO
asspath: TODO
"""
stmt = self.statement()
if not isinstance(stmt, (nodes.Assign, nodes.For)):
raise exceptions.InferenceError('Statement {stmt!r} enclosing {node!r} '
'must be an Assign or For node.',
node=self, stmt=stmt, unknown=node,
context=context)
if isinstance(stmt, nodes.Assign):
value = stmt.value
lhs = stmt.targets[0]
if sum(1 for node in lhs.nodes_of_class(nodes.Starred)) > 1:
raise exceptions.InferenceError('Too many starred arguments in the '
' assignment targets {lhs!r}.',
node=self, targets=lhs,
unknown=node, context=context)
if context is None:
context = contextmod.InferenceContext()
try:
rhs = next(value.infer(context))
except exceptions.InferenceError:
yield util.Uninferable
return
if rhs is util.Uninferable or not hasattr(rhs, 'elts'):
# Not interested in inferred values without elts.
yield util.Uninferable
return
elts = collections.deque(rhs.elts[:])
if len(lhs.elts) > len(rhs.elts):
raise exceptions.InferenceError('More targets, {targets!r}, than '
'values to unpack, {values!r}.',
node=self, targets=lhs,
values=rhs, unknown=node,
context=context)
# Unpack iteratively the values from the rhs of the assignment,
# until the find the starred node. What will remain will
# be the list of values which the Starred node will represent
# This is done in two steps, from left to right to remove
# anything before the starred node and from right to left
# to remove anything after the starred node.
for index, node in enumerate(lhs.elts):
if not isinstance(node, nodes.Starred):
elts.popleft()
continue
lhs_elts = collections.deque(reversed(lhs.elts[index:]))
for node in lhs_elts:
if not isinstance(node, nodes.Starred):
elts.pop()
continue
# We're done
packed = nodes.List()
packed.elts = elts
packed.parent = self
yield packed
break
nodes.Starred.assigned_stmts = starred_assigned_stmts
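# Illustrative note (not part of the original module): for an assignment such as
#     a, *b, c = (1, 2, 3, 4)
# the Starred target *b resolves to a nodes.List holding the middle elements,
# i.e. [2, 3].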
| gpl-2.0 | 8,057,490,289,828,611,000 | 36.328179 | 83 | 0.590012 | false |
ddurieux/alignak | test/test_disable_active_checks.py | 1 | 3446 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Jean Gabes, [email protected]
# Hartmut Goebel, [email protected]
# Grégory Starck, [email protected]
# Sebastien Coavoux, [email protected]
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from alignak_test import *
class TestDisableActiveChecks(AlignakTest):
# Uncomment this is you want to use a specific configuration
# for your test
#def setUp(self):
# self.setup_with_file('etc/alignak_disable_active_checks.cfg')
    # We try to disable the active checks and see if it's really done
# with a dummy check, so we need to get the same state and output
def test_disable_active_checks(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print "Get the hosts and services"
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
print "Checks in progress", host.checks_in_progress
c = host.checks_in_progress.pop()
print c.__dict__
print c.status
self.scheduler_loop(1, [[host, 0, 'I set this host UP | value1=1 value2=2']])
self.assertEqual('UP', host.state)
self.assertEqual('HARD', host.state_type)
last_output = host.output
host.schedule()
self.sched.external_command.DISABLE_HOST_CHECK(host)
c = host.checks_in_progress.pop()
print c.__dict__
print c.status
self.assertEqual('waitconsume', c.status)
self.scheduler_loop(2, [])
print host.state
print host.output
self.assertEqual(last_output, host.output)
print len(host.checks_in_progress)
print host.in_checking
self.assertEqual(False, host.in_checking)
if __name__ == '__main__':
unittest.main()
| agpl-3.0 | 3,982,005,406,517,748,700 | 32.446602 | 85 | 0.685631 | false |
SaintAttila/attila | attila/processes.py | 1 | 7909 | """
Tools for controlling and interacting with Windows processes.
"""
import time
import pywintypes
import win32api
import win32com.client
import win32con
from .utility import only
from .exceptions import TooFewItemsError, verify_type, verify_callable
__author__ = 'Aaron Hosford'
__all__ = [
"process_exists",
"count_processes",
"get_pids",
"get_name",
"get_command_line",
"get_parent_pid",
"get_child_pids",
"kill_process",
"kill_process_family",
"capture_process"
]
# Exit code can be any integer. I picked the binary representation of the
# string "TERM" as the default used when a process is forced to terminate.
DEFAULT_TERMINATION_EXIT_CODE = 1413829197
def process_exists(pid=None, name=None):
"""
Return a Boolean indicating whether a process exists.
:param pid: The process ID.
:param name: The process name.
:return: Whether the indicated process exists.
"""
return count_processes(pid, name) > 0
def count_processes(pid=None, name=None):
"""
Count the number of active processes. If a process ID or process name is provided, count only
processes that match the requirements.
:param pid: The process ID of the process.
:param name: The name of the process.
:return: The number of processes identified.
"""
counter = 0
for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process'):
if ((pid is None or process.Properties_("ProcessID").Value == pid) and
(name is None or process.Properties_("Name").Value == name)):
counter += 1
return counter
def get_pids(name=None):
"""
Return a list of process IDs of active processes.
:param name: The name of the processes.
:return: The PIDs of the processes.
"""
results = []
for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process'):
if name is None or process.Properties_("Name").Value == name:
results.append(process.Properties_("ProcessID").Value)
return results
def get_name(pid, default=None):
"""
Return the name of the process if it exists, or the default otherwise.
:param pid: The process ID.
:param default: The default value to return if the process does not exist.
:return: The name of the process.
"""
try:
return only(
process.Properties_("Name").Value
for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process')
if process.Properties_("ProcessID").Value == pid
)
except TooFewItemsError:
return default
def get_command_line(pid, default=None):
"""
Return the command line of the process if it exists, or the default otherwise.
:param pid: The process ID.
:param default: The default value to return if the process does not exist.
:return: The command line that created the process.
"""
try:
return only(
process.Properties_("CommandLine").Value
for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process')
if process.Properties_("ProcessID").Value == pid
)
except TooFewItemsError:
return default
def get_parent_pid(pid):
"""
Return the process ID of the parent process. If no parent, return None.
:param pid: The process ID of the child process.
:return: The process ID of the parent process, or None.
"""
wmi = win32com.client.GetObject('winmgmts:')
# noinspection SqlDialectInspection,SqlNoDataSourceInspection
parent_pids = wmi.ExecQuery(
'SELECT ParentProcessID FROM Win32_Process WHERE ProcessID=%s' % pid
)
if not parent_pids:
return None
return only(parent_pids).Properties_('ParentProcessID').Value
def get_child_pids(pid):
"""
Return the process IDs of the child processes in a list.
:param pid: The process ID of the parent process.
:return: A list of the child process IDs.
"""
wmi = win32com.client.GetObject('winmgmts:')
# noinspection SqlNoDataSourceInspection,SqlDialectInspection
children = wmi.ExecQuery('SELECT * FROM Win32_Process WHERE ParentProcessID = %s' % pid)
return [child.Properties_('ProcessId').Value for child in children]
def kill_process(pid, exit_code=None):
"""
Kill a specific process.
:param pid: The process ID of the process to be terminated.
:param exit_code: The exit code that the terminated process should return. (Default is
DEFAULT_TERMINATION_EXIT_CODE.)
:return: Whether the process was successfully terminated.
"""
if exit_code is None:
exit_code = DEFAULT_TERMINATION_EXIT_CODE
try:
handle = win32api.OpenProcess(win32con.PROCESS_TERMINATE, 0, pid)
except pywintypes.error:
return False # "The parameter is incorrect."
if not handle:
return False
try:
win32api.TerminateProcess(handle, exit_code)
return True
except pywintypes.error:
return False # "Access is denied."
finally:
win32api.CloseHandle(handle)
def kill_process_family(pid, exit_code=None, timeout=None):
"""
Kill a specific process and all descendant processes.
:param pid: The process ID of the root process to terminate.
:param exit_code: The exit code to be returned by each terminated process.
:param timeout: The maximum time in seconds to continue trying to kill the processes.
:return: None
"""
if timeout is not None:
end_time = time.time() + timeout
else:
end_time = None
while True:
children = get_child_pids(pid)
if not children:
break
if end_time is not None and time.time() >= end_time:
raise TimeoutError("Unable to kill child processes.")
for child in children:
kill_process_family(child, exit_code)
kill_process(pid, exit_code)
def capture_process(command, process_name=None, closer=None, args=None, kwargs=None):
"""
Call the command and capture its return value. Watch for a unique process to be created by the
command, and capture its PID. If a unique new process could not be identified, raise an
exception. If anything goes wrong after the command is called, and a closer has been provided,
pass the return value from the command to the closer before raising the exception.
:param command: A Python callable (a function, method, lambda, or class initializer).
:param process_name: The expected name of the process that will be created by the command.
:param closer: A Python callable that releases resources if an exception occurs.
:param args: Arguments to be passed to the command.
:param kwargs: Keyword arguments to be passed to the command.
:return: A pair, (result, pid), where result is the return value of the command and pid is the
new process ID.
"""
verify_callable(command)
verify_type(process_name, str, non_empty=True, allow_none=True)
verify_callable(closer, allow_none=True)
if args is None:
args = ()
if kwargs is None:
kwargs = {}
wmi = win32com.client.GetObject('winmgmts:')
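    # Snapshot the matching processes before calling the command so that any
    # PID present only in the "after" snapshot can be attributed to it.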
before = {
process.Properties_("ProcessID").Value: process
for process in wmi.InstancesOf('Win32_Process')
if (process_name is None or process.Properties_("Name").Value == process_name)
}
result = command(*args, **kwargs)
try:
        after = {
            process.Properties_("ProcessID").Value: process
            for process in wmi.InstancesOf('Win32_Process')
            if (process_name is None or process.Properties_("Name").Value == process_name)
        }
new_pids = set(after) - set(before)
return result, only(new_pids)
except:
if closer is not None:
closer(result)
raise
| mit | -3,496,954,635,227,142,000 | 30.50996 | 98 | 0.663801 | false |
tensorflow/runtime | third_party/llvm/expand_cmake_vars.py | 1 | 2647 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Expands CMake variables in a text file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
_CMAKE_DEFINE_REGEX = re.compile(r"\s*#cmakedefine\s+([A-Za-z_0-9]*)(\s.*)?$")
_CMAKE_DEFINE01_REGEX = re.compile(r"\s*#cmakedefine01\s+([A-Za-z_0-9]*)")
_CMAKE_VAR_REGEX = re.compile(r"\${([A-Za-z_0-9]*)}")
def _parse_args(argv):
"""Parses arguments with the form KEY=VALUE into a dictionary."""
result = {}
for arg in argv:
k, v = arg.split("=")
result[k] = v
return result
def _expand_variables(input_str, cmake_vars):
"""Expands ${VARIABLE}s in 'input_str', using dictionary 'cmake_vars'.
Args:
input_str: the string containing ${VARIABLE} expressions to expand.
cmake_vars: a dictionary mapping variable names to their values.
Returns:
The expanded string.
"""
def replace(match):
if match.group(1) in cmake_vars:
return cmake_vars[match.group(1)]
return ""
return _CMAKE_VAR_REGEX.sub(replace, input_str)
def _expand_cmakedefines(line, cmake_vars):
"""Expands #cmakedefine declarations, using a dictionary 'cmake_vars'."""
# Handles #cmakedefine lines
match = _CMAKE_DEFINE_REGEX.match(line)
if match:
name = match.group(1)
suffix = match.group(2) or ""
if name in cmake_vars:
return "#define {}{}\n".format(name,
_expand_variables(suffix, cmake_vars))
else:
return "/* #undef {} */\n".format(name)
# Handles #cmakedefine01 lines
match = _CMAKE_DEFINE01_REGEX.match(line)
if match:
name = match.group(1)
value = cmake_vars.get(name, "0")
return "#define {} {}\n".format(name, value)
# Otherwise return the line unchanged.
return _expand_variables(line, cmake_vars)
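# Typical invocation, inferred from main() below: CMake variables are passed as
# KEY=VALUE arguments, the template is read from stdin, and the expanded text is
# written to stdout, e.g.
#   expand_cmake_vars.py FOO=1 BAR=baz < config.h.cmake > config.h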
def main():
cmake_vars = _parse_args(sys.argv[1:])
for line in sys.stdin:
sys.stdout.write(_expand_cmakedefines(line, cmake_vars))
if __name__ == "__main__":
main()
| apache-2.0 | -6,928,396,880,634,280,000 | 28.741573 | 80 | 0.649037 | false |
ericholscher/django | django/contrib/admindocs/views.py | 1 | 15158 | from importlib import import_module
import inspect
import os
import re
from django import template
from django.template import RequestContext
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.db import models
from django.shortcuts import render_to_response
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.core import urlresolvers
from django.contrib.admindocs import utils
from django.contrib.sites.models import Site
from django.utils._os import upath
from django.utils import six
from django.utils.translation import ugettext as _
# Exclude methods starting with these strings from documentation
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class GenericSite(object):
domain = 'example.com'
name = 'my site'
@staff_member_required
def doc_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
return render_to_response('admin_doc/index.html', {
'root_path': urlresolvers.reverse('admin:index'),
}, context_instance=RequestContext(request))
@staff_member_required
def bookmarklets(request):
admin_root = urlresolvers.reverse('admin:index')
return render_to_response('admin_doc/bookmarklets.html', {
'root_path': admin_root,
'admin_url': "%s://%s%s" % (request.scheme, request.get_host(), admin_root),
}, context_instance=RequestContext(request))
@staff_member_required
def template_tag_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
load_all_installed_template_libraries()
tags = []
app_libs = list(six.iteritems(template.libraries))
builtin_libs = [(None, lib) for lib in template.builtins]
for module_name, library in builtin_libs + app_libs:
for tag_name, tag_func in library.tags.items():
title, body, metadata = utils.parse_docstring(tag_func.__doc__)
if title:
title = utils.parse_rst(title, 'tag', _('tag:') + tag_name)
if body:
body = utils.parse_rst(body, 'tag', _('tag:') + tag_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
if library in template.builtins:
tag_library = ''
else:
tag_library = module_name.split('.')[-1]
tags.append({
'name': tag_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
return render_to_response('admin_doc/template_tag_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'tags': tags
}, context_instance=RequestContext(request))
@staff_member_required
def template_filter_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
load_all_installed_template_libraries()
filters = []
app_libs = list(six.iteritems(template.libraries))
builtin_libs = [(None, lib) for lib in template.builtins]
for module_name, library in builtin_libs + app_libs:
for filter_name, filter_func in library.filters.items():
title, body, metadata = utils.parse_docstring(filter_func.__doc__)
if title:
title = utils.parse_rst(title, 'filter', _('filter:') + filter_name)
if body:
body = utils.parse_rst(body, 'filter', _('filter:') + filter_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
if library in template.builtins:
tag_library = ''
else:
tag_library = module_name.split('.')[-1]
filters.append({
'name': filter_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
return render_to_response('admin_doc/template_filter_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'filters': filters
}, context_instance=RequestContext(request))
@staff_member_required
def view_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
if settings.ADMIN_FOR:
settings_modules = [import_module(m) for m in settings.ADMIN_FOR]
else:
settings_modules = [settings]
views = []
for settings_mod in settings_modules:
urlconf = import_module(settings_mod.ROOT_URLCONF)
view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
if Site._meta.installed:
site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
else:
site_obj = GenericSite()
for (func, regex) in view_functions:
views.append({
'full_name': '%s.%s' % (func.__module__, getattr(func, '__name__', func.__class__.__name__)),
'site_id': settings_mod.SITE_ID,
'site': site_obj,
'url': simplify_regex(regex),
})
return render_to_response('admin_doc/view_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'views': views
}, context_instance=RequestContext(request))
@staff_member_required
def view_detail(request, view):
if not utils.docutils_is_available:
return missing_docutils_page(request)
mod, func = urlresolvers.get_mod_func(view)
try:
view_func = getattr(import_module(mod), func)
except (ImportError, AttributeError):
raise Http404
title, body, metadata = utils.parse_docstring(view_func.__doc__)
if title:
title = utils.parse_rst(title, 'view', _('view:') + view)
if body:
body = utils.parse_rst(body, 'view', _('view:') + view)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view)
return render_to_response('admin_doc/view_detail.html', {
'root_path': urlresolvers.reverse('admin:index'),
'name': view,
'summary': title,
'body': body,
'meta': metadata,
}, context_instance=RequestContext(request))
@staff_member_required
def model_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
m_list = [m._meta for m in models.get_models()]
return render_to_response('admin_doc/model_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'models': m_list
}, context_instance=RequestContext(request))
@staff_member_required
def model_detail(request, app_label, model_name):
if not utils.docutils_is_available:
return missing_docutils_page(request)
# Get the model class.
try:
app_mod = models.get_app(app_label)
except ImproperlyConfigured:
raise Http404(_("App %r not found") % app_label)
model = None
for m in models.get_models(app_mod):
if m._meta.model_name == model_name:
model = m
break
if model is None:
raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % {'model_name': model_name, 'app_label': app_label})
opts = model._meta
# Gather fields/field descriptions.
fields = []
for field in opts.fields:
# ForeignKey is a special case since the field will actually be a
# descriptor that returns the other object
if isinstance(field, models.ForeignKey):
data_type = field.rel.to.__name__
app_label = field.rel.to._meta.app_label
verbose = utils.parse_rst(
(_("the related `%(app_label)s.%(data_type)s` object") % {
'app_label': app_label, 'data_type': data_type,
}),
'model',
_('model:') + data_type,
)
else:
data_type = get_readable_field_data_type(field)
verbose = field.verbose_name
fields.append({
'name': field.name,
'data_type': data_type,
'verbose': verbose,
'help_text': field.help_text,
})
# Gather many-to-many fields.
for field in opts.many_to_many:
data_type = field.rel.to.__name__
app_label = field.rel.to._meta.app_label
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': app_label, 'object_name': data_type}
fields.append({
'name': "%s.all" % field.name,
"data_type": 'List',
'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name),
})
fields.append({
'name': "%s.count" % field.name,
'data_type': 'Integer',
'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name),
})
# Gather model methods.
for func_name, func in model.__dict__.items():
if (inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1):
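            # Skip methods whose names start with an excluded prefix; raising
            # StopIteration here is just a way to continue the outer loop from
            # inside the prefix loop.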
try:
for exclude in MODEL_METHODS_EXCLUDE:
if func_name.startswith(exclude):
raise StopIteration
except StopIteration:
continue
verbose = func.__doc__
if verbose:
verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.model_name)
fields.append({
'name': func_name,
'data_type': get_return_data_type(func_name),
'verbose': verbose,
})
# Gather related objects
for rel in opts.get_all_related_objects() + opts.get_all_related_many_to_many_objects():
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel.opts.app_label, 'object_name': rel.opts.object_name}
accessor = rel.get_accessor_name()
fields.append({
'name': "%s.all" % accessor,
'data_type': 'List',
'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name),
})
fields.append({
'name': "%s.count" % accessor,
'data_type': 'Integer',
'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name),
})
return render_to_response('admin_doc/model_detail.html', {
'root_path': urlresolvers.reverse('admin:index'),
'name': '%s.%s' % (opts.app_label, opts.object_name),
# Translators: %s is an object type name
'summary': _("Attributes on %s objects") % opts.object_name,
'description': model.__doc__,
'fields': fields,
}, context_instance=RequestContext(request))
@staff_member_required
def template_detail(request, template):
templates = []
for site_settings_module in settings.ADMIN_FOR:
settings_mod = import_module(site_settings_module)
if Site._meta.installed:
site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
else:
site_obj = GenericSite()
for dir in settings_mod.TEMPLATE_DIRS:
template_file = os.path.join(dir, template)
templates.append({
'file': template_file,
'exists': os.path.exists(template_file),
'contents': lambda: open(template_file).read() if os.path.exists(template_file) else '',
'site_id': settings_mod.SITE_ID,
'site': site_obj,
'order': list(settings_mod.TEMPLATE_DIRS).index(dir),
})
return render_to_response('admin_doc/template_detail.html', {
'root_path': urlresolvers.reverse('admin:index'),
'name': template,
'templates': templates,
}, context_instance=RequestContext(request))
####################
# Helper functions #
####################
def missing_docutils_page(request):
"""Display an error message for people without docutils"""
return render_to_response('admin_doc/missing_docutils.html')
def load_all_installed_template_libraries():
# Load/register all template tag libraries from installed apps.
for module_name in template.get_templatetags_modules():
mod = import_module(module_name)
try:
libraries = [
os.path.splitext(p)[0]
for p in os.listdir(os.path.dirname(upath(mod.__file__)))
if p.endswith('.py') and p[0].isalpha()
]
except OSError:
libraries = []
for library_name in libraries:
try:
template.get_library(library_name)
except template.InvalidTemplateLibrary:
pass
def get_return_data_type(func_name):
"""Return a somewhat-helpful data type given a function name"""
if func_name.startswith('get_'):
if func_name.endswith('_list'):
return 'List'
elif func_name.endswith('_count'):
return 'Integer'
return ''
def get_readable_field_data_type(field):
"""Returns the description for a given field type, if it exists,
Fields' descriptions can contain format strings, which will be interpolated
against the values of field.__dict__ before being output."""
return field.description % field.__dict__
def extract_views_from_urlpatterns(urlpatterns, base=''):
"""
Return a list of views from a list of urlpatterns.
Each object in the returned list is a two-tuple: (view_func, regex)
"""
views = []
for p in urlpatterns:
if hasattr(p, 'url_patterns'):
try:
patterns = p.url_patterns
except ImportError:
continue
views.extend(extract_views_from_urlpatterns(patterns, base + p.regex.pattern))
elif hasattr(p, 'callback'):
try:
views.append((p.callback, base + p.regex.pattern))
except ViewDoesNotExist:
continue
else:
raise TypeError(_("%s does not appear to be a urlpattern object") % p)
return views
named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)')
non_named_group_matcher = re.compile(r'\(.*?\)')
def simplify_regex(pattern):
"""
Clean up urlpattern regexes into something somewhat readable by Mere Humans:
turns something like "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
into "<sport_slug>/athletes/<athlete_slug>/"
"""
# handle named groups first
pattern = named_group_matcher.sub(lambda m: m.group(1), pattern)
# handle non-named groups
pattern = non_named_group_matcher.sub("<var>", pattern)
# clean up any outstanding regex-y characters.
pattern = pattern.replace('^', '').replace('$', '').replace('?', '').replace('//', '/').replace('\\', '')
if not pattern.startswith('/'):
pattern = '/' + pattern
return pattern
| bsd-3-clause | -5,536,292,160,864,529,000 | 37.866667 | 143 | 0.591701 | false |
mlcommons/training | translation/tensorflow/process_data.py | 1 | 15302 | # Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Download and preprocess WMT17 ende training and evaluation datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import random
import sys
import tarfile
import urllib
import six
import tensorflow as tf
import urllib.request
from mlperf_compliance import mlperf_log
from utils import tokenizer
# Data sources for training/evaluating the transformer translation model.
# If any of the training sources are changed, then either:
# 1) use the flag `--search` to find the best min count or
# 2) update the _TRAIN_DATA_MIN_COUNT constant.
# min_count is the minimum number of times a token must appear in the data
# before it is added to the vocabulary. "Best min count" refers to the value
# that generates a vocabulary set that is closest in size to _TARGET_VOCAB_SIZE.
_TRAIN_DATA_SOURCES = [
{
"url": "http://data.statmt.org/wmt17/translation-task/"
"training-parallel-nc-v12.tgz",
"input": "news-commentary-v12.de-en.en",
"target": "news-commentary-v12.de-en.de",
},
{
"url": "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
"input": "commoncrawl.de-en.en",
"target": "commoncrawl.de-en.de",
},
{
"url": "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
"input": "europarl-v7.de-en.en",
"target": "europarl-v7.de-en.de",
},
]
# Use pre-defined minimum count to generate subtoken vocabulary.
_TRAIN_DATA_MIN_COUNT = 6
_EVAL_DATA_SOURCES = [
{
"url": "http://data.statmt.org/wmt17/translation-task/dev.tgz",
"input": "newstest2013.en",
"target": "newstest2013.de",
}
]
# Vocabulary constants
_TARGET_VOCAB_SIZE = 32768 # Number of subtokens in the vocabulary list.
_TARGET_THRESHOLD = 327 # Accept vocabulary if size is within this threshold
_VOCAB_FILE = "vocab.ende.%d" % _TARGET_VOCAB_SIZE
# Strings to include in the generated files.
_PREFIX = "wmt32k"
_COMPILE_TAG = "compiled"
_ENCODE_TAG = "encoded"
_TRAIN_TAG = "train"
_EVAL_TAG = "dev" # Following WMT and Tensor2Tensor conventions, in which the
# evaluation datasets are tagged as "dev" for development.
# Number of files to split train and evaluation data
_TRAIN_SHARDS = 100
_EVAL_SHARDS = 1
def find_file(path, filename, max_depth=5):
"""Returns full filepath if the file is in path or a subdirectory."""
for root, dirs, files in os.walk(path):
if filename in files:
return os.path.join(root, filename)
# Don't search past max_depth
depth = root[len(path) + 1:].count(os.sep)
if depth > max_depth:
del dirs[:] # Clear dirs
return None
###############################################################################
# Download and extraction functions
###############################################################################
def get_raw_files(raw_dir, data_source):
"""Return raw files from source. Downloads/extracts if needed.
Args:
raw_dir: string directory to store raw files
data_source: dictionary with
{"url": url of compressed dataset containing input and target files
"input": file with data in input language
"target": file with data in target language}
Returns:
dictionary with
{"inputs": list of files containing data in input language
"targets": list of files containing corresponding data in target language
}
"""
raw_files = {
"inputs": [],
"targets": [],
} # keys
for d in data_source:
input_file, target_file = download_and_extract(
raw_dir, d["url"], d["input"], d["target"])
raw_files["inputs"].append(input_file)
raw_files["targets"].append(target_file)
return raw_files
def download_report_hook(count, block_size, total_size):
"""Report hook for download progress.
Args:
count: current block number
block_size: block size
total_size: total size
"""
percent = int(count * block_size * 100 / total_size)
print("\r%d%%" % percent + " completed", end="\r")
def download_from_url(path, url):
"""Download content from a url.
Args:
path: string directory where file will be downloaded
url: string url
Returns:
Full path to downloaded file
"""
filename = url.split("/")[-1]
found_file = find_file(path, filename, max_depth=0)
if found_file is None:
filename = os.path.join(path, filename)
tf.logging.info("Downloading from %s to %s." % (url, filename))
inprogress_filepath = filename + ".incomplete"
inprogress_filepath, _ = urllib.request.urlretrieve(
url, inprogress_filepath, reporthook=download_report_hook)
# Print newline to clear the carriage return from the download progress.
print()
tf.gfile.Rename(inprogress_filepath, filename)
return filename
else:
tf.logging.info("Already downloaded: %s (at %s)." % (url, found_file))
return found_file
def download_and_extract(path, url, input_filename, target_filename):
"""Extract files from downloaded compressed archive file.
Args:
path: string directory where the files will be downloaded
url: url containing the compressed input and target files
input_filename: name of file containing data in source language
target_filename: name of file containing data in target language
Returns:
Full paths to extracted input and target files.
Raises:
    OSError: if the download/extraction fails.
"""
# Check if extracted files already exist in path
input_file = find_file(path, input_filename)
target_file = find_file(path, target_filename)
if input_file and target_file:
tf.logging.info("Already downloaded and extracted %s." % url)
return input_file, target_file
# Download archive file if it doesn't already exist.
compressed_file = download_from_url(path, url)
# Extract compressed files
tf.logging.info("Extracting %s." % compressed_file)
with tarfile.open(compressed_file, "r:gz") as corpus_tar:
corpus_tar.extractall(path)
# Return filepaths of the requested files.
input_file = find_file(path, input_filename)
target_file = find_file(path, target_filename)
if input_file and target_file:
return input_file, target_file
raise OSError("Download/extraction failed for url %s to path %s" %
(url, path))
def txt_line_iterator(path):
"""Iterate through lines of file."""
with tf.gfile.Open(path) as f:
for line in f:
yield line.strip()
def compile_files(data_dir, raw_files, tag):
"""Compile raw files into a single file for each language.
Args:
    data_dir: Directory in which to write the compiled files.
raw_files: Dict containing filenames of input and target data.
{"inputs": list of files containing data in input language
"targets": list of files containing corresponding data in target language
}
tag: String to append to the compiled filename.
Returns:
Full path of compiled input and target files.
"""
tf.logging.info("Compiling files with tag %s." % tag)
filename = "%s-%s-%s" % (_PREFIX, _COMPILE_TAG, tag)
input_compiled_file = os.path.join(data_dir, filename + ".lang1")
target_compiled_file = os.path.join(data_dir, filename + ".lang2")
with tf.gfile.Open(input_compiled_file, mode="w") as input_writer:
with tf.gfile.Open(target_compiled_file, mode="w") as target_writer:
for i in range(len(raw_files["inputs"])):
input_file = raw_files["inputs"][i]
target_file = raw_files["targets"][i]
tf.logging.info("Reading files %s and %s." % (input_file, target_file))
write_file(input_writer, input_file)
write_file(target_writer, target_file)
return input_compiled_file, target_compiled_file
def write_file(writer, filename):
"""Write all of lines from file using the writer."""
for line in txt_line_iterator(filename):
writer.write(line)
writer.write("\n")
###############################################################################
# Data preprocessing
###############################################################################
def encode_and_save_files(
subtokenizer, data_dir, raw_files, tag, total_shards):
"""Save data from files as encoded Examples in TFrecord format.
Args:
subtokenizer: Subtokenizer object that will be used to encode the strings.
data_dir: The directory in which to write the examples
raw_files: A tuple of (input, target) data files. Each line in the input and
the corresponding line in target file will be saved in a tf.Example.
tag: String that will be added onto the file names.
total_shards: Number of files to divide the data into.
Returns:
List of all files produced.
"""
# Create a file for each shard.
filepaths = [shard_filename(data_dir, tag, n + 1, total_shards)
for n in range(total_shards)]
if all_exist(filepaths):
tf.logging.info("Files with tag %s already exist." % tag)
return filepaths
tf.logging.info("Saving files with tag %s." % tag)
input_file = raw_files[0]
target_file = raw_files[1]
# Write examples to each shard in round robin order.
tmp_filepaths = [fname + ".incomplete" for fname in filepaths]
writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filepaths]
counter, shard = 0, 0
for counter, (input_line, target_line) in enumerate(zip(
txt_line_iterator(input_file), txt_line_iterator(target_file))):
if counter > 0 and counter % 100000 == 0:
tf.logging.info("\tSaving case %d." % counter)
example = dict_to_example(
{"inputs": subtokenizer.encode(input_line, add_eos=True),
"targets": subtokenizer.encode(target_line, add_eos=True)})
writers[shard].write(example.SerializeToString())
shard = (shard + 1) % total_shards
for writer in writers:
writer.close()
for tmp_name, final_name in zip(tmp_filepaths, filepaths):
tf.gfile.Rename(tmp_name, final_name)
if tag == _TRAIN_TAG:
mlperf_log.transformer_print(key=mlperf_log.PREPROC_NUM_TRAIN_EXAMPLES,
value=counter)
elif tag == _EVAL_TAG:
mlperf_log.transformer_print(key=mlperf_log.PREPROC_NUM_EVAL_EXAMPLES,
value=counter)
tf.logging.info("Saved %d Examples", counter)
return filepaths
def shard_filename(path, tag, shard_num, total_shards):
"""Create filename for data shard."""
return os.path.join(
path, "%s-%s-%s-%.5d-of-%.5d" %
(_PREFIX, _ENCODE_TAG, tag, shard_num, total_shards))
def shuffle_records(fname):
"""Shuffle records in a single file."""
tf.logging.info("Shuffling records in file %s" % fname)
# Rename file prior to shuffling
tmp_fname = fname + ".unshuffled"
tf.gfile.Rename(fname, tmp_fname)
reader = tf.python_io.tf_record_iterator(tmp_fname)
records = []
for record in reader:
records.append(record)
if len(records) % 100000 == 0:
tf.logging.info("\tRead: %d", len(records))
random.shuffle(records)
# Write shuffled records to original file name
with tf.python_io.TFRecordWriter(fname) as w:
for count, record in enumerate(records):
w.write(record)
if count > 0 and count % 100000 == 0:
tf.logging.info("\tWriting record: %d" % count)
tf.gfile.Remove(tmp_fname)
def dict_to_example(dictionary):
"""Converts a dictionary of string->int to a tf.Example."""
features = {}
for k, v in six.iteritems(dictionary):
features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v))
return tf.train.Example(features=tf.train.Features(feature=features))
def all_exist(filepaths):
"""Returns true if all files in the list exist."""
for fname in filepaths:
if not tf.gfile.Exists(fname):
return False
return True
def make_dir(path):
if not tf.gfile.Exists(path):
tf.logging.info("Creating directory %s" % path)
tf.gfile.MakeDirs(path)
def main(unused_argv):
"""Obtain training and evaluation data for the Transformer model."""
tf.logging.set_verbosity(tf.logging.INFO)
make_dir(FLAGS.raw_dir)
make_dir(FLAGS.data_dir)
# Get paths of download/extracted training and evaluation files.
tf.logging.info("Step 1/4: Downloading data from source")
train_files = get_raw_files(FLAGS.raw_dir, _TRAIN_DATA_SOURCES)
eval_files = get_raw_files(FLAGS.raw_dir, _EVAL_DATA_SOURCES)
# Create subtokenizer based on the training files.
tf.logging.info("Step 2/4: Creating subtokenizer and building vocabulary")
train_files_flat = train_files["inputs"] + train_files["targets"]
vocab_file = os.path.join(FLAGS.data_dir, _VOCAB_FILE)
subtokenizer = tokenizer.Subtokenizer.init_from_files(
vocab_file, train_files_flat, _TARGET_VOCAB_SIZE, _TARGET_THRESHOLD,
min_count=None if FLAGS.search else _TRAIN_DATA_MIN_COUNT)
tf.logging.info("Step 3/4: Compiling training and evaluation data")
compiled_train_files = compile_files(FLAGS.data_dir, train_files, _TRAIN_TAG)
compiled_eval_files = compile_files(FLAGS.data_dir, eval_files, _EVAL_TAG)
# Tokenize and save data as Examples in the TFRecord format.
tf.logging.info("Step 4/4: Preprocessing and saving data")
mlperf_log.transformer_print(key=mlperf_log.PREPROC_TOKENIZE_TRAINING)
train_tfrecord_files = encode_and_save_files(
subtokenizer, FLAGS.data_dir, compiled_train_files, _TRAIN_TAG,
_TRAIN_SHARDS)
mlperf_log.transformer_print(key=mlperf_log.PREPROC_TOKENIZE_EVAL)
encode_and_save_files(
subtokenizer, FLAGS.data_dir, compiled_eval_files, _EVAL_TAG,
_EVAL_SHARDS)
mlperf_log.transformer_print(key=mlperf_log.INPUT_ORDER)
for fname in train_tfrecord_files:
shuffle_records(fname)
if __name__ == "__main__":
mlperf_log.ROOT_DIR_TRANSFORMER = os.path.dirname(os.path.realpath(__file__))
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir", "-dd", type=str, default="/tmp/translate_ende",
help="[default: %(default)s] Directory for where the "
"translate_ende_wmt32k dataset is saved.",
metavar="<DD>")
parser.add_argument(
"--raw_dir", "-rd", type=str, default="/tmp/translate_ende_raw",
help="[default: %(default)s] Path where the raw data will be downloaded "
"and extracted.",
metavar="<RD>")
parser.add_argument(
"--search", action="store_true",
help="If set, use binary search to find the vocabulary set with size"
"closest to the target size (%d)." % _TARGET_VOCAB_SIZE)
FLAGS, unparsed = parser.parse_known_args()
main(sys.argv)
| apache-2.0 | 2,412,527,883,679,697,400 | 34.09633 | 80 | 0.668213 | false |
imankulov/sentry | src/sentry/plugins/sentry_mail/models.py | 1 | 5759 | """
sentry.plugins.sentry_mail.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sentry
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from sentry.plugins import register
from sentry.plugins.bases.notify import NotificationPlugin
from sentry.utils.cache import cache
from sentry.utils.email import MessageBuilder, group_id_to_email
from sentry.utils.http import absolute_uri
NOTSET = object()
class MailPlugin(NotificationPlugin):
title = 'Mail'
conf_key = 'mail'
slug = 'mail'
version = sentry.VERSION
author = "Sentry Team"
author_url = "https://github.com/getsentry/sentry"
project_default_enabled = True
project_conf_form = None
subject_prefix = settings.EMAIL_SUBJECT_PREFIX
def _send_mail(self, subject, template=None, html_template=None, body=None,
project=None, group=None, headers=None, context=None):
send_to = self.get_send_to(project)
if not send_to:
return
subject_prefix = self.get_option('subject_prefix', project) or self.subject_prefix
subject_prefix = force_text(subject_prefix)
subject = force_text(subject)
msg = MessageBuilder(
subject='%s%s' % (subject_prefix, subject),
template=template,
html_template=html_template,
body=body,
headers=headers,
context=context,
reference=group,
)
msg.add_users(send_to, project=project)
return msg.send()
def send_test_mail(self, project=None):
self._send_mail(
subject='Test Email',
body='This email was requested as a test of Sentry\'s outgoing email',
project=project,
)
def get_notification_settings_url(self):
return absolute_uri(reverse('sentry-account-settings-notifications'))
def get_project_url(self, project):
return absolute_uri(reverse('sentry-stream', args=[
project.organization.slug,
project.slug,
]))
def should_notify(self, group, event):
send_to = self.get_sendable_users(group.project)
if not send_to:
return False
return super(MailPlugin, self).should_notify(group, event)
def get_send_to(self, project=None):
"""
Returns a list of email addresses for the users that should be notified of alerts.
        The results of this call can be fairly expensive to calculate, so the
        send_to list gets cached for 60 seconds.
"""
if project:
project_id = project.pk
else:
project_id = ''
if not (project and project.team):
return []
conf_key = self.get_conf_key()
cache_key = '%s:send_to:%s' % (conf_key, project_id)
send_to_list = cache.get(cache_key)
if send_to_list is None:
send_to_list = self.get_sendable_users(project)
send_to_list = filter(bool, send_to_list)
cache.set(cache_key, send_to_list, 60) # 1 minute cache
return send_to_list
def notify(self, notification):
event = notification.event
group = event.group
project = group.project
interface_list = []
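        # Render each interface of the event to both HTML and plain text so the
        # email template can include the two representations.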
for interface in event.interfaces.itervalues():
body = interface.to_email_html(event)
if not body:
continue
text_body = interface.to_string(event)
interface_list.append(
(interface.get_title(), mark_safe(body), text_body)
)
subject = group.get_email_subject()
link = group.get_absolute_url()
template = 'sentry/emails/error.txt'
html_template = 'sentry/emails/error.html'
rules = []
for rule in notification.rules:
rule_link = reverse('sentry-edit-project-rule', args=[
group.organization.slug, project.slug, rule.id
])
rules.append((rule.label, rule_link))
context = {
'project_label': project.get_full_name(),
'group': group,
'event': event,
'tags': event.get_tags(),
'link': link,
'interfaces': interface_list,
'rules': rules,
}
headers = {
'X-Sentry-Logger': group.logger,
'X-Sentry-Logger-Level': group.get_level_display(),
'X-Sentry-Team': project.team.name,
'X-Sentry-Project': project.name,
'X-Sentry-Reply-To': group_id_to_email(group.id),
}
self._send_mail(
subject=subject,
template=template,
html_template=html_template,
project=project,
group=group,
headers=headers,
context=context,
)
def notify_digest(self, project, digest):
context = {
'project': project,
'digest': digest,
}
self._send_mail(
subject=render_to_string('sentry/emails/digests/subject.txt', context).rstrip(),
template='sentry/emails/digests/body.txt',
html_template='sentry/emails/digests/body.html',
project=project,
context=context,
)
# Legacy compatibility
MailProcessor = MailPlugin
register(MailPlugin)
| bsd-3-clause | 8,034,038,789,124,717,000 | 29.796791 | 102 | 0.592464 | false |
anbangleo/NlsdeWeb | Python-3.6.0/Lib/test/test_warnings/__init__.py | 1 | 47896 | from contextlib import contextmanager
import linecache
import os
from io import StringIO
import re
import sys
import textwrap
import unittest
from test import support
from test.support.script_helper import assert_python_ok, assert_python_failure
from test.test_warnings.data import stacklevel as warning_tests
import warnings as original_warnings
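# Two fresh copies of the warnings module: one forced to use the pure Python
# implementation and one backed by the accelerated C _warnings module, so the
# test mix-ins below can exercise both.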
py_warnings = support.import_fresh_module('warnings', blocked=['_warnings'])
c_warnings = support.import_fresh_module('warnings', fresh=['_warnings'])
@contextmanager
def warnings_state(module):
"""Use a specific warnings implementation in warning_tests."""
global __warningregistry__
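    # Clear any existing warning registries so that warnings issued by earlier
    # tests do not suppress the ones these tests expect to see.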
for to_clear in (sys, warning_tests):
try:
to_clear.__warningregistry__.clear()
except AttributeError:
pass
try:
__warningregistry__.clear()
except NameError:
pass
original_warnings = warning_tests.warnings
original_filters = module.filters
try:
module.filters = original_filters[:]
module.simplefilter("once")
warning_tests.warnings = module
yield
finally:
warning_tests.warnings = original_warnings
module.filters = original_filters
class BaseTest:
"""Basic bookkeeping required for testing."""
def setUp(self):
self.old_unittest_module = unittest.case.warnings
# The __warningregistry__ needs to be in a pristine state for tests
# to work properly.
if '__warningregistry__' in globals():
del globals()['__warningregistry__']
if hasattr(warning_tests, '__warningregistry__'):
del warning_tests.__warningregistry__
if hasattr(sys, '__warningregistry__'):
del sys.__warningregistry__
# The 'warnings' module must be explicitly set so that the proper
# interaction between _warnings and 'warnings' can be controlled.
sys.modules['warnings'] = self.module
# Ensure that unittest.TestCase.assertWarns() uses the same warnings
        # module as warnings.catch_warnings(). Otherwise,
# warnings.catch_warnings() will be unable to remove the added filter.
unittest.case.warnings = self.module
super(BaseTest, self).setUp()
def tearDown(self):
sys.modules['warnings'] = original_warnings
unittest.case.warnings = self.old_unittest_module
super(BaseTest, self).tearDown()
class PublicAPITests(BaseTest):
"""Ensures that the correct values are exposed in the
public API.
"""
def test_module_all_attribute(self):
self.assertTrue(hasattr(self.module, '__all__'))
target_api = ["warn", "warn_explicit", "showwarning",
"formatwarning", "filterwarnings", "simplefilter",
"resetwarnings", "catch_warnings"]
self.assertSetEqual(set(self.module.__all__),
set(target_api))
class CPublicAPITests(PublicAPITests, unittest.TestCase):
module = c_warnings
class PyPublicAPITests(PublicAPITests, unittest.TestCase):
module = py_warnings
class FilterTests(BaseTest):
"""Testing the filtering functionality."""
def test_error(self):
with original_warnings.catch_warnings(module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("error", category=UserWarning)
self.assertRaises(UserWarning, self.module.warn,
"FilterTests.test_error")
def test_error_after_default(self):
with original_warnings.catch_warnings(module=self.module) as w:
self.module.resetwarnings()
            message = "FilterTests.test_error_after_default"
            def f():
                self.module.warn(message, UserWarning)
            with support.captured_stderr() as stderr:
                f()
            stderr = stderr.getvalue()
            self.assertIn("UserWarning: FilterTests.test_error_after_default",
stderr)
self.assertIn("self.module.warn(message, UserWarning)",
stderr)
self.module.filterwarnings("error", category=UserWarning)
self.assertRaises(UserWarning, f)
def test_ignore(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("ignore", category=UserWarning)
self.module.warn("FilterTests.test_ignore", UserWarning)
self.assertEqual(len(w), 0)
def test_ignore_after_default(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
message = "FilterTests.test_ignore_after_default"
def f():
self.module.warn(message, UserWarning)
f()
self.module.filterwarnings("ignore", category=UserWarning)
f()
f()
self.assertEqual(len(w), 1)
def test_always(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("always", category=UserWarning)
message = "FilterTests.test_always"
self.module.warn(message, UserWarning)
            self.assertEqual(w[-1].message.args[0], message)
            self.module.warn(message, UserWarning)
            self.assertEqual(w[-1].message.args[0], message)
def test_always_after_default(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
            message = "FilterTests.test_always_after_default"
def f():
self.module.warn(message, UserWarning)
f()
self.assertEqual(len(w), 1)
self.assertEqual(w[-1].message.args[0], message)
f()
self.assertEqual(len(w), 1)
self.module.filterwarnings("always", category=UserWarning)
f()
self.assertEqual(len(w), 2)
self.assertEqual(w[-1].message.args[0], message)
f()
self.assertEqual(len(w), 3)
self.assertEqual(w[-1].message.args[0], message)
def test_default(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("default", category=UserWarning)
message = UserWarning("FilterTests.test_default")
for x in range(2):
self.module.warn(message, UserWarning)
if x == 0:
self.assertEqual(w[-1].message, message)
del w[:]
elif x == 1:
self.assertEqual(len(w), 0)
else:
raise ValueError("loop variant unhandled")
def test_module(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("module", category=UserWarning)
message = UserWarning("FilterTests.test_module")
self.module.warn(message, UserWarning)
self.assertEqual(w[-1].message, message)
del w[:]
self.module.warn(message, UserWarning)
self.assertEqual(len(w), 0)
def test_once(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("once", category=UserWarning)
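            # With the "once" action only the first occurrence is reported,
            # regardless of the file or line the warning is attributed to.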
message = UserWarning("FilterTests.test_once")
self.module.warn_explicit(message, UserWarning, "__init__.py",
42)
self.assertEqual(w[-1].message, message)
del w[:]
self.module.warn_explicit(message, UserWarning, "__init__.py",
13)
self.assertEqual(len(w), 0)
self.module.warn_explicit(message, UserWarning, "test_warnings2.py",
42)
self.assertEqual(len(w), 0)
def test_inheritance(self):
with original_warnings.catch_warnings(module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("error", category=Warning)
self.assertRaises(UserWarning, self.module.warn,
"FilterTests.test_inheritance", UserWarning)
def test_ordering(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("ignore", category=UserWarning)
self.module.filterwarnings("error", category=UserWarning,
append=True)
del w[:]
try:
self.module.warn("FilterTests.test_ordering", UserWarning)
except UserWarning:
self.fail("order handling for actions failed")
self.assertEqual(len(w), 0)
def test_filterwarnings(self):
# Test filterwarnings().
# Implicitly also tests resetwarnings().
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.filterwarnings("error", "", Warning, "", 0)
self.assertRaises(UserWarning, self.module.warn, 'convert to error')
self.module.resetwarnings()
text = 'handle normally'
self.module.warn(text)
self.assertEqual(str(w[-1].message), text)
self.assertTrue(w[-1].category is UserWarning)
self.module.filterwarnings("ignore", "", Warning, "", 0)
text = 'filtered out'
self.module.warn(text)
self.assertNotEqual(str(w[-1].message), text)
self.module.resetwarnings()
self.module.filterwarnings("error", "hex*", Warning, "", 0)
self.assertRaises(UserWarning, self.module.warn, 'hex/oct')
text = 'nonmatching text'
self.module.warn(text)
self.assertEqual(str(w[-1].message), text)
self.assertTrue(w[-1].category is UserWarning)
def test_message_matching(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.simplefilter("ignore", UserWarning)
self.module.filterwarnings("error", "match", UserWarning)
self.assertRaises(UserWarning, self.module.warn, "match")
self.assertRaises(UserWarning, self.module.warn, "match prefix")
self.module.warn("suffix match")
self.assertEqual(w, [])
self.module.warn("something completely different")
self.assertEqual(w, [])
def test_mutate_filter_list(self):
class X:
def match(self, a):
L[:] = []
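        # Each filter's match() empties the filter list itself, so this checks
        # that mutating warnings.filters while it is being matched does not crash.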
L = [("default",X(),UserWarning,X(),0) for i in range(2)]
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.filters = L
self.module.warn_explicit(UserWarning("b"), None, "f.py", 42)
self.assertEqual(str(w[-1].message), "b")
def test_filterwarnings_duplicate_filters(self):
with original_warnings.catch_warnings(module=self.module):
self.module.resetwarnings()
self.module.filterwarnings("error", category=UserWarning)
self.assertEqual(len(self.module.filters), 1)
self.module.filterwarnings("ignore", category=UserWarning)
self.module.filterwarnings("error", category=UserWarning)
self.assertEqual(
len(self.module.filters), 2,
"filterwarnings inserted duplicate filter"
)
self.assertEqual(
self.module.filters[0][0], "error",
"filterwarnings did not promote filter to "
"the beginning of list"
)
def test_simplefilter_duplicate_filters(self):
with original_warnings.catch_warnings(module=self.module):
self.module.resetwarnings()
self.module.simplefilter("error", category=UserWarning)
self.assertEqual(len(self.module.filters), 1)
self.module.simplefilter("ignore", category=UserWarning)
self.module.simplefilter("error", category=UserWarning)
self.assertEqual(
len(self.module.filters), 2,
"simplefilter inserted duplicate filter"
)
self.assertEqual(
self.module.filters[0][0], "error",
"simplefilter did not promote filter to the beginning of list"
)
def test_append_duplicate(self):
with original_warnings.catch_warnings(module=self.module,
record=True) as w:
self.module.resetwarnings()
self.module.simplefilter("ignore")
self.module.simplefilter("error", append=True)
self.module.simplefilter("ignore", append=True)
self.module.warn("test_append_duplicate", category=UserWarning)
self.assertEqual(len(self.module.filters), 2,
"simplefilter inserted duplicate filter"
)
self.assertEqual(len(w), 0,
"appended duplicate changed order of filters"
)
class CFilterTests(FilterTests, unittest.TestCase):
module = c_warnings
class PyFilterTests(FilterTests, unittest.TestCase):
module = py_warnings
class WarnTests(BaseTest):
"""Test warnings.warn() and warnings.warn_explicit()."""
def test_message(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.simplefilter("once")
for i in range(4):
text = 'multi %d' %i # Different text on each call.
self.module.warn(text)
self.assertEqual(str(w[-1].message), text)
self.assertTrue(w[-1].category is UserWarning)
# Issue 3639
def test_warn_nonstandard_types(self):
# warn() should handle non-standard types without issue.
for ob in (Warning, None, 42):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.simplefilter("once")
self.module.warn(ob)
# Don't directly compare objects since
# ``Warning() != Warning()``.
self.assertEqual(str(w[-1].message), str(UserWarning(ob)))
def test_filename(self):
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner("spam1")
self.assertEqual(os.path.basename(w[-1].filename),
"stacklevel.py")
warning_tests.outer("spam2")
self.assertEqual(os.path.basename(w[-1].filename),
"stacklevel.py")
def test_stacklevel(self):
# Test stacklevel argument
# make sure all messages are different, so the warning won't be skipped
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner("spam3", stacklevel=1)
self.assertEqual(os.path.basename(w[-1].filename),
"stacklevel.py")
warning_tests.outer("spam4", stacklevel=1)
self.assertEqual(os.path.basename(w[-1].filename),
"stacklevel.py")
warning_tests.inner("spam5", stacklevel=2)
self.assertEqual(os.path.basename(w[-1].filename),
"__init__.py")
warning_tests.outer("spam6", stacklevel=2)
self.assertEqual(os.path.basename(w[-1].filename),
"stacklevel.py")
warning_tests.outer("spam6.5", stacklevel=3)
self.assertEqual(os.path.basename(w[-1].filename),
"__init__.py")
warning_tests.inner("spam7", stacklevel=9999)
self.assertEqual(os.path.basename(w[-1].filename),
"sys")
def test_stacklevel_import(self):
# Issue #24305: With stacklevel=2, module-level warnings should work.
support.unload('test.test_warnings.data.import_warning')
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.simplefilter('always')
import test.test_warnings.data.import_warning
self.assertEqual(len(w), 1)
self.assertEqual(w[0].filename, __file__)
def test_missing_filename_not_main(self):
# If __file__ is not specified and __main__ is not the module name,
# then __file__ should be set to the module name.
filename = warning_tests.__file__
try:
del warning_tests.__file__
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner("spam8", stacklevel=1)
self.assertEqual(w[-1].filename, warning_tests.__name__)
finally:
warning_tests.__file__ = filename
@unittest.skipUnless(hasattr(sys, 'argv'), 'test needs sys.argv')
def test_missing_filename_main_with_argv(self):
# If __file__ is not specified and the caller is __main__ and sys.argv
# exists, then use sys.argv[0] as the file.
filename = warning_tests.__file__
module_name = warning_tests.__name__
try:
del warning_tests.__file__
warning_tests.__name__ = '__main__'
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner('spam9', stacklevel=1)
self.assertEqual(w[-1].filename, sys.argv[0])
finally:
warning_tests.__file__ = filename
warning_tests.__name__ = module_name
def test_missing_filename_main_without_argv(self):
# If __file__ is not specified, the caller is __main__, and sys.argv
# is not set, then '__main__' is the file name.
filename = warning_tests.__file__
module_name = warning_tests.__name__
argv = sys.argv
try:
del warning_tests.__file__
warning_tests.__name__ = '__main__'
del sys.argv
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner('spam10', stacklevel=1)
self.assertEqual(w[-1].filename, '__main__')
finally:
warning_tests.__file__ = filename
warning_tests.__name__ = module_name
sys.argv = argv
def test_missing_filename_main_with_argv_empty_string(self):
# If __file__ is not specified, the caller is __main__, and sys.argv[0]
        # is the empty string, then '__main__' is the file name.
# Tests issue 2743.
file_name = warning_tests.__file__
module_name = warning_tests.__name__
argv = sys.argv
try:
del warning_tests.__file__
warning_tests.__name__ = '__main__'
sys.argv = ['']
with warnings_state(self.module):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
warning_tests.inner('spam11', stacklevel=1)
self.assertEqual(w[-1].filename, '__main__')
finally:
warning_tests.__file__ = file_name
warning_tests.__name__ = module_name
sys.argv = argv
def test_warn_explicit_non_ascii_filename(self):
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("always", category=UserWarning)
for filename in ("nonascii\xe9\u20ac", "surrogate\udc80"):
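                # Skip file names that cannot be represented in the filesystem
                # encoding on this platform.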
try:
os.fsencode(filename)
except UnicodeEncodeError:
continue
self.module.warn_explicit("text", UserWarning, filename, 1)
self.assertEqual(w[-1].filename, filename)
def test_warn_explicit_type_errors(self):
# warn_explicit() should error out gracefully if it is given objects
# of the wrong types.
# lineno is expected to be an integer.
self.assertRaises(TypeError, self.module.warn_explicit,
None, UserWarning, None, None)
# Either 'message' needs to be an instance of Warning or 'category'
# needs to be a subclass.
self.assertRaises(TypeError, self.module.warn_explicit,
None, None, None, 1)
# 'registry' must be a dict or None.
self.assertRaises((TypeError, AttributeError),
self.module.warn_explicit,
None, Warning, None, 1, registry=42)
def test_bad_str(self):
# issue 6415
# Warnings instance with a bad format string for __str__ should not
# trigger a bus error.
class BadStrWarning(Warning):
"""Warning with a bad format string for __str__."""
def __str__(self):
return ("A bad formatted string %(err)" %
{"err" : "there is no %(err)s"})
with self.assertRaises(ValueError):
self.module.warn(BadStrWarning())
def test_warning_classes(self):
class MyWarningClass(Warning):
pass
class NonWarningSubclass:
pass
# passing a non-subclass of Warning should raise a TypeError
with self.assertRaises(TypeError) as cm:
self.module.warn('bad warning category', '')
self.assertIn('category must be a Warning subclass, not ',
str(cm.exception))
with self.assertRaises(TypeError) as cm:
self.module.warn('bad warning category', NonWarningSubclass)
self.assertIn('category must be a Warning subclass, not ',
str(cm.exception))
# check that warning instances also raise a TypeError
with self.assertRaises(TypeError) as cm:
self.module.warn('bad warning category', MyWarningClass())
self.assertIn('category must be a Warning subclass, not ',
str(cm.exception))
with original_warnings.catch_warnings(module=self.module):
self.module.resetwarnings()
self.module.filterwarnings('default')
with self.assertWarns(MyWarningClass) as cm:
self.module.warn('good warning category', MyWarningClass)
self.assertEqual('good warning category', str(cm.warning))
with self.assertWarns(UserWarning) as cm:
self.module.warn('good warning category', None)
self.assertEqual('good warning category', str(cm.warning))
with self.assertWarns(MyWarningClass) as cm:
self.module.warn('good warning category', MyWarningClass)
self.assertIsInstance(cm.warning, Warning)
class CWarnTests(WarnTests, unittest.TestCase):
module = c_warnings
# As an early adopter, we sanity check the
# test.support.import_fresh_module utility function
def test_accelerated(self):
self.assertFalse(original_warnings is self.module)
self.assertFalse(hasattr(self.module.warn, '__code__'))
class PyWarnTests(WarnTests, unittest.TestCase):
module = py_warnings
# As an early adopter, we sanity check the
# test.support.import_fresh_module utility function
def test_pure_python(self):
self.assertFalse(original_warnings is self.module)
self.assertTrue(hasattr(self.module.warn, '__code__'))
class WCmdLineTests(BaseTest):
def test_improper_input(self):
# Uses the private _setoption() function to test the parsing
# of command-line warning arguments
with original_warnings.catch_warnings(module=self.module):
self.assertRaises(self.module._OptionError,
self.module._setoption, '1:2:3:4:5:6')
self.assertRaises(self.module._OptionError,
self.module._setoption, 'bogus::Warning')
self.assertRaises(self.module._OptionError,
self.module._setoption, 'ignore:2::4:-5')
self.module._setoption('error::Warning::0')
self.assertRaises(UserWarning, self.module.warn, 'convert to error')
class CWCmdLineTests(WCmdLineTests, unittest.TestCase):
module = c_warnings
class PyWCmdLineTests(WCmdLineTests, unittest.TestCase):
module = py_warnings
def test_improper_option(self):
# Same as above, but check that the message is printed out when
# the interpreter is executed. This also checks that options are
# actually parsed at all.
rc, out, err = assert_python_ok("-Wxxx", "-c", "pass")
self.assertIn(b"Invalid -W option ignored: invalid action: 'xxx'", err)
def test_warnings_bootstrap(self):
# Check that the warnings module does get loaded when -W<some option>
# is used (see issue #10372 for an example of silent bootstrap failure).
rc, out, err = assert_python_ok("-Wi", "-c",
"import sys; sys.modules['warnings'].warn('foo', RuntimeWarning)")
# '-Wi' was observed
self.assertFalse(out.strip())
self.assertNotIn(b'RuntimeWarning', err)
class _WarningsTests(BaseTest, unittest.TestCase):
"""Tests specific to the _warnings module."""
module = c_warnings
def test_filter(self):
# Everything should function even if 'filters' is not in warnings.
with original_warnings.catch_warnings(module=self.module) as w:
self.module.filterwarnings("error", "", Warning, "", 0)
self.assertRaises(UserWarning, self.module.warn,
'convert to error')
del self.module.filters
self.assertRaises(UserWarning, self.module.warn,
'convert to error')
def test_onceregistry(self):
# Replacing or removing the onceregistry should be okay.
global __warningregistry__
message = UserWarning('onceregistry test')
try:
original_registry = self.module.onceregistry
__warningregistry__ = {}
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
self.module.filterwarnings("once", category=UserWarning)
self.module.warn_explicit(message, UserWarning, "file", 42)
self.assertEqual(w[-1].message, message)
del w[:]
self.module.warn_explicit(message, UserWarning, "file", 42)
self.assertEqual(len(w), 0)
# Test the resetting of onceregistry.
self.module.onceregistry = {}
__warningregistry__ = {}
self.module.warn('onceregistry test')
self.assertEqual(w[-1].message.args, message.args)
# Removal of onceregistry is okay.
del w[:]
del self.module.onceregistry
__warningregistry__ = {}
self.module.warn_explicit(message, UserWarning, "file", 42)
self.assertEqual(len(w), 0)
finally:
self.module.onceregistry = original_registry
def test_default_action(self):
# Replacing or removing defaultaction should be okay.
message = UserWarning("defaultaction test")
original = self.module.defaultaction
try:
with original_warnings.catch_warnings(record=True,
module=self.module) as w:
self.module.resetwarnings()
registry = {}
self.module.warn_explicit(message, UserWarning, "<test>", 42,
registry=registry)
self.assertEqual(w[-1].message, message)
self.assertEqual(len(w), 1)
# One actual registry key plus the "version" key
self.assertEqual(len(registry), 2)
self.assertIn("version", registry)
del w[:]
# Test removal.
del self.module.defaultaction
__warningregistry__ = {}
registry = {}
self.module.warn_explicit(message, UserWarning, "<test>", 43,
registry=registry)
self.assertEqual(w[-1].message, message)
self.assertEqual(len(w), 1)
self.assertEqual(len(registry), 2)
del w[:]
# Test setting.
self.module.defaultaction = "ignore"
__warningregistry__ = {}
registry = {}
self.module.warn_explicit(message, UserWarning, "<test>", 44,
registry=registry)
self.assertEqual(len(w), 0)
finally:
self.module.defaultaction = original
def test_showwarning_missing(self):
# Test that showwarning() missing is okay.
text = 'del showwarning test'
with original_warnings.catch_warnings(module=self.module):
self.module.filterwarnings("always", category=UserWarning)
del self.module.showwarning
with support.captured_output('stderr') as stream:
self.module.warn(text)
result = stream.getvalue()
self.assertIn(text, result)
def test_showwarnmsg_missing(self):
# Test that _showwarnmsg() missing is okay.
text = 'del _showwarnmsg test'
with original_warnings.catch_warnings(module=self.module):
self.module.filterwarnings("always", category=UserWarning)
del self.module._showwarnmsg
with support.captured_output('stderr') as stream:
self.module.warn(text)
result = stream.getvalue()
self.assertIn(text, result)
def test_showwarning_not_callable(self):
with original_warnings.catch_warnings(module=self.module):
self.module.filterwarnings("always", category=UserWarning)
self.module.showwarning = print
with support.captured_output('stdout'):
self.module.warn('Warning!')
self.module.showwarning = 23
self.assertRaises(TypeError, self.module.warn, "Warning!")
def test_show_warning_output(self):
        # With showwarning() missing, make sure that output is okay.
text = 'test show_warning'
with original_warnings.catch_warnings(module=self.module):
self.module.filterwarnings("always", category=UserWarning)
del self.module.showwarning
with support.captured_output('stderr') as stream:
warning_tests.inner(text)
result = stream.getvalue()
self.assertEqual(result.count('\n'), 2,
"Too many newlines in %r" % result)
first_line, second_line = result.split('\n', 1)
expected_file = os.path.splitext(warning_tests.__file__)[0] + '.py'
first_line_parts = first_line.rsplit(':', 3)
path, line, warning_class, message = first_line_parts
line = int(line)
self.assertEqual(expected_file, path)
self.assertEqual(warning_class, ' ' + UserWarning.__name__)
self.assertEqual(message, ' ' + text)
expected_line = ' ' + linecache.getline(path, line).strip() + '\n'
assert expected_line
self.assertEqual(second_line, expected_line)
def test_filename_none(self):
# issue #12467: race condition if a warning is emitted at shutdown
globals_dict = globals()
oldfile = globals_dict['__file__']
try:
catch = original_warnings.catch_warnings(record=True,
module=self.module)
with catch as w:
self.module.filterwarnings("always", category=UserWarning)
globals_dict['__file__'] = None
original_warnings.warn('test', UserWarning)
self.assertTrue(len(w))
finally:
globals_dict['__file__'] = oldfile
def test_stderr_none(self):
rc, stdout, stderr = assert_python_ok("-c",
"import sys; sys.stderr = None; "
"import warnings; warnings.simplefilter('always'); "
"warnings.warn('Warning!')")
self.assertEqual(stdout, b'')
self.assertNotIn(b'Warning!', stderr)
self.assertNotIn(b'Error', stderr)
class WarningsDisplayTests(BaseTest):
"""Test the displaying of warnings and the ability to overload functions
related to displaying warnings."""
def test_formatwarning(self):
message = "msg"
category = Warning
file_name = os.path.splitext(warning_tests.__file__)[0] + '.py'
line_num = 3
file_line = linecache.getline(file_name, line_num).strip()
format = "%s:%s: %s: %s\n %s\n"
expect = format % (file_name, line_num, category.__name__, message,
file_line)
self.assertEqual(expect, self.module.formatwarning(message,
category, file_name, line_num))
# Test the 'line' argument.
file_line += " for the win!"
expect = format % (file_name, line_num, category.__name__, message,
file_line)
self.assertEqual(expect, self.module.formatwarning(message,
category, file_name, line_num, file_line))
def test_showwarning(self):
file_name = os.path.splitext(warning_tests.__file__)[0] + '.py'
line_num = 3
expected_file_line = linecache.getline(file_name, line_num).strip()
message = 'msg'
category = Warning
file_object = StringIO()
expect = self.module.formatwarning(message, category, file_name,
line_num)
self.module.showwarning(message, category, file_name, line_num,
file_object)
self.assertEqual(file_object.getvalue(), expect)
# Test 'line' argument.
expected_file_line += "for the win!"
expect = self.module.formatwarning(message, category, file_name,
line_num, expected_file_line)
file_object = StringIO()
self.module.showwarning(message, category, file_name, line_num,
file_object, expected_file_line)
self.assertEqual(expect, file_object.getvalue())
class CWarningsDisplayTests(WarningsDisplayTests, unittest.TestCase):
module = c_warnings
class PyWarningsDisplayTests(WarningsDisplayTests, unittest.TestCase):
module = py_warnings
def test_tracemalloc(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, 'w') as fp:
fp.write(textwrap.dedent("""
def func():
f = open(__file__)
# Emit ResourceWarning
f = None
func()
"""))
res = assert_python_ok('-Wd', '-X', 'tracemalloc=2', support.TESTFN)
stderr = res.err.decode('ascii', 'replace')
# normalize newlines
stderr = '\n'.join(stderr.splitlines())
stderr = re.sub('<.*>', '<...>', stderr)
expected = textwrap.dedent('''
{fname}:5: ResourceWarning: unclosed file <...>
f = None
Object allocated at (most recent call first):
File "{fname}", lineno 3
f = open(__file__)
File "{fname}", lineno 7
func()
''')
expected = expected.format(fname=support.TESTFN).strip()
self.assertEqual(stderr, expected)
class CatchWarningTests(BaseTest):
"""Test catch_warnings()."""
def test_catch_warnings_restore(self):
wmod = self.module
orig_filters = wmod.filters
orig_showwarning = wmod.showwarning
# Ensure both showwarning and filters are restored when recording
with wmod.catch_warnings(module=wmod, record=True):
wmod.filters = wmod.showwarning = object()
self.assertTrue(wmod.filters is orig_filters)
self.assertTrue(wmod.showwarning is orig_showwarning)
# Same test, but with recording disabled
with wmod.catch_warnings(module=wmod, record=False):
wmod.filters = wmod.showwarning = object()
self.assertTrue(wmod.filters is orig_filters)
self.assertTrue(wmod.showwarning is orig_showwarning)
def test_catch_warnings_recording(self):
wmod = self.module
# Ensure warnings are recorded when requested
with wmod.catch_warnings(module=wmod, record=True) as w:
self.assertEqual(w, [])
self.assertTrue(type(w) is list)
wmod.simplefilter("always")
wmod.warn("foo")
self.assertEqual(str(w[-1].message), "foo")
wmod.warn("bar")
self.assertEqual(str(w[-1].message), "bar")
self.assertEqual(str(w[0].message), "foo")
self.assertEqual(str(w[1].message), "bar")
del w[:]
self.assertEqual(w, [])
# Ensure warnings are not recorded when not requested
orig_showwarning = wmod.showwarning
with wmod.catch_warnings(module=wmod, record=False) as w:
self.assertTrue(w is None)
self.assertTrue(wmod.showwarning is orig_showwarning)
def test_catch_warnings_reentry_guard(self):
wmod = self.module
# Ensure catch_warnings is protected against incorrect usage
x = wmod.catch_warnings(module=wmod, record=True)
self.assertRaises(RuntimeError, x.__exit__)
with x:
self.assertRaises(RuntimeError, x.__enter__)
# Same test, but with recording disabled
x = wmod.catch_warnings(module=wmod, record=False)
self.assertRaises(RuntimeError, x.__exit__)
with x:
self.assertRaises(RuntimeError, x.__enter__)
def test_catch_warnings_defaults(self):
wmod = self.module
orig_filters = wmod.filters
orig_showwarning = wmod.showwarning
# Ensure default behaviour is not to record warnings
with wmod.catch_warnings(module=wmod) as w:
self.assertTrue(w is None)
self.assertTrue(wmod.showwarning is orig_showwarning)
self.assertTrue(wmod.filters is not orig_filters)
self.assertTrue(wmod.filters is orig_filters)
if wmod is sys.modules['warnings']:
# Ensure the default module is this one
with wmod.catch_warnings() as w:
self.assertTrue(w is None)
self.assertTrue(wmod.showwarning is orig_showwarning)
self.assertTrue(wmod.filters is not orig_filters)
self.assertTrue(wmod.filters is orig_filters)
def test_record_override_showwarning_before(self):
        # Issue #28835: If warnings.showwarning() was overridden, make sure
# that catch_warnings(record=True) overrides it again.
text = "This is a warning"
wmod = self.module
my_log = []
def my_logger(message, category, filename, lineno, file=None, line=None):
nonlocal my_log
my_log.append(message)
# Override warnings.showwarning() before calling catch_warnings()
with support.swap_attr(wmod, 'showwarning', my_logger):
with wmod.catch_warnings(module=wmod, record=True) as log:
self.assertIsNot(wmod.showwarning, my_logger)
wmod.simplefilter("always")
wmod.warn(text)
self.assertIs(wmod.showwarning, my_logger)
self.assertEqual(len(log), 1, log)
self.assertEqual(log[0].message.args[0], text)
self.assertEqual(my_log, [])
def test_record_override_showwarning_inside(self):
# Issue #28835: It is possible to override warnings.showwarning()
# in the catch_warnings(record=True) context manager.
text = "This is a warning"
wmod = self.module
my_log = []
def my_logger(message, category, filename, lineno, file=None, line=None):
nonlocal my_log
my_log.append(message)
with wmod.catch_warnings(module=wmod, record=True) as log:
wmod.simplefilter("always")
wmod.showwarning = my_logger
wmod.warn(text)
self.assertEqual(len(my_log), 1, my_log)
self.assertEqual(my_log[0].args[0], text)
self.assertEqual(log, [])
def test_check_warnings(self):
# Explicit tests for the test.support convenience wrapper
wmod = self.module
if wmod is not sys.modules['warnings']:
self.skipTest('module to test is not loaded warnings module')
with support.check_warnings(quiet=False) as w:
self.assertEqual(w.warnings, [])
wmod.simplefilter("always")
wmod.warn("foo")
self.assertEqual(str(w.message), "foo")
wmod.warn("bar")
self.assertEqual(str(w.message), "bar")
self.assertEqual(str(w.warnings[0].message), "foo")
self.assertEqual(str(w.warnings[1].message), "bar")
w.reset()
self.assertEqual(w.warnings, [])
with support.check_warnings():
# defaults to quiet=True without argument
pass
with support.check_warnings(('foo', UserWarning)):
wmod.warn("foo")
with self.assertRaises(AssertionError):
with support.check_warnings(('', RuntimeWarning)):
# defaults to quiet=False with argument
pass
with self.assertRaises(AssertionError):
with support.check_warnings(('foo', RuntimeWarning)):
wmod.warn("foo")
class CCatchWarningTests(CatchWarningTests, unittest.TestCase):
module = c_warnings
class PyCatchWarningTests(CatchWarningTests, unittest.TestCase):
module = py_warnings
class EnvironmentVariableTests(BaseTest):
def test_single_warning(self):
rc, stdout, stderr = assert_python_ok("-c",
"import sys; sys.stdout.write(str(sys.warnoptions))",
PYTHONWARNINGS="ignore::DeprecationWarning")
self.assertEqual(stdout, b"['ignore::DeprecationWarning']")
def test_comma_separated_warnings(self):
rc, stdout, stderr = assert_python_ok("-c",
"import sys; sys.stdout.write(str(sys.warnoptions))",
PYTHONWARNINGS="ignore::DeprecationWarning,ignore::UnicodeWarning")
self.assertEqual(stdout,
b"['ignore::DeprecationWarning', 'ignore::UnicodeWarning']")
def test_envvar_and_command_line(self):
rc, stdout, stderr = assert_python_ok("-Wignore::UnicodeWarning", "-c",
"import sys; sys.stdout.write(str(sys.warnoptions))",
PYTHONWARNINGS="ignore::DeprecationWarning")
self.assertEqual(stdout,
b"['ignore::DeprecationWarning', 'ignore::UnicodeWarning']")
def test_conflicting_envvar_and_command_line(self):
rc, stdout, stderr = assert_python_failure("-Werror::DeprecationWarning", "-c",
"import sys, warnings; sys.stdout.write(str(sys.warnoptions)); "
"warnings.warn('Message', DeprecationWarning)",
PYTHONWARNINGS="default::DeprecationWarning")
self.assertEqual(stdout,
b"['default::DeprecationWarning', 'error::DeprecationWarning']")
self.assertEqual(stderr.splitlines(),
[b"Traceback (most recent call last):",
b" File \"<string>\", line 1, in <module>",
b"DeprecationWarning: Message"])
@unittest.skipUnless(sys.getfilesystemencoding() != 'ascii',
'requires non-ascii filesystemencoding')
def test_nonascii(self):
rc, stdout, stderr = assert_python_ok("-c",
"import sys; sys.stdout.write(str(sys.warnoptions))",
PYTHONIOENCODING="utf-8",
PYTHONWARNINGS="ignore:DeprecaciónWarning")
self.assertEqual(stdout,
"['ignore:DeprecaciónWarning']".encode('utf-8'))
class CEnvironmentVariableTests(EnvironmentVariableTests, unittest.TestCase):
module = c_warnings
class PyEnvironmentVariableTests(EnvironmentVariableTests, unittest.TestCase):
module = py_warnings
class BootstrapTest(unittest.TestCase):
def test_issue_8766(self):
# "import encodings" emits a warning whereas the warnings is not loaded
# or not completely loaded (warnings imports indirectly encodings by
# importing linecache) yet
with support.temp_cwd() as cwd, support.temp_cwd('encodings'):
# encodings loaded by initfsencoding()
assert_python_ok('-c', 'pass', PYTHONPATH=cwd)
# Use -W to load warnings module at startup
assert_python_ok('-c', 'pass', '-W', 'always', PYTHONPATH=cwd)
class FinalizationTest(unittest.TestCase):
@support.requires_type_collecting
def test_finalization(self):
# Issue #19421: warnings.warn() should not crash
# during Python finalization
code = """
import warnings
warn = warnings.warn
class A:
def __del__(self):
warn("test")
a=A()
"""
rc, out, err = assert_python_ok("-c", code)
# note: "__main__" filename is not correct, it should be the name
# of the script
self.assertEqual(err, b'__main__:7: UserWarning: test')
def test_late_resource_warning(self):
# Issue #21925: Emitting a ResourceWarning late during the Python
# shutdown must be logged.
expected = b"sys:1: ResourceWarning: unclosed file "
# don't import the warnings module
# (_warnings will try to import it)
code = "f = open(%a)" % __file__
rc, out, err = assert_python_ok("-Wd", "-c", code)
self.assertTrue(err.startswith(expected), ascii(err))
# import the warnings module
code = "import warnings; f = open(%a)" % __file__
rc, out, err = assert_python_ok("-Wd", "-c", code)
self.assertTrue(err.startswith(expected), ascii(err))
def setUpModule():
py_warnings.onceregistry.clear()
c_warnings.onceregistry.clear()
tearDownModule = setUpModule
if __name__ == "__main__":
unittest.main()
| mit | 6,476,962,123,155,680,000 | 41.049166 | 87 | 0.58642 | false |
psychopy/psychopy | psychopy/gui/wxgui.py | 1 | 15627 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""To build simple dialogues etc. (requires wxPython)
"""
from __future__ import absolute_import, print_function
from builtins import str
from builtins import super
from builtins import range
from psychopy import logging
import wx
import numpy
import os
from psychopy.localization import _translate
from pkg_resources import parse_version
OK = wx.ID_OK
thisVer = parse_version(wx.__version__)
def ensureWxApp():
# make sure there's a wxApp prior to showing a gui, e.g., for expInfo
# dialog
try:
wx.Dialog(None, -1) # not shown; FileDialog gives same exception
return True
except wx._core.PyNoAppError:
if thisVer < parse_version('2.9'):
return wx.PySimpleApp()
elif thisVer >= parse_version('4.0') and thisVer < parse_version('4.1'):
raise Exception(
"wx>=4.0 clashes with pyglet and making it unsafe "
"as a PsychoPy gui helper. Please install PyQt (4 or 5)"
" or wxPython3 instead.")
else:
return wx.App(False)
class Dlg(wx.Dialog):
"""A simple dialogue box. You can add text or input boxes
(sequentially) and then retrieve the values.
see also the function *dlgFromDict* for an **even simpler** version
**Example:** ::
from psychopy import gui
myDlg = gui.Dlg(title="JWP's experiment")
myDlg.addText('Subject info')
myDlg.addField('Name:')
myDlg.addField('Age:', 21)
myDlg.addText('Experiment Info')
myDlg.addField('Grating Ori:',45)
myDlg.addField('Group:', choices=["Test", "Control"])
myDlg.show() # show dialog and wait for OK or Cancel
if myDlg.OK: # then the user pressed OK
thisInfo = myDlg.data
print(thisInfo)
else:
print('user cancelled')
"""
def __init__(self, title=_translate('PsychoPy dialogue'),
pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.DEFAULT_DIALOG_STYLE | wx.DIALOG_NO_PARENT,
labelButtonOK=_translate(" OK "),
labelButtonCancel=_translate(" Cancel ")):
style = style | wx.RESIZE_BORDER
global app # avoid recreating for every gui
if pos is None:
pos = wx.DefaultPosition
app = ensureWxApp()
super().__init__(parent=None, id=-1, title=title, style=style, pos=pos)
self.inputFields = []
self.inputFieldTypes = []
self.inputFieldNames = []
self.data = []
# prepare a frame in which to hold objects
self.sizer = wx.BoxSizer(wx.VERTICAL)
# self.addText('') # insert some space at top of dialogue
self.pos = pos
self.labelButtonOK = labelButtonOK
self.labelButtonCancel = labelButtonCancel
def addText(self, text, color=''):
# the horizontal extent can depend on the locale and font in use:
font = self.GetFont()
dc = wx.WindowDC(self)
dc.SetFont(font)
textWidth, textHeight = dc.GetTextExtent(text)
textLength = wx.Size(textWidth + 50, textHeight)
_style = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_CENTER_HORIZONTAL
myTxt = wx.StaticText(self, -1, label=text, style=_style,
size=textLength)
if len(color):
myTxt.SetForegroundColour(color)
self.sizer.Add(myTxt, 1, wx.ALIGN_CENTER)
def addField(self, label='', initial='', color='', choices=None, tip=''):
"""Adds a (labelled) input field to the dialogue box, optional text
color and tooltip. Returns a handle to the field (but not to the
label). If choices is a list or tuple, it will create a dropdown
selector.
"""
self.inputFieldNames.append(label)
if choices:
self.inputFieldTypes.append(str)
else:
self.inputFieldTypes.append(type(initial))
if type(initial) == numpy.ndarray:
initial = initial.tolist() # convert numpy arrays to lists
container = wx.GridSizer(cols=2, vgap=0, hgap=10)
# create label
font = self.GetFont()
dc = wx.WindowDC(self)
dc.SetFont(font)
labelWidth, labelHeight = dc.GetTextExtent(label)
labelLength = wx.Size(labelWidth + 16, labelHeight)
inputLabel = wx.StaticText(self, -1, label,
size=labelLength,
style=wx.ALIGN_RIGHT)
if len(color):
inputLabel.SetForegroundColour(color)
_style = wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT
container.Add(inputLabel, 1, _style)
# create input control
if type(initial) == bool:
inputBox = wx.CheckBox(self, -1)
inputBox.SetValue(initial)
elif not choices:
inputWidth, inputHeight = dc.GetTextExtent(str(initial))
inputLength = wx.Size(max(50, inputWidth + 16),
max(25, inputHeight + 8))
inputBox = wx.TextCtrl(self, -1, str(initial),
size=inputLength)
else:
inputBox = wx.Choice(self, -1,
choices=[str(option)
for option in list(choices)])
# Somewhat dirty hack that allows us to treat the choice just like
# an input box when retrieving the data
inputBox.GetValue = inputBox.GetStringSelection
initial = choices.index(initial) if initial in choices else 0
inputBox.SetSelection(initial)
if len(color):
inputBox.SetForegroundColour(color)
if len(tip):
inputBox.SetToolTip(wx.ToolTip(tip))
container.Add(inputBox, 1, wx.ALIGN_CENTER_VERTICAL)
self.sizer.Add(container, 1, wx.ALIGN_CENTER)
self.inputFields.append(inputBox) # store this to get data back on OK
return inputBox
def addFixedField(self, label='', value='', tip=''):
"""Adds a field to the dialogue box (like addField) but the
field cannot be edited. e.g. Display experiment version.
tool-tips are disabled (by wx).
"""
thisField = self.addField(label, value, color='Gray', tip=tip)
# wx disables tooltips too; we pass them in anyway
thisField.Disable()
return thisField
def display(self):
"""Presents the dialog and waits for the user to press OK or CANCEL.
If user presses OK button, function returns a list containing the
updated values coming from each of the input fields created.
Otherwise, None is returned.
:return: self.data
"""
# add buttons for OK and Cancel
buttons = wx.BoxSizer(wx.HORIZONTAL)
OK = wx.Button(self, wx.ID_OK, self.labelButtonOK)
OK.SetDefault()
buttons.Add(OK)
CANCEL = wx.Button(self, wx.ID_CANCEL, self.labelButtonCancel)
buttons.Add(CANCEL)
self.sizer.Add(buttons, 1, flag=wx.ALIGN_RIGHT, border=5)
self.SetSizerAndFit(self.sizer)
if self.pos is None:
self.Center()
if self.ShowModal() == wx.ID_OK:
self.data = []
# get data from input fields
for n in range(len(self.inputFields)):
thisName = self.inputFieldNames[n]
thisVal = self.inputFields[n].GetValue()
thisType = self.inputFieldTypes[n]
# try to handle different types of input from strings
logging.debug("%s: %s" % (self.inputFieldNames[n],
str(thisVal)))
if thisType in (tuple, list, float, int):
# probably a tuple or list
exec("self.data.append(" + thisVal + ")") # evaluate it
elif thisType == numpy.ndarray:
exec("self.data.append(numpy.array(" + thisVal + "))")
elif thisType in (str, bool):
self.data.append(thisVal)
else:
logging.warning('unknown type:' + self.inputFieldNames[n])
self.data.append(thisVal)
self.OK = True
else:
self.OK = False
self.Destroy()
if self.OK:
return self.data
def show(self):
"""Presents the dialog and waits for the user to press either
OK or CANCEL.
        When they do, dlg.OK will be set to True or False (according to
        which button they pressed). If OK==True then dlg.data will be
populated with a list of values coming from each of the input
fields created.
"""
return self.display()
class DlgFromDict(Dlg):
"""Creates a dialogue box that represents a dictionary of values.
    Any values changed by the user are updated (in-place) by this
    dialogue box.
Parameters
----------
sortKeys : bool
Whether the dictionary keys should be ordered alphabetically
for displaying.
copyDict : bool
If False, modify ``dictionary`` in-place. If True, a copy of
the dictionary is created, and the altered version (after
user interaction) can be retrieved from
:attr:~`psychopy.gui.DlgFromDict.dictionary`.
show : bool
Whether to immediately display the dialog upon instantiation.
If False, it can be displayed at a later time by calling
its `show()` method.
e.g.:
::
info = {'Observer':'jwp', 'GratingOri':45,
'ExpVersion': 1.1, 'Group': ['Test', 'Control']}
infoDlg = gui.DlgFromDict(dictionary=info,
title='TestExperiment', fixed=['ExpVersion'])
if infoDlg.OK:
print(info)
else:
print('User Cancelled')
In the code above, the contents of *info* will be updated to the values
returned by the dialogue box.
If the user cancels (rather than pressing OK),
then the dictionary remains unchanged. If you want to check whether
the user hit OK, then check whether DlgFromDict.OK equals
True or False
See GUI.py for a usage demo, including order and tip (tooltip).
"""
def __init__(self, dictionary, title='', fixed=None, order=None, tip=None,
sortKeys=True, copyDict=False, show=True,
sort_keys=None, copy_dict=None):
# We don't explicitly check for None identity
# for backward-compatibility reasons.
if not fixed:
fixed = []
if not order:
order = []
if not tip:
tip = dict()
# app = ensureWxApp() done by Dlg
super().__init__(title)
if copyDict:
self.dictionary = dictionary.copy()
else:
self.dictionary = dictionary
self._keys = list(self.dictionary.keys())
if sortKeys:
self._keys.sort()
if order:
self._keys = list(order) + list(set(self._keys).difference(set(order)))
types = dict()
for field in self._keys:
types[field] = type(self.dictionary[field])
tooltip = ''
if field in tip:
tooltip = tip[field]
if field in fixed:
self.addFixedField(field, self.dictionary[field], tip=tooltip)
elif type(self.dictionary[field]) in [list, tuple]:
self.addField(field, choices=self.dictionary[field],
tip=tooltip)
else:
self.addField(field, self.dictionary[field], tip=tooltip)
if show:
self.show()
def show(self):
"""Display the dialog.
"""
super().show()
if self.OK:
for n, thisKey in enumerate(self._keys):
self.dictionary[thisKey] = self.data[n]
def fileSaveDlg(initFilePath="", initFileName="",
prompt=_translate("Select file to save"),
allowed=None):
"""A simple dialogue allowing write access to the file system.
(Useful in case you collect an hour of data and then try to
save to a non-existent directory!!)
:parameters:
initFilePath: string
default file path on which to open the dialog
initFileName: string
default file name, as suggested file
prompt: string (default "Select file to open")
can be set to custom prompts
allowed: string
A string to specify file filters.
e.g. "BMP files (*.bmp)|*.bmp|GIF files (*.gif)|*.gif"
See http://www.wxpython.org/docs/api/wx.FileDialog-class.html
for further details
If initFilePath or initFileName are empty or invalid then
current path and empty names are used to start search.
    If the user cancels, then None is returned.
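    Illustrative usage (the file name and filter shown here are arbitrary
    examples, not defaults)::
        fullPath = fileSaveDlg(initFileName='data.psydat',
                               allowed="PsychoPy data (*.psydat)|*.psydat")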
"""
if allowed is None:
allowed = "All files (*.*)|*.*"
# "txt (*.txt)|*.txt"
# "pickled files (*.pickle, *.pkl)|*.pickle"
# "shelved files (*.shelf)|*.shelf"
global app # avoid recreating for every gui
app = ensureWxApp()
dlg = wx.FileDialog(None, prompt, initFilePath,
initFileName, allowed, wx.FD_SAVE)
if dlg.ShowModal() == OK:
# get names of images and their directory
outName = dlg.GetFilename()
outPath = dlg.GetDirectory()
dlg.Destroy()
# tmpApp.Destroy() # this causes an error message for some reason
fullPath = os.path.join(outPath, outName)
else:
fullPath = None
return fullPath
def fileOpenDlg(tryFilePath="",
tryFileName="",
prompt=_translate("Select file(s) to open"),
allowed=None):
"""A simple dialogue allowing read access to the file system.
:parameters:
tryFilePath: string
default file path on which to open the dialog
tryFileName: string
default file name, as suggested file
prompt: string (default "Select file to open")
can be set to custom prompts
allowed: string (available since v1.62.01)
a string to specify file filters.
e.g. "BMP files (*.bmp)|*.bmp|GIF files (*.gif)|*.gif"
See http://www.wxpython.org/docs/api/wx.FileDialog-class.html
for further details
If tryFilePath or tryFileName are empty or invalid then
current path and empty names are used to start search.
If user cancels, then None is returned.
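    Illustrative usage (the filter string is an arbitrary example; a list of
    selected paths, or None, is returned)::
        fullPaths = fileOpenDlg(allowed="txt (*.txt,*.dlm,*.csv)|*.txt;*.dlm;*.csv")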
"""
if allowed is None:
allowed = ("PsychoPy Data (*.psydat)|*.psydat|"
"txt (*.txt,*.dlm,*.csv)|*.txt;*.dlm;*.csv|"
"pickled files (*.pickle, *.pkl)|*.pickle|"
"shelved files (*.shelf)|*.shelf|"
"All files (*.*)|*.*")
global app # avoid recreating for every gui
app = ensureWxApp()
dlg = wx.FileDialog(None, prompt, tryFilePath, tryFileName, allowed,
wx.FD_OPEN | wx.FD_FILE_MUST_EXIST | wx.FD_MULTIPLE)
if dlg.ShowModal() == OK:
# get names of images and their directory
fullPaths = dlg.GetPaths()
else:
fullPaths = None
dlg.Destroy()
return fullPaths
| gpl-3.0 | 6,954,707,252,832,250,000 | 35.769412 | 83 | 0.580278 | false |
mvondracek/wifimitm | wifimitm/tests/test_capture.py | 1 | 1702 | #!/usr/bin/env python3
"""
Unit tests for capture module
Automation of MitM Attack on WiFi Networks
Bachelor's Thesis UIFS FIT VUT
Martin Vondracek
2016
"""
import time as _time
import typing as _typing
import unittest as _unittest
from wifimitm.updatableProcess import UpdatableProcess
from wifimitm.capture import Dumpcap
from wifimitm.model import WirelessInterface as _WirelessInterface
__author__ = 'Martin Vondracek'
__email__ = '[email protected]'
class TestDumpcap(_unittest.TestCase):
def __init__(self, methodName='runTest'):
super().__init__(methodName)
self.process = None # type: _typing.Optional[Dumpcap]
@classmethod
def setUpClass(cls):
# NOTE: `cls.network_interface_name` needs to be a valid wireless interface name
cls.network_interface_name = 'wlp1s0' # type: str
cls.network_interface_obj = _WirelessInterface(cls.network_interface_name) # type: _WirelessInterface
def tearDown(self):
if self.process and issubclass(type(self.process), UpdatableProcess):
self.process.cleanup()
del self.process
def test__init__(self):
self.process = Dumpcap(self.network_interface_obj)
def test_update(self):
with Dumpcap(self.network_interface_obj) as self.process:
self.assertEqual(self.process.state, type(self.process).State.STARTED)
_time.sleep(1) # some time for dumpcap to generate some output
self.process.update()
self.assertNotEqual(self.process.state, type(self.process).State.STARTED)
self.process.stop()
self.assertEqual(self.process.state, type(self.process).State.TERMINATED)
| mit | -3,613,036,492,186,075,000 | 34.458333 | 110 | 0.694477 | false |
OpenDaisy/daisy-api | daisy/gateway.py | 1 | 12287 | # Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import glance_store
from oslo_log import log as logging
from daisy.api import authorization
from daisy.api import policy
from daisy.api import property_protections
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import store_utils
import daisy.db
import daisy.domain
import daisy.location
import daisy.notifier
import daisy.quota
try:
import daisy.search
daisy_search = daisy.search
except ImportError:
daisy_search = None
LOG = logging.getLogger(__name__)
class Gateway(object):
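    """Builds the domain objects used by the API, layering quota, policy,
    notifier and authorization proxies (and, for images, store/location and
    optional property-protection proxies) over the base factories and repos.
    """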
def __init__(self, db_api=None, store_api=None, notifier=None,
policy_enforcer=None, es_api=None):
self.db_api = db_api or daisy.db.get_api()
self.store_api = store_api or glance_store
self.store_utils = store_utils
self.notifier = notifier or daisy.notifier.Notifier()
self.policy = policy_enforcer or policy.Enforcer()
if es_api:
self.es_api = es_api
else:
self.es_api = daisy_search.get_api() if daisy_search else None
def get_image_factory(self, context):
image_factory = daisy.domain.ImageFactory()
store_image_factory = daisy.location.ImageFactoryProxy(
image_factory, context, self.store_api, self.store_utils)
quota_image_factory = daisy.quota.ImageFactoryProxy(
store_image_factory, context, self.db_api, self.store_utils)
policy_image_factory = policy.ImageFactoryProxy(
quota_image_factory, context, self.policy)
notifier_image_factory = daisy.notifier.ImageFactoryProxy(
policy_image_factory, context, self.notifier)
if property_utils.is_property_protection_enabled():
property_rules = property_utils.PropertyRules(self.policy)
pif = property_protections.ProtectedImageFactoryProxy(
notifier_image_factory, context, property_rules)
authorized_image_factory = authorization.ImageFactoryProxy(
pif, context)
else:
authorized_image_factory = authorization.ImageFactoryProxy(
notifier_image_factory, context)
return authorized_image_factory
def get_image_member_factory(self, context):
image_factory = daisy.domain.ImageMemberFactory()
quota_image_factory = daisy.quota.ImageMemberFactoryProxy(
image_factory, context, self.db_api, self.store_utils)
policy_member_factory = policy.ImageMemberFactoryProxy(
quota_image_factory, context, self.policy)
authorized_image_factory = authorization.ImageMemberFactoryProxy(
policy_member_factory, context)
return authorized_image_factory
def get_repo(self, context):
image_repo = daisy.db.ImageRepo(context, self.db_api)
store_image_repo = daisy.location.ImageRepoProxy(
image_repo, context, self.store_api, self.store_utils)
quota_image_repo = daisy.quota.ImageRepoProxy(
store_image_repo, context, self.db_api, self.store_utils)
policy_image_repo = policy.ImageRepoProxy(
quota_image_repo, context, self.policy)
notifier_image_repo = daisy.notifier.ImageRepoProxy(
policy_image_repo, context, self.notifier)
if property_utils.is_property_protection_enabled():
property_rules = property_utils.PropertyRules(self.policy)
pir = property_protections.ProtectedImageRepoProxy(
notifier_image_repo, context, property_rules)
authorized_image_repo = authorization.ImageRepoProxy(
pir, context)
else:
authorized_image_repo = authorization.ImageRepoProxy(
notifier_image_repo, context)
return authorized_image_repo
def get_task_factory(self, context):
task_factory = daisy.domain.TaskFactory()
policy_task_factory = policy.TaskFactoryProxy(
task_factory, context, self.policy)
notifier_task_factory = daisy.notifier.TaskFactoryProxy(
policy_task_factory, context, self.notifier)
authorized_task_factory = authorization.TaskFactoryProxy(
notifier_task_factory, context)
return authorized_task_factory
def get_task_repo(self, context):
task_repo = daisy.db.TaskRepo(context, self.db_api)
policy_task_repo = policy.TaskRepoProxy(
task_repo, context, self.policy)
notifier_task_repo = daisy.notifier.TaskRepoProxy(
policy_task_repo, context, self.notifier)
authorized_task_repo = authorization.TaskRepoProxy(
notifier_task_repo, context)
return authorized_task_repo
def get_task_stub_repo(self, context):
task_stub_repo = daisy.db.TaskRepo(context, self.db_api)
policy_task_stub_repo = policy.TaskStubRepoProxy(
task_stub_repo, context, self.policy)
notifier_task_stub_repo = daisy.notifier.TaskStubRepoProxy(
policy_task_stub_repo, context, self.notifier)
authorized_task_stub_repo = authorization.TaskStubRepoProxy(
notifier_task_stub_repo, context)
return authorized_task_stub_repo
def get_task_executor_factory(self, context):
task_repo = self.get_task_repo(context)
image_repo = self.get_repo(context)
image_factory = self.get_image_factory(context)
return daisy.domain.TaskExecutorFactory(task_repo,
image_repo,
image_factory)
def get_metadef_namespace_factory(self, context):
ns_factory = daisy.domain.MetadefNamespaceFactory()
policy_ns_factory = policy.MetadefNamespaceFactoryProxy(
ns_factory, context, self.policy)
notifier_ns_factory = daisy.notifier.MetadefNamespaceFactoryProxy(
policy_ns_factory, context, self.notifier)
authorized_ns_factory = authorization.MetadefNamespaceFactoryProxy(
notifier_ns_factory, context)
return authorized_ns_factory
def get_metadef_namespace_repo(self, context):
ns_repo = daisy.db.MetadefNamespaceRepo(context, self.db_api)
policy_ns_repo = policy.MetadefNamespaceRepoProxy(
ns_repo, context, self.policy)
notifier_ns_repo = daisy.notifier.MetadefNamespaceRepoProxy(
policy_ns_repo, context, self.notifier)
authorized_ns_repo = authorization.MetadefNamespaceRepoProxy(
notifier_ns_repo, context)
return authorized_ns_repo
def get_metadef_object_factory(self, context):
object_factory = daisy.domain.MetadefObjectFactory()
policy_object_factory = policy.MetadefObjectFactoryProxy(
object_factory, context, self.policy)
notifier_object_factory = daisy.notifier.MetadefObjectFactoryProxy(
policy_object_factory, context, self.notifier)
authorized_object_factory = authorization.MetadefObjectFactoryProxy(
notifier_object_factory, context)
return authorized_object_factory
def get_metadef_object_repo(self, context):
object_repo = daisy.db.MetadefObjectRepo(context, self.db_api)
policy_object_repo = policy.MetadefObjectRepoProxy(
object_repo, context, self.policy)
notifier_object_repo = daisy.notifier.MetadefObjectRepoProxy(
policy_object_repo, context, self.notifier)
authorized_object_repo = authorization.MetadefObjectRepoProxy(
notifier_object_repo, context)
return authorized_object_repo
def get_metadef_resource_type_factory(self, context):
resource_type_factory = daisy.domain.MetadefResourceTypeFactory()
policy_resource_type_factory = policy.MetadefResourceTypeFactoryProxy(
resource_type_factory, context, self.policy)
notifier_resource_type_factory = (
daisy.notifier.MetadefResourceTypeFactoryProxy(
policy_resource_type_factory, context, self.notifier)
)
authorized_resource_type_factory = (
authorization.MetadefResourceTypeFactoryProxy(
notifier_resource_type_factory, context)
)
return authorized_resource_type_factory
def get_metadef_resource_type_repo(self, context):
resource_type_repo = daisy.db.MetadefResourceTypeRepo(
context, self.db_api)
policy_object_repo = policy.MetadefResourceTypeRepoProxy(
resource_type_repo, context, self.policy)
notifier_object_repo = daisy.notifier.MetadefResourceTypeRepoProxy(
policy_object_repo, context, self.notifier)
authorized_object_repo = authorization.MetadefResourceTypeRepoProxy(
notifier_object_repo, context)
return authorized_object_repo
def get_metadef_property_factory(self, context):
prop_factory = daisy.domain.MetadefPropertyFactory()
policy_prop_factory = policy.MetadefPropertyFactoryProxy(
prop_factory, context, self.policy)
notifier_prop_factory = daisy.notifier.MetadefPropertyFactoryProxy(
policy_prop_factory, context, self.notifier)
authorized_prop_factory = authorization.MetadefPropertyFactoryProxy(
notifier_prop_factory, context)
return authorized_prop_factory
def get_metadef_property_repo(self, context):
prop_repo = daisy.db.MetadefPropertyRepo(context, self.db_api)
policy_prop_repo = policy.MetadefPropertyRepoProxy(
prop_repo, context, self.policy)
notifier_prop_repo = daisy.notifier.MetadefPropertyRepoProxy(
policy_prop_repo, context, self.notifier)
authorized_prop_repo = authorization.MetadefPropertyRepoProxy(
notifier_prop_repo, context)
return authorized_prop_repo
def get_metadef_tag_factory(self, context):
tag_factory = daisy.domain.MetadefTagFactory()
policy_tag_factory = policy.MetadefTagFactoryProxy(
tag_factory, context, self.policy)
notifier_tag_factory = daisy.notifier.MetadefTagFactoryProxy(
policy_tag_factory, context, self.notifier)
authorized_tag_factory = authorization.MetadefTagFactoryProxy(
notifier_tag_factory, context)
return authorized_tag_factory
def get_metadef_tag_repo(self, context):
tag_repo = daisy.db.MetadefTagRepo(context, self.db_api)
policy_tag_repo = policy.MetadefTagRepoProxy(
tag_repo, context, self.policy)
notifier_tag_repo = daisy.notifier.MetadefTagRepoProxy(
policy_tag_repo, context, self.notifier)
authorized_tag_repo = authorization.MetadefTagRepoProxy(
notifier_tag_repo, context)
return authorized_tag_repo
def get_catalog_search_repo(self, context):
if self.es_api is None:
# TODO(mriedem): Make this a separate exception or change to
# warning/error logging in Liberty once we're past string freeze.
# See bug 1441764.
LOG.debug('The search and index services are not available. '
'Ensure you have the necessary prerequisite '
'dependencies installed like elasticsearch to use these '
'services.')
raise exception.ServiceUnavailable()
search_repo = daisy.search.CatalogSearchRepo(context, self.es_api)
policy_search_repo = policy.CatalogSearchRepoProxy(
search_repo, context, self.policy)
return policy_search_repo
| apache-2.0 | 8,565,885,868,649,443,000 | 45.896947 | 79 | 0.674697 | false |
fabioz/mu-repo | mu_repo/repos_with_changes.py | 1 | 2688 | from mu_repo.action_diff import ParsePorcelain
from mu_repo.execute_parallel_command import ParallelCmd, ExecuteInParallel
#===================================================================================================
# ComputeReposWithChanges
#===================================================================================================
def ComputeReposWithChanges(repos_and_curr_branch, params):
'''
:param repos_and_curr_branch: list(tuple(str, str))
A list with the repos and the current branch for each repo.
:param params: Params
Used to get the git to be used.
:return: dict(str->bool)
A dictionary where the key is the repo and the value a boolean indicating whether
there are local changes in that repo.
'''
commands = []
for repo, _branch in repos_and_curr_branch:
commands.append(ParallelCmd(repo, [params.config.git] + ['status', '-s']))
repos_with_changes = {}
def OnOutput(output):
if not output.stdout:
repos_with_changes[output.repo] = False
else:
repos_with_changes[output.repo] = True
ExecuteInParallel(commands, on_output=OnOutput)
return repos_with_changes
#===================================================================================================
# ComputeReposWithChangesFromCurrentBranchToOrigin
#===================================================================================================
def ComputeReposWithChangesFromCurrentBranchToOrigin(repos_and_curr_branch, params, target_branch=None):
'''
:param repos_and_curr_branch: list(tuple(str, str))
A list with the repos and the current branch for each repo.
:param params: Params
Used to get the git to be used.
:param target_branch: str
If passed, instead of comparing with the same current branch in the origin, it'll compare
with origin/target_branch.
:return: list(str)
Returns a list with the repositories that have some difference from branch to origin/branch.
'''
commands = []
for repo, curr_branch in repos_and_curr_branch:
commands.append(
ParallelCmd(repo, [params.config.git] + ('diff --name-only -z origin/%s' % (
target_branch or curr_branch,)).split()))
repos_with_changes = []
def OnOutput(output):
for _entry in ParsePorcelain(output.stdout, only_split=True):
#Iterate: if we have a match, add it as having a change!
repos_with_changes.append(output.repo)
break
ExecuteInParallel(commands, on_output=OnOutput)
return repos_with_changes
| gpl-3.0 | 3,322,202,696,225,994,000 | 39.119403 | 104 | 0.56436 | false |
cisco-openstack/tempest | tempest/api/network/admin/test_routers.py | 1 | 11969 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.network import base
from tempest.common import identity
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
CONF = config.CONF
class RoutersAdminTest(base.BaseAdminNetworkTest):
# NOTE(salv-orlando): This class inherits from BaseAdminNetworkTest
# as some router operations, such as enabling or disabling SNAT
# require admin credentials by default
def _cleanup_router(self, router):
self.delete_router(router)
def _create_router(self, name=None, admin_state_up=False,
external_network_id=None, enable_snat=None):
# associate a cleanup with created routers to avoid quota limits
router = self.create_router(name, admin_state_up,
external_network_id, enable_snat)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self._cleanup_router, router)
return router
@classmethod
def skip_checks(cls):
super(RoutersAdminTest, cls).skip_checks()
if not utils.is_extension_enabled('router', 'network'):
msg = "router extension not enabled."
raise cls.skipException(msg)
@decorators.idempotent_id('e54dd3a3-4352-4921-b09d-44369ae17397')
def test_create_router_setting_project_id(self):
# Test creating router from admin user setting project_id.
project = data_utils.rand_name('test_tenant_')
description = data_utils.rand_name('desc_')
project = identity.identity_utils(self.os_admin).create_project(
name=project, description=description)
project_id = project['id']
self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
project_id)
name = data_utils.rand_name('router-')
create_body = self.admin_routers_client.create_router(
name=name, tenant_id=project_id)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.admin_routers_client.delete_router,
create_body['router']['id'])
self.assertEqual(project_id, create_body['router']['tenant_id'])
@decorators.idempotent_id('847257cc-6afd-4154-b8fb-af49f5670ce8')
@utils.requires_ext(extension='ext-gw-mode', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_create_router_with_default_snat_value(self):
# Create a router with default snat rule
router = self._create_router(
external_network_id=CONF.network.public_network_id)
self._verify_router_gateway(
router['id'], {'network_id': CONF.network.public_network_id,
'enable_snat': True})
@decorators.idempotent_id('ea74068d-09e9-4fd7-8995-9b6a1ace920f')
@utils.requires_ext(extension='ext-gw-mode', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_create_router_with_snat_explicit(self):
name = data_utils.rand_name('snat-router')
# Create a router enabling snat attributes
enable_snat_states = [False, True]
for enable_snat in enable_snat_states:
external_gateway_info = {
'network_id': CONF.network.public_network_id,
'enable_snat': enable_snat}
create_body = self.admin_routers_client.create_router(
name=name, external_gateway_info=external_gateway_info)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.admin_routers_client.delete_router,
create_body['router']['id'])
# Verify snat attributes after router creation
self._verify_router_gateway(create_body['router']['id'],
exp_ext_gw_info=external_gateway_info)
def _verify_router_gateway(self, router_id, exp_ext_gw_info=None):
show_body = self.admin_routers_client.show_router(router_id)
actual_ext_gw_info = show_body['router']['external_gateway_info']
if exp_ext_gw_info is None:
self.assertIsNone(actual_ext_gw_info)
return
# Verify only keys passed in exp_ext_gw_info
for k, v in exp_ext_gw_info.items():
self.assertEqual(v, actual_ext_gw_info[k])
def _verify_gateway_port(self, router_id):
list_body = self.admin_ports_client.list_ports(
network_id=CONF.network.public_network_id,
device_id=router_id,
device_owner="network:router_gateway")
self.assertEqual(len(list_body['ports']), 1)
gw_port = list_body['ports'][0]
fixed_ips = gw_port['fixed_ips']
self.assertNotEmpty(fixed_ips)
# Assert that all of the IPs from the router gateway port
# are allocated from a valid public subnet.
public_net_body = self.admin_networks_client.show_network(
CONF.network.public_network_id)
public_subnet_ids = public_net_body['network']['subnets']
for fixed_ip in fixed_ips:
subnet_id = fixed_ip['subnet_id']
self.assertIn(subnet_id, public_subnet_ids)
@decorators.idempotent_id('6cc285d8-46bf-4f36-9b1a-783e3008ba79')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_update_router_set_gateway(self):
router = self._create_router()
self.routers_client.update_router(
router['id'],
external_gateway_info={
'network_id': CONF.network.public_network_id})
# Verify operation - router
self._verify_router_gateway(
router['id'],
{'network_id': CONF.network.public_network_id})
self._verify_gateway_port(router['id'])
@decorators.idempotent_id('b386c111-3b21-466d-880c-5e72b01e1a33')
@utils.requires_ext(extension='ext-gw-mode', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_update_router_set_gateway_with_snat_explicit(self):
router = self._create_router()
self.admin_routers_client.update_router(
router['id'],
external_gateway_info={
'network_id': CONF.network.public_network_id,
'enable_snat': True})
self._verify_router_gateway(
router['id'],
{'network_id': CONF.network.public_network_id,
'enable_snat': True})
self._verify_gateway_port(router['id'])
@decorators.idempotent_id('96536bc7-8262-4fb2-9967-5c46940fa279')
@utils.requires_ext(extension='ext-gw-mode', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_update_router_set_gateway_without_snat(self):
router = self._create_router()
self.admin_routers_client.update_router(
router['id'],
external_gateway_info={
'network_id': CONF.network.public_network_id,
'enable_snat': False})
self._verify_router_gateway(
router['id'],
{'network_id': CONF.network.public_network_id,
'enable_snat': False})
self._verify_gateway_port(router['id'])
@decorators.idempotent_id('ad81b7ee-4f81-407b-a19c-17e623f763e8')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_update_router_unset_gateway(self):
router = self._create_router(
external_network_id=CONF.network.public_network_id)
self.routers_client.update_router(router['id'],
external_gateway_info={})
self._verify_router_gateway(router['id'])
# No gateway port expected
list_body = self.admin_ports_client.list_ports(
network_id=CONF.network.public_network_id,
device_id=router['id'])
self.assertFalse(list_body['ports'])
@decorators.idempotent_id('f2faf994-97f4-410b-a831-9bc977b64374')
@utils.requires_ext(extension='ext-gw-mode', service='network')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
def test_update_router_reset_gateway_without_snat(self):
router = self._create_router(
external_network_id=CONF.network.public_network_id)
self.admin_routers_client.update_router(
router['id'],
external_gateway_info={
'network_id': CONF.network.public_network_id,
'enable_snat': False})
self._verify_router_gateway(
router['id'],
{'network_id': CONF.network.public_network_id,
'enable_snat': False})
self._verify_gateway_port(router['id'])
@decorators.idempotent_id('cbe42f84-04c2-11e7-8adb-fa163e4fa634')
@utils.requires_ext(extension='ext-gw-mode', service='network')
def test_create_router_set_gateway_with_fixed_ip(self):
        # First create an external network, then use a temporary port on it
        # to obtain a fixed IP address for the router gateway
network_name = data_utils.rand_name(self.__class__.__name__)
network_1 = self.admin_networks_client.create_network(
name=network_name, **{'router:external': True})['network']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.admin_networks_client.delete_network,
network_1['id'])
subnet = self.create_subnet(
network_1, client=self.admin_subnets_client, enable_dhcp=False)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.admin_subnets_client.delete_subnet, subnet['id'])
port = self.admin_ports_client.create_port(
name=data_utils.rand_name(self.__class__.__name__),
network_id=network_1['id'])['port']
self.admin_ports_client.delete_port(port_id=port['id'])
fixed_ip = {
'subnet_id': port['fixed_ips'][0]['subnet_id'],
'ip_address': port['fixed_ips'][0]['ip_address']
}
external_gateway_info = {
'network_id': network_1['id'],
'external_fixed_ips': [fixed_ip]
}
# Create a router and set gateway to fixed_ip
router = self.admin_routers_client.create_router(
external_gateway_info=external_gateway_info)['router']
self.admin_routers_client.delete_router(router['id'])
# Examine router's gateway is equal to fixed_ip
self.assertEqual(router['external_gateway_info'][
'external_fixed_ips'][0]['ip_address'],
fixed_ip['ip_address'])
class RoutersIpV6AdminTest(RoutersAdminTest):
_ip_version = 6
| apache-2.0 | 8,357,789,090,428,257,000 | 46.3083 | 78 | 0.623277 | false |
biswajit-mandal/contrail-webui-third-party | fetch_packages.py | 1 | 11100 | #!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import os
import errno
import re
import shutil
import subprocess
import sys, getopt
import platform
from time import sleep
_RETRIES = 5
_OPT_VERBOSE = None
_OPT_DRY_RUN = None
_PACKAGE_CACHE='/tmp/cache/' + os.environ['USER'] + '/webui_third_party'
_NODE_MODULES='./node_modules'
_TMP_NODE_MODULES=_PACKAGE_CACHE + '/' + _NODE_MODULES
_TAR_COMMAND = ['tar']
_CACHED_PKG_DISTROS = ('Ubuntu', 'Red Hat', 'CentOS', 'darwin')
from lxml import objectify
def getFilename(pkg, url):
element = pkg.find("local-filename")
if element:
return str(element)
(path, filename) = url.rsplit('/', 1)
m = re.match(r'\w+\?\w+=(.*)', filename)
if m:
filename = m.group(1)
return filename
def setTarCommand():
if isTarGnuVersion():
print 'GNU tar found. we will skip the no-unknown-keyword warning'
global _TAR_COMMAND
_TAR_COMMAND = ['tar', '--warning=no-unknown-keyword']
else:
print 'No GNU tar. will use default tar utility'
def isTarGnuVersion():
cmd = subprocess.Popen(['tar', '--version'],
stdout=subprocess.PIPE)
(output, _) = cmd.communicate()
(first, _) = output.split('\n', 1)
if first.lower().find('gnu') != -1:
return True
return False
def getTarDestination(tgzfile, compress_flag):
    cmd = subprocess.Popen(_TAR_COMMAND + ['--exclude=.*', '-' + compress_flag + 'tf', tgzfile],
stdout=subprocess.PIPE)
(output, _) = cmd.communicate()
(first, _) = output.split('\n', 1)
fields = first.split('/')
return fields[0]
def getZipDestination(tgzfile):
cmd = subprocess.Popen(['unzip', '-t', tgzfile],
stdout=subprocess.PIPE)
(output, _) = cmd.communicate()
lines = output.split('\n')
for line in lines:
print line
m = re.search(r'testing:\s+([\w\-\.]+)\/', line)
if m:
return m.group(1)
return None
def getFileDestination(file):
start = file.rfind('/')
if start < 0:
return None
return file[start+1:]
def ApplyPatches(pkg):
stree = pkg.find('patches')
if stree is None:
return
for patch in stree.getchildren():
cmd = ['patch']
if patch.get('strip'):
cmd.append('-p')
cmd.append(patch.get('strip'))
if _OPT_VERBOSE:
print "Patching %s <%s..." % (' '.join(cmd), str(patch))
if not _OPT_DRY_RUN:
fp = open(str(patch), 'r')
proc = subprocess.Popen(cmd, stdin = fp)
proc.communicate()
#def VarSubst(cmdstr, filename):
# return re.sub(r'\${filename}', filename, cmdstr)
def GetOSDistro():
distro = ''
if sys.platform == 'darwin':
return sys.platform
else:
try:
return platform.linux_distribution()[0]
except:
pass
return distro
def DownloadPackage(url, ccfile, pkg):
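    # Resolve any '$distro' placeholder in the URL against the local OS
    # distribution, pick the matching md5 entry, download the file into the
    # package cache and verify its checksum (retrying up to _RETRIES times).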
md5 = pkg.md5
pkg.ccfile = ccfile
if url.find('$distro') != -1:
# Platform specific package download
distro = GetOSDistro()
if distro == '':
md5 = md5.other
# Remove the $distro from the url and try
url = url.replace('/$distro', '')
# Change the pkg format to npm download the dependencies
if pkg.format == 'npm-cached':
pkg.format = 'npm'
else:
# check if we have the distro in our cache
found = False
for cached_pkg in _CACHED_PKG_DISTROS:
if cached_pkg in distro:
distro = cached_pkg
found = True
break
if found == False:
# Remove the $distro from the url and try
url = url.replace('/$distro', '')
# Change the pkg format to npm download the dependencies
md5 = md5.other
if pkg.format == 'npm-cached':
pkg.format = 'npm'
else:
distro = distro.lower().replace(" ", "")
url = url.replace('$distro', distro)
md5 = md5[distro]
pkg.distro = distro
# Change the ccfile, add distro before the package name
idx = ccfile.rfind("/")
pkgCachePath = ccfile[:idx] + "/" + distro
pkg.pkgCachePath = pkgCachePath
pkg.ccfile = pkgCachePath + "/" + ccfile[idx + 1:]
ccfile = pkg.ccfile.text
# Now create the directory
try:
os.makedirs(pkgCachePath)
except OSError:
pass
print url
#Check if the package already exists
if os.path.isfile(ccfile):
md5sum = FindMd5sum(ccfile)
if md5sum == md5:
return pkg
else:
os.remove(ccfile)
retry_count = 0
while True:
subprocess.call(['wget', '--no-check-certificate', '-O', ccfile, url])
md5sum = FindMd5sum(ccfile)
if _OPT_VERBOSE:
print "Calculated md5sum: %s" % md5sum
print "Expected md5sum: %s" % md5
if md5sum == md5:
return pkg
elif retry_count <= _RETRIES:
os.remove(ccfile)
retry_count += 1
sleep(1)
continue
else:
raise RuntimeError("MD5sum %s, expected(%s) dosen't match for the "
"downloaded package %s" % (md5sum, md5, ccfile))
return pkg
def ProcessPackage(pkg):
print "Processing %s ..." % (pkg['name'])
url = str(pkg['url'])
filename = getFilename(pkg, url)
ccfile = _PACKAGE_CACHE + '/' + filename
installArguments = pkg.find('install-arguments')
if pkg.format == 'npm-cached':
try:
shutil.rmtree(str(_NODE_MODULES + '/' + pkg['name']))
except OSError as exc:
pass
try:
os.makedirs(_NODE_MODULES)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
print 'mkdirs of ' + _NODE_MODULES + ' failed.. Exiting..'
return
#ccfile = _NODE_MODULES + '/' + filename
pkg = DownloadPackage(url, ccfile, pkg)
#
# Determine the name of the directory created by the package.
# unpack-directory means that we 'cd' to the given directory before
# unpacking.
#
ccfile = pkg.ccfile.text
dest = None
unpackdir = pkg.find('unpack-directory')
if unpackdir:
dest = str(unpackdir)
else:
if pkg.format == 'tgz':
dest = getTarDestination(ccfile, 'z')
elif pkg.format == 'npm-cached':
dest = _NODE_MODULES + '/' + getTarDestination(ccfile, 'z')
elif pkg.format == 'tbz':
dest = getTarDestination(ccfile, 'j')
elif pkg.format == 'zip':
dest = getZipDestination(ccfile)
elif pkg.format == 'npm':
dest = getTarDestination(ccfile, 'z')
elif pkg.format == 'file':
dest = getFileDestination(ccfile)
#
# clean directory before unpacking and applying patches
#
rename = pkg.find('rename')
if rename and pkg.format == 'npm-cached':
rename = _NODE_MODULES + '/' + str(rename)
if rename and os.path.isdir(str(rename)):
if not _OPT_DRY_RUN:
shutil.rmtree(str(rename))
elif dest and os.path.isdir(dest):
if _OPT_VERBOSE:
print "Clean directory %s" % dest
if not _OPT_DRY_RUN:
shutil.rmtree(dest)
if unpackdir:
try:
os.makedirs(str(unpackdir))
except OSError as exc:
pass
cmd = None
if pkg.format == 'tgz':
cmd = _TAR_COMMAND + ['-zxvf', ccfile]
elif pkg.format == 'tbz':
cmd = _TAR_COMMAND + ['-jxvf', ccfile]
elif pkg.format == 'zip':
cmd = ['unzip', '-o', ccfile]
elif pkg.format == 'npm':
newDir = _PACKAGE_CACHE
if 'distro' in pkg:
newDir = newDir + pkg.distro
cmd = ['npm', 'install', ccfile, '--prefix', newDir]
if installArguments:
cmd.append(str(installArguments))
elif pkg.format == 'file':
cmd = ['cp', '-af', ccfile, dest]
elif pkg.format == 'npm-cached':
cmd = _TAR_COMMAND + ['-zxvf', ccfile, '-C', _NODE_MODULES]
else:
print 'Unexpected format: %s' % (pkg.format)
return
print 'Issuing command: %s' % (cmd)
if not _OPT_DRY_RUN:
cd = None
if unpackdir:
cd = str(unpackdir)
if pkg.format == 'npm':
try:
os.makedirs(_NODE_MODULES)
os.makedirs(newDir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
print 'mkdirs of ' + _NODE_MODULES + ' ' + newDir + ' failed.. Exiting..'
return
npmCmd = ['cp', '-af', newDir + "/" + _NODE_MODULES + '/' + pkg['name'],
'./node_modules/']
if os.path.exists(newDir + '/' + pkg['name']):
cmd = npmCmd
else:
try:
p = subprocess.Popen(cmd, cwd = cd)
ret = p.wait()
                    if ret != 0:
                        sys.exit('Terminating: ProcessPackage with return code: %d' % ret)
cmd = npmCmd
except OSError:
print ' '.join(cmd) + ' could not be executed, bailing out!'
return
p = subprocess.Popen(cmd, cwd = cd)
ret = p.wait()
        if ret != 0:
            sys.exit('Terminating: ProcessPackage with return code: %d' % ret)
if rename and dest:
os.rename(dest, str(rename))
ApplyPatches(pkg)
def FindMd5sum(anyfile):
if sys.platform == 'darwin':
cmd = ['md5', '-r']
else:
cmd = ['md5sum']
cmd.append(anyfile)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, stderr = proc.communicate()
md5sum = stdout.split()[0]
return md5sum
def main(filename):
tree = objectify.parse(filename)
root = tree.getroot()
#Check which version of tar is used and skip warning messages.
setTarCommand()
for object in root.iterchildren():
if object.tag == 'package':
ProcessPackage(object)
if __name__ == '__main__':
try:
opts,args = getopt.getopt(sys.argv[1:],"f:",["file="])
except getopt.GetoptError:
raise RuntimeError("Error in parsing the options/arguments")
xmlfile = None
for opt,arg in opts:
if opt in ("-f","--file"):
xmlfile = arg
os.chdir(os.path.dirname(os.path.realpath(__file__)))
try:
os.makedirs(_PACKAGE_CACHE)
except OSError:
pass
if xmlfile == None:
main('packages.xml')
else:
main(xmlfile)
| apache-2.0 | -6,112,047,089,367,081,000 | 30.355932 | 97 | 0.53036 | false |
jbloomlab/phydms | phydmslib/simulate.py | 1 | 6135 | """Functions for performing simulations, mostly using ``pyvolve``.
Written by Jesse Bloom and Sarah Hilton.
"""
import os
import sys
import math
import phydmslib.models
from phydmslib.constants import (NT_TO_INDEX, AA_TO_INDEX, ALMOST_ZERO)
import pyvolve
import numpy
from tempfile import mkstemp
import random
import Bio.Phylo
def pyvolvePartitions(model, divselection=None):
"""Get list of `pyvolve` partitions for `model`.
Args:
`model` (`phydmslib.models.Models` object)
The model used for the simulations. Currently only
certain `Models` are supported (e.g., `YNGKP`,
`ExpCM`)
`divselection` (`None` or 2-tuple `(divomega, divsites)`)
Set this option if you want to simulate a subset of sites
as under diversifying selection (e.g., an `omega` different
than that used by `model`. In this case, `divomega` is
the omega for this subset of sites, and `divsites` is a list
of the sites in 1, 2, ... numbering.
Returns:
`partitions` (`list` of `pyvolve.Partition` objects)
Can be fed into `pyvolve.Evolver` to simulate evolution.
"""
codons = pyvolve.genetics.Genetics().codons
codon_dict = pyvolve.genetics.Genetics().codon_dict
purines = pyvolve.genetics.Genetics().purines
if divselection:
(divomega, divsites) = divselection
else:
divsites = []
assert all((1 <= r <= model.nsites for r in divsites))
partitions = []
for r in range(model.nsites):
matrix = numpy.zeros((len(codons), len(codons)), dtype='float')
for (xi, x) in enumerate(codons):
for (yi, y) in enumerate(codons):
ntdiffs = [(x[j], y[j]) for j in range(3) if x[j] != y[j]]
if len(ntdiffs) == 1:
(xnt, ynt) = ntdiffs[0]
qxy = 1.0
if (xnt in purines) == (ynt in purines):
qxy *= model.kappa
(xaa, yaa) = (codon_dict[x], codon_dict[y])
fxy = 1.0
if xaa != yaa:
if type(model) ==\
phydmslib.models.ExpCM_empirical_phi_divpressure:
fxy *= (model.omega *
(1 + model.omega2 * model.deltar[r]))
elif r + 1 in divsites:
fxy *= divomega
else:
fxy *= model.omega
if type(model) in [phydmslib.models.ExpCM,
phydmslib.models.ExpCM_empirical_phi,
(phydmslib.models
.ExpCM_empirical_phi_divpressure)]:
qxy *= model.phi[NT_TO_INDEX[ynt]]
pix = model.pi[r][AA_TO_INDEX[xaa]]**model.beta
piy = model.pi[r][AA_TO_INDEX[yaa]]**model.beta
if abs(pix - piy) > ALMOST_ZERO:
fxy *= math.log(piy / pix) / (1.0 - pix / piy)
elif type(model) == phydmslib.models.YNGKP_M0:
for p in range(3):
qxy *= model.phi[p][NT_TO_INDEX[y[p]]]
else:
raise ValueError("Can't handle model type {0}".format(
type(model)))
matrix[xi][yi] = model.mu * qxy * fxy
matrix[xi][xi] = -matrix[xi].sum()
# create model in way that captures print statements from pyvolve
old_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
try:
m = pyvolve.Model("custom", {"matrix": matrix})
finally:
sys.stdout.close()
sys.stdout = old_stdout
partitions.append(pyvolve.Partition(models=m, size=1))
return partitions
def simulateAlignment(model, treeFile, alignmentPrefix, randomSeed=False):
"""
Simulate an alignment given a model and tree (units = subs/site).
Simulations done using `pyvolve`.
Args:
`model` (`phydmslib.models.Models` object)
The model used for the simulations. Only
models that can be passed to `pyvolve.Partitions`
are supported.
`treeFile` (str)
Name of newick file used to simulate the sequences.
The branch lengths should be in substitutions per site,
which is the default units for all `phydms` outputs.
`alignmentPrefix`
Prefix for the files created by `pyvolve`.
The result of this function is a simulated FASTA alignment
            file with the name having the prefix given by `alignmentPrefix`
            and the suffix `'_simulatedalignment.fasta'`.
        `randomSeed` (`False` or int)
            If not `False`, passed to `random.seed` before simulating.
    """
if randomSeed is False:
pass
else:
random.seed(randomSeed)
# Transform the branch lengths by dividing by the model `branchScale`
tree = Bio.Phylo.read(treeFile, 'newick')
for node in tree.get_terminals() + tree.get_nonterminals():
if (node.branch_length is None) and (node == tree.root):
node.branch_length = 1e-06
else:
node.branch_length /= model.branchScale
fd, temp_path = mkstemp()
Bio.Phylo.write(tree, temp_path, 'newick')
os.close(fd)
pyvolve_tree = pyvolve.read_tree(file=temp_path)
os.remove(temp_path)
# Make the `pyvolve` partition
partitions = pyvolvePartitions(model)
# Simulate the alignment
alignment = '{0}_simulatedalignment.fasta'.format(alignmentPrefix)
info = '_temp_{0}info.txt'.format(alignmentPrefix)
rates = '_temp_{0}_ratefile.txt'.format(alignmentPrefix)
evolver = pyvolve.Evolver(partitions=partitions, tree=pyvolve_tree)
evolver(seqfile=alignment, infofile=info, ratefile=rates)
for f in [rates, info, "custom_matrix_frequencies.txt"]:
if os.path.isfile(f):
os.remove(f)
assert os.path.isfile(alignment)
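# Example usage (an illustrative sketch only; the model constructor call, tree
# file and prefix below are assumptions, not values defined in this module):
#
#     model = phydmslib.models.YNGKP_M0(e_pw, nsites)  # hypothetical parameters
#     simulateAlignment(model, 'tree.newick', 'sim', randomSeed=1)
#     # -> writes 'sim_simulatedalignment.fasta'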
if __name__ == '__main__':
import doctest
doctest.testmod()
| gpl-3.0 | 3,644,956,278,045,778,400 | 37.829114 | 78 | 0.558924 | false |
IBT-FMI/SAMRI | samri/utilities.py | 1 | 13239 | import multiprocessing as mp
import nibabel as nib
import nipype.interfaces.io as nio
import numpy as np
import os
import pandas as pd
from itertools import product
from joblib import Parallel, delayed
from os import path
# PyBIDS 0.6.5 and 0.10.2 compatibility
try:
from bids.grabbids import BIDSLayout
except ModuleNotFoundError:
from bids.layout import BIDSLayout
try:
from bids.grabbids import BIDSValidator
except ModuleNotFoundError:
from bids_validator import BIDSValidator
N_PROCS=max(mp.cpu_count()-2,2)
def bids_autofind_df(bids_dir,
**kwargs
):
"""Automatically generate a BIDS-like Pandas Dataframe index based on the more flexible `samri.utilities.bids_autofind` function.
Parameters
----------
bids_dir : str
Path to BIDS-formatted directory
type : {"func", "anat"}
Which type to source data for (currently only supports "func", and "anat" - ideally we could extend this to include "dwi").
Returns
-------
path_template : str
String which can be formatted with any of the dictionaries in `substitutions`
    substitutions : list of dict
        A substitution iterator usable as a standard SAMRI function input, which (together with `path_template`) unambiguously identifies input files for analysis.
"""
bids_dir = path.abspath(path.expanduser(bids_dir))
if not os.path.exists(bids_dir):
print('{} path not found'.format(bids_dir))
else: print('{} path found'.format(bids_dir))
path_template, substitutions = bids_autofind(bids_dir, **kwargs)
for i in substitutions:
i['path'] = path_template.format(**i)
df = pd.DataFrame.from_records(substitutions)
return df
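# Example (sketch; the BIDS root below is an assumed path, not part of SAMRI):
#
#     df = bids_autofind_df('~/ni_data/my_bids', typ='func')
#     print(df[['subject', 'session', 'task', 'path']].head())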
def bids_autofind(bids_dir,
typ='',
path_template="sub-{{subject}}/ses-{{session}}/{typ}/sub-{{subject}}_ses-{{session}}_task-{{task}}_acq-{{acquisition}}_run-{{run}}.nii.gz",
match_regex='',
):
"""Automatically generate a BIDS path template and a substitution iterator (list of dicts, as produced by `samri.utilities.bids_substitution_iterator`, and used as a standard input SAMRI function input) from a BIDS-respecting directory.
Parameters
----------
bids_dir : str
Path to BIDS-formatted directory
type : {"func", "anat"}
Which type to source data for (currently only supports "func", and "anat" - ideally we could extend this to include "dwi").
Returns
-------
path_template : str
String which can be formatted with any of the dictionaries in `substitutions`
    substitutions : list of dict
        A substitution iterator usable as a standard SAMRI function input, which (together with `path_template`) unambiguously identifies input files for analysis.
"""
bids_dir = path.abspath(path.expanduser(bids_dir))
if match_regex:
pass
elif typ in ("func","dwi"):
match_regex = '.+/sub-(?P<sub>.+)/ses-(?P<ses>.+)/'+typ+'/.*?_task-(?P<task>.+).*?_acq-(?P<acquisition>.+)\.nii.gz'
elif typ == "":
match_regex = '.+/sub-(?P<sub>.+)/ses-(?P<ses>.+)/.*?_task-(?P<task>.+).*?_acq-(?P<acquisition>.+).*?_run-(?P<run>[0-9]+).*?\.nii.gz'
elif typ == "anat":
match_regex = '.+/sub-(?P<sub>.+)/ses-(?P<ses>.+)/anat/.*?_(?P<task>.+).*?_acq-(?P<acquisition>.+)\.nii.gz'
if path_template[:1] != '/' and 'bids_dir' not in path_template:
path_template = '{bids_dir}/'+path_template
path_template = path_template.format(bids_dir=bids_dir, typ=typ)
datafind = nio.DataFinder()
datafind.inputs.root_paths = bids_dir
datafind.inputs.match_regex = match_regex
datafind_res = datafind.run()
substitutions = []
for ix, i in enumerate(datafind_res.outputs.out_paths):
substitution = {}
try:
substitution["acquisition"] = datafind_res.outputs.acquisition[ix]
except AttributeError: pass
try:
substitution["subject"] = datafind_res.outputs.sub[ix]
except AttributeError: pass
try:
substitution["session"] = datafind_res.outputs.ses[ix]
except AttributeError: pass
try:
substitution["task"] = datafind_res.outputs.task[ix]
except AttributeError: pass
try:
substitution["run"] = datafind_res.outputs.run[ix]
except AttributeError: pass
try:
substitution["modality"] = datafind_res.outputs.modality[ix]
except AttributeError: pass
reconstructed_path = path.abspath(path.expanduser(path_template.format(**substitution)))
original_path = path.abspath(path.expanduser(i))
if reconstructed_path != original_path:
print("Original DataFinder path: "+original_path)
print("Reconstructed path: "+reconstructed_path)
raise ValueError("The reconstructed file path based on the substitution dictionary and the path template, is not identical to the corresponding path, found by `nipype.interfaces.io.DataFinder`. See string values above.")
substitutions.append(substitution)
return path_template, substitutions
def bids_substitution_iterator(sessions, subjects,
tasks=[''],
runs=[''],
data_dir='',
preprocessing_dir='',
acquisitions=[''],
modalities=[''],
l1_dir=None,
l1_workdir=None,
preprocessing_workdir=None,
validate_for_template=None,
):
"""Returns a list of dictionaries, which can be used together with a template string to identify large sets of input data files for SAMRI functions.
Parameters
----------
sessions : list
A list of session identifiers to include in the iterator.
subjects : list
A list of subject identifiers to include in the iterator.
    tasks : list, optional
A list of scan types to include in the iterator.
data_dir : str, optional
Path to the data root (this is where SAMRI creates e.g. `preprocessing`, `l1`, or `l2` directories.
preprocessing_dir : str, optional
String identifying the preprocessing pipeline name from which to provide an iterator.
l1_dir : str, optional
String identifying the level 1 pipeline name from which to provide an iterator. If `None` the level 1 pipeline name is assumed to correspond to the preprocessing pipeline name (`preprocessing_dir`)
l1_workdir : str, optional
String identifying the level 1 work directory name from which to provide an iterator. If `None` the level 1 work directory name is assumed to be the level 1 pipeline name (`l1_dir`) suffixed with the string `"_work"`.
preprocessing_workdir : str, optional
String identifying the preprocessing work directory name from which to provide an iterator. If `None` the preprocessing work directory name is assumed to be the preprocessing pipeline name (`preprocessing_dir`) suffixed with the string `"_work"`.
validate_for_template : str, optional
Template string for which to check whether a file exists.
        If no file exists given a substitution dictionary, that dictionary will not be added to the returned list.
If this variable is an empty string (or otherwise evaluates as False) no check is performed, and all dictionaries (i.e. all input value permutations) are returned.
Returns
-------
list of dictionaries
        With the keys being `"data_dir"`, `"subject"`, `"session"`, `"task"`, `"run"`, `"acquisition"`, and `"modality"`.
"""
substitutions=[]
subjects = list(dict.fromkeys(subjects))
sessions = list(dict.fromkeys(sessions))
tasks = list(dict.fromkeys(tasks))
runs = list(dict.fromkeys(runs))
acquisitions = list(dict.fromkeys(acquisitions))
modalities = list(dict.fromkeys(modalities))
for subject, session, task, run, acquisition, modality in product(subjects, sessions, tasks, runs, acquisitions, modalities):
substitution={}
substitution["data_dir"] = data_dir
substitution["task"] = task
substitution["run"] = run
substitution["session"] = session
substitution["subject"] = subject
substitution["acquisition"] = acquisition
substitution["modality"] = modality
if validate_for_template:
check_file = validate_for_template.format(**substitution)
check_file = path.abspath(path.expanduser(check_file))
if path.isfile(check_file):
substitutions.append(substitution)
            else: print('No file found at {}'.format(check_file))
else:
substitutions.append(substitution)
return substitutions
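# Example (sketch with made-up subject/session/task values):
#
#     substitutions = bids_substitution_iterator(
#         sessions=['ofM'], subjects=['4007', '4008'], tasks=['JogB'],
#         data_dir='~/ni_data',
#         validate_for_template='{data_dir}/sub-{subject}/ses-{session}/func/'
#                               'sub-{subject}_ses-{session}_task-{task}.nii.gz',
#         )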
def iter_collapse_by_path(in_files, out_files,
n_jobs=None,
n_jobs_percentage=0.75,
):
"""Patalellized iteration of `samri.utilities.collapse_by_path`."""
if not n_jobs:
n_jobs = max(int(round(mp.cpu_count()*n_jobs_percentage)),2)
out_files = Parallel(n_jobs=n_jobs, verbose=0, backend="threading")(map(delayed(collapse_by_path),
in_files,
out_files,
))
return out_files
def collapse_by_path(in_path, out_path):
"""Wrapper for `samri.utilities.collapse`, supporting an input path and saving object to an output path."""
in_path = os.path.abspath(os.path.expanduser(in_path))
out_path = os.path.abspath(os.path.expanduser(out_path))
img = nib.load(in_path)
img = collapse(img)
out_dir = os.path.dirname(out_path)
if not os.path.exists(out_dir):
#race-condition safe:
try:
os.makedirs(out_dir)
except OSError:
pass
nib.save(img, out_path)
return out_path
def collapse(img,
min_dim=3,
):
"""
    Collapse a nibabel image along its last axis.
Parameters
----------
img : nibabel.nifti1.Nifti1Image
Nibabel image to be collapsed.
min_dim : int
        Dimensionality beyond which not to collapse.
"""
ndim = 0
data = img.get_data()
for i in range(len(img.header['dim'])-1):
current_dim = img.header['dim'][i+1]
if current_dim == 1:
break
ndim += 1
if ndim <= min_dim:
return img
img.header['dim'][0] = ndim
img.header['pixdim'][ndim+1:] = 0
data = np.mean(data,axis=(ndim-1))
img = nib.nifti1.Nifti1Image(data, img.affine, img.header)
return img
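# Example (sketch): average a 4D NIfTI image over its last axis, yielding a 3D
# volume. The input path is an assumption for illustration.
#
#     img4d = nib.load('/path/to/bold.nii.gz')
#     img3d = collapse(img4d)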
def session_irregularity_filter(bids_path, exclude_irregularities):
"""
Create a Pandas Dataframe recording which session-animal combinations should be excluded, based on an irregularity criterion.
Parameters
----------
bids_path: str
Path to the root of the BIDS directory containing `_sessions.tsv` files.
exclude_irregularities: list of str
Irregularity strings which will disqualify a scan.
The logic for the exclusion is "any", if even one of the irregularities is present, the scan will be disqualified.
"""
bids_path = os.path.abspath(os.path.expanduser(bids_path))
sessions = []
for sub_dir in os.listdir(bids_path):
sub_path = os.path.join(bids_path,sub_dir)
if os.path.isdir(sub_path) and sub_dir[:4] == 'sub-':
session_file = os.path.join(sub_path,'{}_sessions.tsv'.format(sub_dir))
if os.path.isfile(session_file):
_df = pd.read_csv(session_file, sep='\t')
subject = sub_dir[4:]
for ix, row in _df.iterrows():
ses_entry = {}
session = row['session_id'][4:]
irregularities = row['irregularities']
ses_entry['subject'] = subject
ses_entry['session'] = session
ses_entry['irregularities'] = irregularities
try:
ses_entry['exclude'] = any(i in irregularities for i in exclude_irregularities)
except TypeError:
ses_entry['exclude'] = False
sessions.append(ses_entry)
return pd.DataFrame(sessions)
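# Example (sketch; the BIDS path and irregularity strings are assumptions):
#
#     exclusion_df = session_irregularity_filter(
#         '~/ni_data/my_bids', ['ICP spike', 'anesthesia failure'])
#     excluded = exclusion_df[exclusion_df['exclude']]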
def ordered_structures(
atlas='/usr/share/mouse-brain-templates/dsurqec_40micron_labels.nii',
mapping='/usr/share/mouse-brain-templates/dsurqe_labels.csv',
label_columns=['right label','left label'],
structure_column='Structure',
remove_zero_label=True,
):
"""Return a list of structure names corresponding to the ascending order of numerical labels in the atlas image.
Parameters
----------
atlas : str or nibabel.Nifti1Image, optional
Path to a NIfTI atlas file.
mapping : str or pandas.DataFrame, optional
Path to a CSV mapping file containing columns which include the string specified under `structure_column` and the strings specified under `label_columns`.
The latter of these columns need to include the numerical values found in the data matrix of the file whose path is assigned to `atlas`.
label_columns : list, optional
Names of columns in the `mapping` file under which numerical labels are specified.
This can be a length-2 list if separate columns exist for left and right labels; in this case the function will perform the differentiation implicitly.
structure_column : str, optional
The name of the column, which in the `mapping` file records the structure names.
remove_zero_label : bool, optional
Whether to disconsider the zero label in the atlas image.
"""
if isinstance(atlas, str):
atlas = path.abspath(path.expanduser(atlas))
atlas = nib.load(atlas)
if isinstance(mapping, str):
mapping = path.abspath(path.expanduser(mapping))
mapping = pd.read_csv(mapping)
atlas_data = atlas.get_data()
atlas_data_unique = np.unique(atlas_data)
if remove_zero_label:
atlas_data_unique = atlas_data_unique[atlas_data_unique != 0]
structure_names = []
for label in atlas_data_unique:
structure_name = []
for label_column in label_columns:
try:
structure = mapping.loc[mapping[label_column]==label,structure_column].values[0]
except IndexError:
pass
else:
if any(i in label_column for i in ['right','Right','RIGHT']):
lateralized_structure = '{} (R)'.format(structure)
structure_name.append(lateralized_structure)
elif any(i in label_column for i in ['left','Left','LEFT']):
lateralized_structure = '{} (L)'.format(structure)
structure_name.append(lateralized_structure)
else:
structure_name.append(structure)
if len(structure_name) != 1:
structure_name = structure
else:
structure_name = structure_name[0]
structure_names.append(structure_name)
return structure_names
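# Example (sketch, relying on the module defaults; the default atlas and
# mapping files must be installed for this to actually run):
#
#     names = ordered_structures()
#     print(names[:5])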
| gpl-3.0 | 6,278,964,047,147,906,000 | 36.610795 | 248 | 0.720296 | false |
Azure/azure-sdk-for-python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2017_06_01/operations/_operations.py | 1 | 4729 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2017_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.OperationListResult"]
"""Lists all of the available Storage Rest API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2017_06_01.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-06-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.Storage/operations'} # type: ignore
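    # Example (illustrative sketch; credential handling and the client class
    # come from the wider azure-mgmt-storage package and are assumed here):
    #
    #     client = StorageManagementClient(credential, subscription_id)
    #     for op in client.operations.list():
    #         print(op.name)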
| mit | -814,979,723,569,684,100 | 42.385321 | 133 | 0.639459 | false |
artisanofcode/flask-generic-views | docs/conf.py | 1 | 10315 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Flask-Generic-Views documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 30 04:16:44 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import ast
import os
import re
import shlex
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Flask-Generic-Views'
copyright = '2015, Daniel Knell'
author = 'Daniel Knell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('../flask_generic_views/__init__.py', 'rb') as f:
release = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
# The short X.Y version.
version = release.split('-')[0]
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-Generic-Viewsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Flask-Generic-Views.tex', 'Flask-Generic-Views Documentation',
'Daniel Knell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'flask-generic-views', 'Flask-Generic-Views Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Flask-Generic-Views', 'Flask-Generic-Views Documentation',
author, 'Flask-Generic-Views', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/dev', None),
'werkzeug': ('http://werkzeug.pocoo.org/docs/', None),
'flask': ('http://flask.pocoo.org/docs/', None),
'jinja': ('http://jinja.pocoo.org/docs/', None),
'sqlalchemy': ('http://www.sqlalchemy.org/docs/', None),
'wtforms': ('https://wtforms.readthedocs.org/en/latest/', None),
'flaskwtf': ('https://flask-wtf.readthedocs.org/en/latest/', None),
'flasksqlalchemy': ('http://flask-sqlalchemy.pocoo.org/', None)}
| mit | 8,997,364,476,504,771,000 | 32.49026 | 90 | 0.693747 | false |
padraic-padraic/StabilizerSearch | stabilizer_search/search/brute_force.py | 1 | 3184 | from itertools import combinations
from math import factorial
from six import PY2
from random import shuffle
from ._search import _Search
from ._result import _Result
from ..core.linalg import get_projector, projection_distance, subspace_distance
from ..stabilizers import get_stabilizer_states
from numba import njit
import numpy as np
def ncr(n, r):
return factorial(n)//factorial(r)//factorial(n-r)
def do_brute_force(n_qubits, stabilizer_states, target, distance_func,
chi=None, lower_bound=1, real_only=False):
"""Function which performs the brute force search for stabilizer rank.
Takes a number of qubits and the target state as input, and returns
success: Bool, did the method succeed?
chi: The rank found
basis: the resulting decomposition"""
dims = pow(2, n_qubits)
shuffle(stabilizer_states)
if chi is None:
for i in range(lower_bound, pow(2, n_qubits)):
print('Test with {} states.'.format(i))
for basis in combinations(stabilizer_states, i):
projector = get_projector([b for b in basis])
distance = distance_func(target, projector)
if np.allclose(distance, 0.):
return True, i, basis
return False, dims, None
else:
print('Searching brute force with chi={}'.format(chi))
# print('Got {} combinations to test'.format(ncr(len(stabilizer_states), chi)))
for basis in combinations(stabilizer_states, chi):
projector = get_projector([b for b in basis])
distance = distance_func(target, projector)
if np.allclose(distance, 0.):
return True, chi, basis
return False, chi, None
def brute_force_search(n_qubits, target, **kwargs):
real_only = kwargs.pop(
'real_only',
np.all(np.nonzero(np.imag(target))))
stabilizer_states = get_stabilizer_states(
n_qubits, real_only=real_only)
if target.shape[1] == 1:
distance_func = projection_distance
else:
distance_func = subspace_distance
    return do_brute_force(n_qubits, stabilizer_states, target, distance_func,
                          real_only=real_only, **kwargs)
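# Example (sketch): a direct brute-force call for a single-qubit target state.
# The target vector is an illustrative |+> state, not data from this package.
#
#     target = np.array([[1.], [1.]]) / np.sqrt(2)
#     stabs = get_stabilizer_states(1)
#     success, chi, basis = do_brute_force(1, stabs, target,
#                                          projection_distance, chi=1)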
class BruteForceResult(_Result):
ostring = """
The Brute Force method for the state {target_state} on {n_qubits} qubits
{success}.
We found a decomposition with stabilizer rank {chi}, which looked like:
{decomposition}.
"""
def __init__(self, *args):
args = list(args)
self.basis = args[-1]
args[-1] = self.parse_decomposition(args[-1])
super(BruteForceResult, self).__init__(*args)
def parse_decomposition(self, decomposition):
"""Additional method for BruceForceResult that takes the decompositions
and converts them to strings."""
if decomposition is None:
return "Bubkis"
return "\n".join(str(state) for state in decomposition)
class BruteForceSearch(_Search):
Result_Class = BruteForceResult
func = staticmethod(brute_force_search)
def __init__(self, *args, **kwargs):
super(BruteForceSearch, self).__init__(*args, **kwargs)
| gpl-3.0 | -47,993,443,974,661,890 | 34.775281 | 87 | 0.643216 | false |
CLVsol/odoo_clvhealth_jcafb | project/install.py | 1 | 6159 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import base
import admin_groups_id
import data_admin_groups_id
import xmlrpclib
import erppeek
def install_update_module(module, update, config_admin=False):
modules_to_update = base.modules_to_update
print '%s%s' % ('--> ', module)
if module in modules_to_update:
new_module = base.install_update_module(module, True)
else:
new_module = base.install_update_module(module, update)
if new_module and config_admin:
method = '%s%s' % ('Administrator_groups_id_', module)
print '%s%s' % ('--> ', method)
methodToCall = getattr(admin_groups_id, method)
result = methodToCall()
method = '%s%s' % ('Data_Administrator_groups_id_', module)
print '%s%s' % ('--> ', method)
methodToCall = getattr(data_admin_groups_id, method)
result = methodToCall()
return new_module
def clvhealth_jcafb_install():
update = base.update
print '--> create_database()'
newDB = base.create_database()
if newDB:
print '--> YourCompany()'
base.YourCompany()
print '--> Administrator()'
base.Administrator()
print '--> Administrator_groups_id_updt()'
base.Administrator_groups_id_updt()
print '--> Demo_User()'
base.Demo_User()
print '--> Data_Administrator_User()'
base.Data_Administrator_User()
else:
client = erppeek.Client(base.server,
db=base.dbname,
user=base.admin_user,
password=base.admin_user_pw,
verbose=False)
proxy = client.model('ir.module.module')
proxy.update_list()
new_module = install_update_module('mail', update)
new_module = install_update_module('hr', update)
new_module = install_update_module('website', update)
new_module = install_update_module('marketing', update)
new_module = install_update_module('survey', update)
new_module = install_update_module('l10n_br_base', update)
new_module = install_update_module('l10n_br_zip', update)
# new_module = install_update_module('l10n_br_data_zip', update)
new_module = install_update_module('clv_base', update, True)
new_module = install_update_module('clv_base_cst', update)
new_module = install_update_module('clv_tag', update, True)
new_module = install_update_module('clv_tag_cst', update)
new_module = install_update_module('clv_annotation', update, True)
new_module = install_update_module('clv_annotation_cst', update)
new_module = install_update_module('clv_document', update, True)
new_module = install_update_module('clv_document_cst', update)
new_module = install_update_module('clv_address', update, True)
new_module = install_update_module('l10n_br_clv_address', update)
new_module = install_update_module('clv_address_cst', update)
new_module = install_update_module('clv_person', update, True)
new_module = install_update_module('l10n_br_clv_person', update)
new_module = install_update_module('clv_person_cst', update)
new_module = install_update_module('clv_family', update, True)
new_module = install_update_module('clv_family_cst', update)
new_module = install_update_module('clv_community', update, True)
new_module = install_update_module('clv_community_cst', update)
new_module = install_update_module('clv_patient', update, True)
new_module = install_update_module('clv_patient_cst', update)
new_module = install_update_module('clv_person_mng', update, True)
new_module = install_update_module('l10n_br_clv_person_mng', update)
new_module = install_update_module('clv_employee', update)
new_module = install_update_module('clv_employee_cst', update)
new_module = install_update_module('jcafb_2016_surveys', update)
new_module = install_update_module('jcafb_2016_consent_forms', update)
# new_module = install_update_module('product', update)
new_module = install_update_module('clv_lab_test', update, True)
new_module = install_update_module('clv_lab_test_cst', update)
# new_module = install_update_module('clv_pointing', update, True)
def secondsToStr(t):
return "%d:%02d:%02d.%03d" % \
reduce(lambda ll, b: divmod(ll[0], b) + ll[1:], [(t*1000,), 1000, 60, 60])
if __name__ == '__main__':
from time import time
base.get_arguments()
start = time()
print '--> Executing clvhealth_jcafb_install.py...'
print '--> Executing clvhealth_jcafb_install()...'
clvhealth_jcafb_install()
print '--> clvhealth_jcafb_install.py'
print '--> Execution time:', secondsToStr(time() - start)
| agpl-3.0 | 411,191,009,144,921,200 | 34.194286 | 82 | 0.585972 | false |
mattstibbs/blockbuster-server | blockbuster/messaging/bb_pushover_handler.py | 1 | 1566 | import http.client
import urllib
import logging
import redis
from rq import Queue
import blockbuster.config as config
import blockbuster.bb_auditlogger as bb_auditlogger
# Set up RQ queue
conn = redis.from_url(config.REDIS_URL)
q = Queue(connection=conn)
log = logging.getLogger(__name__)
def send_push_notification(user_key, message, message_title, service_number):
    q.enqueue(send_push_message, user_key, message, message_title, service_number)
    log.debug("Pushover notification queued.")
def send_push_message(user_key, message, message_title, service_number):
try:
conn = http.client.HTTPSConnection("api.pushover.net:443")
conn.request("POST", "/1/messages.json",
urllib.parse.urlencode({
"token": config.pushover_app_token,
"user": user_key,
"title": message_title,
"message": message,
"url": "sms:" + service_number,
"url_title": "Send SMS to BlockBuster",
"priority": 1
}), {"Content-type": "application/x-www-form-urlencoded"})
log.debug(conn.getresponse())
audit_description = "Key:" + user_key + \
";Title:" + message_title + \
";Message:" + message
bb_auditlogger.BBAuditLoggerFactory().create().logAudit('bgwrk', 'SEND-PUSHOVER', audit_description)
print("Pushover notification sent to " + user_key)
except Exception as e:
log.error("Error sending Pushover notification \n" + str(e))
bb_auditlogger.BBAuditLoggerFactory().create().logException('bgwrk','SEND-PUSHOVER', str(e))
| mit | -340,374,393,596,961,600 | 30.32 | 108 | 0.619413 | false |
richardkiss/pycoinnet | pycoinnet/examples/address_keeper.py | 1 | 5205 | #!/usr/bin/env python
"""
This bitcoin client does little more than try to keep an up-to-date
list of available clients in a text file "addresses".
"""
import asyncio
import binascii
import logging
import random
import time
from pycoinnet.helpers.standards import initial_handshake, version_data_for_peer
from pycoinnet.peer.BitcoinPeerProtocol import BitcoinPeerProtocol
class AddressDB(object):
def __init__(self, path):
self.path = path
self.addresses = self.load_addresses()
self.shuffled = []
def load_addresses(self):
"""
Return an array of (host, port, timestamp) triples.
"""
addresses = {}
try:
with open(self.path) as f:
for l in f:
timestamp, host, port = l[:-1].split("/")
timestamp = int(timestamp)
port = int(port)
addresses[(host, port)] = timestamp
except Exception:
logging.error("can't open %s, using default", self.path)
for h in [
"bitseed.xf2.org", "dnsseed.bluematt.me",
"seed.bitcoin.sipa.be", "dnsseed.bitcoin.dashjr.org"
]:
addresses[(h, 8333)] = 1
return addresses
def next_address(self):
if len(self.shuffled) == 0:
self.shuffled = list(self.addresses.keys())
random.shuffle(self.shuffled)
return self.shuffled.pop()
def remove_address(self, host, port):
key = (host, port)
del self.addresses[key]
def add_address(self, host, port, timestamp):
key = (host, port)
old_timestamp = self.addresses.get(key) or timestamp
self.addresses[key] = max(timestamp, old_timestamp)
def add_addresses(self, addresses):
for timestamp, host, port in addresses:
self.add_address(host, port, timestamp)
def save(self):
if len(self.addresses) < 2:
logging.error("too few addresses: not overwriting")
return
with open(self.path, "w") as f:
for host, port in self.addresses:
f.write(
"%d/%s/%d\n" % (self.addresses[(host, port)], host, port))
class AddressKeeper:
def __init__(self, peer, address_db):
next_message = peer.new_get_next_message_f(lambda name, data: name == 'addr')
def get_msg_addr():
peer.send_msg("getaddr")
name, data = yield from next_message()
date_address_tuples = data["date_address_tuples"]
logging.info("got %s message from %s with %d entries", name, peer, len(date_address_tuples))
address_db.add_addresses(
(timestamp, address.ip_address.exploded, address.port)
for timestamp, address in date_address_tuples)
address_db.save()
# we got addresses from this client. Exit loop and disconnect
peer.transport.close()
self.get_addr_task = asyncio.Task(get_msg_addr())
@asyncio.coroutine
def connect_to_remote(event_loop, magic_header, address_db, connections):
host, port = address_db.next_address()
logging.info("connecting to %s port %d", host, port)
try:
transport, peer = yield from event_loop.create_connection(
lambda: BitcoinPeerProtocol(magic_header),
host=host, port=port)
except Exception:
logging.exception("failed to connect to %s:%d", host, port)
address_db.remove_address(host, port)
address_db.save()
return
try:
logging.info("connected to %s:%d", host, port)
yield from asyncio.wait_for(peer.connection_made_future, timeout=None)
version_parameters = version_data_for_peer(peer)
yield from initial_handshake(peer, version_parameters)
AddressKeeper(peer, address_db)
address_db.add_address(host, port, int(time.time()))
connections.add(peer)
except Exception:
logging.exception("exception talking to %s:%d", host, port)
logging.info("done talking to %s:%d", host, port)
def keep_minimum_connections(event_loop, min_connection_count=4):
connections = set()
address_db = AddressDB("addresses.txt")
magic_header = binascii.unhexlify('F9BEB4D9') # use 0B110907 for testnet3
tasks = set()
while 1:
logging.debug("connection count is %d", len(connections))
difference = min_connection_count - len(connections)
for i in range(difference*2):
f = asyncio.Task(connect_to_remote(
event_loop, magic_header, address_db, connections))
tasks.add(f)
            f.add_done_callback(lambda x: tasks.discard(f))
yield from asyncio.sleep(10)
def main():
logging.basicConfig(
level=logging.INFO,
format=('%(asctime)s [%(process)d] [%(levelname)s] '
'%(filename)s:%(lineno)d %(message)s'))
event_loop = asyncio.get_event_loop()
# kmc_task is never used, but if we don't keep a reference, the
# Task is collected (and stops)
kmc_task = asyncio.Task(keep_minimum_connections(event_loop))
event_loop.run_forever()
main()
| mit | -1,233,479,060,370,532,000 | 34.408163 | 104 | 0.603266 | false |
nesl/mercury | Services/Mapping/rescaleScripts/rescaleBatch.py | 1 | 1170 | import os
import sys
filenames = [
'Albuquerque_6x6.tfix',
'Atlanta_6x6.tfix',
'Austin_6x6.tfix',
'Baltimore_6x6.tfix',
'Boston_6x6.tfix',
'Charlotte_6x6.tfix',
'Chicago_6x6.tfix',
'Cleveland_6x6.tfix',
'Columbus_6x6.tfix',
'Dallas_6x6.tfix',
'Denver_6x6.tfix',
'Detroit_6x6.tfix',
'El_Paso_6x6.tfix',
'Fort_Worth_6x6.tfix',
'Fresno_6x6.tfix',
'Houston_6x6.tfix',
'Indianapolis_6x6.tfix',
'Jacksonville_6x6.tfix',
'Kansas_City_2_6x6.tfix',
'Kansas_City_6x6.tfix',
'Las_Vegas_6x6.tfix',
'Long_Beach_6x6.tfix',
'Los_Angeles_6x6.tfix',
'Memphis_6x6.tfix',
'Mesa_6x6.tfix',
'Milwaukee_6x6.tfix',
'Nashville_6x6.tfix',
'New_Orleans_6x6.tfix',
'New_York_6x6.tfix',
'Oklahoma_City_6x6.tfix',
'Omaha_6x6.tfix',
'Philadelphia_6x6.tfix',
'Phoneix_6x6.tfix',
'Portland_6x6.tfix',
'Sacramento_6x6.tfix',
'San_Antonio_6x6.tfix',
'San_Diego_6x6.tfix',
'San_Francisco_6x6.tfix',
'San_Jose_6x6.tfix',
'San_Juan_6x6.tfix',
'Seattle_6x6.tfix',
'Tucson_6x6.tfix',
'Virginia_Beach_6x6.tfix',
'Washington_6x6.tfix',
]
for x in filenames:
for s in range(2, 6):
cmd = 'python rescale.py ../../../Data/trajectorySets/' + x + ' ' + str(s)
print(cmd)
os.system(cmd)
| gpl-2.0 | 9,173,531,642,800,253,000 | 20.272727 | 82 | 0.677778 | false |
chadversary/chromiumos.chromite | cbuildbot/stages/build_stages.py | 1 | 12895 | # Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing the build stages."""
import functools
import glob
import os
import shutil
from chromite.cbuildbot import commands
from chromite.cbuildbot import constants
from chromite.cbuildbot import failures_lib
from chromite.cbuildbot import repository
from chromite.cbuildbot.stages import generic_stages
from chromite.cbuildbot.stages import test_stages
from chromite.lib import cros_build_lib
from chromite.lib import git
from chromite.lib import osutils
from chromite.lib import parallel
class CleanUpStage(generic_stages.BuilderStage):
"""Stages that cleans up build artifacts from previous runs.
This stage cleans up previous KVM state, temporary git commits,
clobbers, and wipes tmp inside the chroot.
"""
option_name = 'clean'
def _CleanChroot(self):
commands.CleanupChromeKeywordsFile(self._boards,
self._build_root)
chroot_tmpdir = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR,
'tmp')
if os.path.exists(chroot_tmpdir):
cros_build_lib.SudoRunCommand(['rm', '-rf', chroot_tmpdir],
print_cmd=False)
cros_build_lib.SudoRunCommand(['mkdir', '--mode', '1777', chroot_tmpdir],
print_cmd=False)
def _DeleteChroot(self):
chroot = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR)
if os.path.exists(chroot):
# At this stage, it's not safe to run the cros_sdk inside the buildroot
# itself because we haven't sync'd yet, and the version of the chromite
# in there might be broken. Since we've already unmounted everything in
# there, we can just remove it using rm -rf.
osutils.RmDir(chroot, ignore_missing=True, sudo=True)
def _DeleteArchivedTrybotImages(self):
"""Clear all previous archive images to save space."""
for trybot in (False, True):
archive_root = self._run.GetArchive().GetLocalArchiveRoot(trybot=trybot)
osutils.RmDir(archive_root, ignore_missing=True)
def _DeleteArchivedPerfResults(self):
"""Clear any previously stashed perf results from hw testing."""
for result in glob.glob(os.path.join(
self._run.options.log_dir,
'*.%s' % test_stages.HWTestStage.PERF_RESULTS_EXTENSION)):
os.remove(result)
def _DeleteChromeBuildOutput(self):
chrome_src = os.path.join(self._run.options.chrome_root, 'src')
for out_dir in glob.glob(os.path.join(chrome_src, 'out_*')):
osutils.RmDir(out_dir)
def _DeleteAutotestSitePackages(self):
"""Clears any previously downloaded site-packages."""
site_packages_dir = os.path.join(self._build_root, 'src', 'third_party',
'autotest', 'files', 'site-packages')
# Note that these shouldn't be recreated but might be around from stale
# builders.
if os.path.exists(site_packages_dir):
shutil.rmtree(site_packages_dir)
@failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
def PerformStage(self):
if (not (self._run.options.buildbot or self._run.options.remote_trybot)
and self._run.options.clobber):
if not commands.ValidateClobber(self._build_root):
cros_build_lib.Die("--clobber in local mode must be approved.")
# If we can't get a manifest out of it, then it's not usable and must be
# clobbered.
manifest = None
if not self._run.options.clobber:
try:
manifest = git.ManifestCheckout.Cached(self._build_root, search=False)
except (KeyboardInterrupt, MemoryError, SystemExit):
raise
except Exception as e:
# Either there is no repo there, or the manifest isn't usable. If the
# directory exists, log the exception for debugging reasons. Either
# way, the checkout needs to be wiped since it's in an unknown
# state.
if os.path.exists(self._build_root):
cros_build_lib.Warning("ManifestCheckout at %s is unusable: %s",
self._build_root, e)
# Clean mount points first to be safe about deleting.
commands.CleanUpMountPoints(self._build_root)
if manifest is None:
self._DeleteChroot()
repository.ClearBuildRoot(self._build_root,
self._run.options.preserve_paths)
else:
tasks = [functools.partial(commands.BuildRootGitCleanup,
self._build_root),
functools.partial(commands.WipeOldOutput, self._build_root),
self._DeleteArchivedTrybotImages,
self._DeleteArchivedPerfResults,
self._DeleteAutotestSitePackages]
if self._run.options.chrome_root:
tasks.append(self._DeleteChromeBuildOutput)
if self._run.config.chroot_replace and self._run.options.build:
tasks.append(self._DeleteChroot)
else:
tasks.append(self._CleanChroot)
parallel.RunParallelSteps(tasks)
class InitSDKStage(generic_stages.BuilderStage):
"""Stage that is responsible for initializing the SDK."""
option_name = 'build'
def __init__(self, builder_run, chroot_replace=False, **kwargs):
"""InitSDK constructor.
Args:
builder_run: Builder run instance for this run.
chroot_replace: If True, force the chroot to be replaced.
"""
super(InitSDKStage, self).__init__(builder_run, **kwargs)
self.force_chroot_replace = chroot_replace
def PerformStage(self):
chroot_path = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR)
replace = self._run.config.chroot_replace or self.force_chroot_replace
pre_ver = post_ver = None
if os.path.isdir(self._build_root) and not replace:
try:
pre_ver = cros_build_lib.GetChrootVersion(chroot=chroot_path)
commands.RunChrootUpgradeHooks(self._build_root)
except failures_lib.BuildScriptFailure:
cros_build_lib.PrintBuildbotStepText('Replacing broken chroot')
cros_build_lib.PrintBuildbotStepWarnings()
replace = True
if not os.path.isdir(chroot_path) or replace:
use_sdk = (self._run.config.use_sdk and not self._run.options.nosdk)
pre_ver = None
commands.MakeChroot(
buildroot=self._build_root,
replace=replace,
use_sdk=use_sdk,
chrome_root=self._run.options.chrome_root,
extra_env=self._portage_extra_env)
post_ver = cros_build_lib.GetChrootVersion(chroot=chroot_path)
if pre_ver is not None and pre_ver != post_ver:
cros_build_lib.PrintBuildbotStepText('%s->%s' % (pre_ver, post_ver))
else:
cros_build_lib.PrintBuildbotStepText(post_ver)
commands.SetSharedUserPassword(
self._build_root,
password=self._run.config.shared_user_password)
class SetupBoardStage(generic_stages.BoardSpecificBuilderStage, InitSDKStage):
"""Stage that is responsible for building host pkgs and setting up a board."""
option_name = 'build'
def PerformStage(self):
# Calculate whether we should use binary packages.
usepkg = (self._run.config.usepkg_setup_board and
not self._latest_toolchain)
# We need to run chroot updates on most builders because they uprev after
# the InitSDK stage. For the SDK builder, we can skip updates because uprev
# is run prior to InitSDK. This is not just an optimization: It helps
# workaround http://crbug.com/225509
chroot_upgrade = (
self._run.config.build_type != constants.CHROOT_BUILDER_TYPE)
# Iterate through boards to setup.
chroot_path = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR)
# Only update the board if we need to do so.
board_path = os.path.join(chroot_path, 'build', self._current_board)
if not os.path.isdir(board_path) or chroot_upgrade:
commands.SetupBoard(
self._build_root, board=self._current_board, usepkg=usepkg,
chrome_binhost_only=self._run.config.chrome_binhost_only,
force=self._run.config.board_replace,
extra_env=self._portage_extra_env, chroot_upgrade=chroot_upgrade,
profile=self._run.options.profile or self._run.config.profile)
class BuildPackagesStage(generic_stages.BoardSpecificBuilderStage,
generic_stages.ArchivingStageMixin):
"""Build Chromium OS packages."""
option_name = 'build'
def __init__(self, builder_run, board, afdo_generate_min=False,
afdo_use=False, **kwargs):
super(BuildPackagesStage, self).__init__(builder_run, board, **kwargs)
self._afdo_generate_min = afdo_generate_min
assert not afdo_generate_min or not afdo_use
useflags = self._run.config.useflags[:]
if afdo_use:
self.name += ' [%s]' % constants.USE_AFDO_USE
useflags.append(constants.USE_AFDO_USE)
if useflags:
self._portage_extra_env.setdefault('USE', '')
self._portage_extra_env['USE'] += ' ' + ' '.join(useflags)
def PerformStage(self):
# If we have rietveld patches, always compile Chrome from source.
noworkon = not self._run.options.rietveld_patches
commands.Build(self._build_root,
self._current_board,
build_autotest=self._run.ShouldBuildAutotest(),
usepkg=self._run.config.usepkg_build_packages,
chrome_binhost_only=self._run.config.chrome_binhost_only,
packages=self._run.config.packages,
skip_chroot_upgrade=True,
chrome_root=self._run.options.chrome_root,
noworkon=noworkon,
extra_env=self._portage_extra_env)
class BuildImageStage(BuildPackagesStage):
"""Build standard Chromium OS images."""
option_name = 'build'
config_name = 'images'
def _BuildImages(self):
# We only build base, dev, and test images from this stage.
if self._afdo_generate_min:
images_can_build = set(['test'])
else:
images_can_build = set(['base', 'dev', 'test'])
images_to_build = set(self._run.config.images).intersection(
images_can_build)
version = self._run.attrs.release_tag
disk_layout = self._run.config.disk_layout
if self._afdo_generate_min and version:
version = '%s-afdo-generate' % version
rootfs_verification = self._run.config.rootfs_verification
commands.BuildImage(self._build_root,
self._current_board,
sorted(images_to_build),
rootfs_verification=rootfs_verification,
version=version,
disk_layout=disk_layout,
extra_env=self._portage_extra_env)
# Update link to latest image.
latest_image = os.readlink(self.GetImageDirSymlink('latest'))
cbuildbot_image_link = self.GetImageDirSymlink()
if os.path.lexists(cbuildbot_image_link):
os.remove(cbuildbot_image_link)
os.symlink(latest_image, cbuildbot_image_link)
self.board_runattrs.SetParallel('images_generated', True)
parallel.RunParallelSteps(
[self._BuildVMImage, lambda: self._GenerateAuZip(cbuildbot_image_link)])
def _BuildVMImage(self):
if self._run.config.vm_tests and not self._afdo_generate_min:
commands.BuildVMImageForTesting(
self._build_root,
self._current_board,
disk_layout=self._run.config.disk_vm_layout,
extra_env=self._portage_extra_env)
def _GenerateAuZip(self, image_dir):
"""Create au-generator.zip."""
if not self._afdo_generate_min:
commands.GenerateAuZip(self._build_root,
image_dir,
extra_env=self._portage_extra_env)
def _HandleStageException(self, exc_info):
"""Tell other stages to not wait on us if we die for some reason."""
self.board_runattrs.SetParallelDefault('images_generated', False)
return super(BuildImageStage, self)._HandleStageException(exc_info)
def PerformStage(self):
self._BuildImages()
class UprevStage(generic_stages.BuilderStage):
"""Stage that uprevs Chromium OS packages that the builder intends to
validate.
"""
config_name = 'uprev'
option_name = 'uprev'
def __init__(self, builder_run, boards=None, enter_chroot=True, **kwargs):
super(UprevStage, self).__init__(builder_run, **kwargs)
self._enter_chroot = enter_chroot
if boards is not None:
self._boards = boards
def PerformStage(self):
# Perform other uprevs.
overlays, _ = self._ExtractOverlays()
commands.UprevPackages(self._build_root,
self._boards,
overlays,
enter_chroot=self._enter_chroot)
| bsd-3-clause | 7,007,675,836,784,965,000 | 38.194529 | 80 | 0.658007 | false |
dzolnierz/mysql-utilities | mysql/utilities/common/sql_transform.py | 1 | 61521 | #
# Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This file contains the methods for building SQL statements for definition
differences.
"""
import re
from mysql.utilities.exception import UtilError, UtilDBError
from mysql.connector.conversion import MySQLConverter
_IGNORE_COLUMN = -1 # Ignore column in comparisons and transformations
_FORCE_COLUMN = -2 # Force column to be included in build phase
# Define column control symbols
_DROP_COL, _ADD_COL, _CHANGE_COL_TYPE, _CHANGE_COL_ORDER = range(0, 4)
# List of database objects for enumeration
_DATABASE, _TABLE, _VIEW, _TRIG, _PROC, _FUNC, _EVENT, _GRANT = "DATABASE", \
"TABLE", "VIEW", "TRIGGER", "PROCEDURE", "FUNCTION", "EVENT", "GRANT"
# Define database INFORMATION_SCHEMA column numbers
_DB_NAME, _DB_CHARSET, _DB_COLLATION, _DB_SQL_PATH = range(0, 4)
# Define table INFORMATION_SCHEMA column numbers and index values
_COLUMN_ORDINAL_POSITION, _COLUMN_NAME, _COLUMN_TYPE, _COLUMN_IS_NULLABLE, \
_COLUMN_DEFAULT, _COLUMN_EXTRA, _COLUMN_COMMENT, _COLUMN_KEY = range(0, 8)
_TABLE_DEF, _COLUMN_DEF, _PART_DEF = range(0, 3)
_TABLE_DB, _TABLE_NAME, _TABLE_ENGINE, _TABLE_AUTO_INCREMENT, \
_TABLE_AVG_ROW_LENGTH, _TABLE_CHECKSUM, _TABLE_COLLATION, _TABLE_COMMENT, \
_TABLE_ROW_FORMAT, _TABLE_CREATE_OPTIONS = range(0, 10)
# Define view INFORMATION_SCHEMA column numbers
_VIEW_DB, _VIEW_NAME, _VIEW_BODY, _VIEW_CHECK, _VIEW_DEFINER, \
_VIEW_SECURITY = range(0, 6)
# Define trigger INFORMATION_SCHEMA column numbers
_TRIGGER_DB, _TRIGGER_NAME, _TRIGGER_EVENT, _TRIGGER_TABLE, _TRIGGER_BODY, \
_TRIGGER_TIME, _TRIGGER_DEFINER = range(0, 7)
# Define routine INFORMATION_SCHEMA column numbers
_ROUTINE_DB, _ROUTINE_NAME, _ROUTINE_BODY, _ROUTINE_SQL_DATA_ACCESS, \
_ROUTINE_SECURITY_TYPE, _ROUTINE_COMMENT, _ROUTINE_DEFINER, \
_ROUTINE_PARAMS, _ROUTINE_RETURNS, _ROUTINE_IS_DETERMINISTIC = range(0, 10)
# Define event INFORMATION_SCHEMA column numbers
_EVENT_DB, _EVENT_NAME, _EVENT_DEFINER, _EVENT_BODY, _EVENT_TYPE, \
_EVENT_INTERVAL_FIELD, _EVENT_INTERVAL_VALUE, _EVENT_STATUS, \
_EVENT_ON_COMPLETION, _EVENT_STARTS, _EVENT_ENDS = range(0, 11)
# Get the constraints but ignore primary keys
_CONSTRAINT_QUERY = """
SELECT CONSTRAINT_NAME, CONSTRAINT_TYPE
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS
WHERE TABLE_SCHEMA = '%(db)s' AND TABLE_NAME = '%(name)s'
and CONSTRAINT_TYPE != 'PRIMARY KEY'
and CONSTRAINT_TYPE != 'UNIQUE'
"""
def to_sql(obj):
"""Convert a value to a suitable SQL value placing quotes where needed.
obj[in] object (value) to convert
Returns (string) converted value
"""
to_sql.__dict__.setdefault('converter', MySQLConverter())
obj = to_sql.converter.escape(obj) # pylint: disable=E1101
return str(to_sql.converter.quote(obj)) # pylint: disable=E1101
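# Usage sketch (illustrative values; exact escaping is delegated to
# mysql.connector's MySQLConverter):
#   to_sql("O'Brien")  ->  "'O\'Brien'"   (escaped and quoted)
#   to_sql(42)         ->  "42"           (numbers are rendered unquoted)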
def quote_with_backticks(identifier):
"""Quote the given identifier with backticks, converting backticks (`) in
the identifier name with the correct escape sequence (``).
identifier[in] identifier to quote.
Returns string with the identifier quoted with backticks.
"""
return "`" + identifier.replace("`", "``") + "`"
def quote_with_backticks_definer(definer):
"""Quote the given definer clause with backticks.
This functions quotes the given definer clause with backticks, converting
backticks (`) in the string with the correct escape sequence (``).
definer[in] definer clause to quote.
Returns string with the definer quoted with backticks.
"""
if not definer:
return definer
parts = definer.split('@')
if len(parts) != 2:
return definer
return '@'.join([quote_with_backticks(parts[0]),
quote_with_backticks(parts[1])])
def remove_backtick_quoting(identifier):
"""Remove backtick quoting from the given identifier, reverting the
escape sequence (``) to a backtick (`) in the identifier name.
identifier[in] identifier to remove backtick quotes.
Returns string with the identifier without backtick quotes.
"""
# remove backtick quotes
identifier = identifier[1:-1]
# Revert backtick escape sequence
return identifier.replace("``", "`")
def is_quoted_with_backticks(identifier):
"""Check if the given identifier is quoted with backticks.
identifier[in] identifier to check.
Returns True if the identifier has backtick quotes, and False otherwise.
"""
return identifier[0] == "`" and identifier[-1] == "`"
def convert_special_characters(str_val):
"""Convert especial characters in the string to respective escape sequence.
This method converts special characters in the input string to the
corresponding MySQL escape sequence, according to:
http://dev.mysql.com/doc/en/string-literals.html#character-escape-sequences
str_val[in] string value to be converted.
Returns the input string with all special characters replaced by its
respective escape sequence.
"""
# Check if the input value is a string before performing replacement.
if str_val and isinstance(str_val, basestring):
# First replace backslash '\' character, to avoid replacing '\' in
        # further escape sequences. backslash_re matches '\' not followed by %
        # or _ (as \% and \_ do not need to be replaced), and also matches '\'
        # at the end of the string so it is replaced correctly.
backslash_re = r'\\(?=[^%_])|\\\Z'
res = re.sub(backslash_re, r'\\\\', str_val)
# Replace remaining especial characters
res = res.replace('\x00', '\\0') # \0
res = res.replace("'", "\\'") # \'
res = res.replace('"', '\\"') # \"
res = res.replace('\b', '\\b') # \b
res = res.replace('\n', '\\n') # \n
res = res.replace('\r', '\\r') # \r
res = res.replace('\t', '\\t') # \t
res = res.replace(chr(26), '\\Z') # \Z
return res
else:
# Not a string, return the input value
return str_val
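# Usage sketch (illustrative values): a single quote becomes \' and a newline
# becomes the two-character sequence \n, so a string such as "it's" is
# returned as "it\'s"; non-string inputs (e.g. integers or None) are returned
# unchanged.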
def build_pkey_where_clause(table, row):
"""Build the WHERE clause based on the primary keys
table[in] instance of Table class for table
row[in] row of data
Returns string - WHERE clause or "" if no keys
"""
where_str = ""
pkeys = table.get_primary_index()
if len(pkeys) > 0:
col_names = table.get_col_names()
where_str += "WHERE "
pkey_cond_lst = []
for pkey in pkeys:
key_col = pkey[0] # get the column name
col_data = row[col_names.index(key_col)] # get column value
# quote key column with backticks
q_key_col = quote_with_backticks(key_col)
pkey_cond_lst.append("{0} = {1}".format(q_key_col,
to_sql(col_data)))
where_str = "{0}{1}".format(where_str, ' AND '.join(pkey_cond_lst))
return where_str
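# Usage sketch (hypothetical table): for a table whose primary index is the
# single column `id` and a row in which that column holds 7, the function
# returns "WHERE `id` = 7"; composite keys are joined with AND, and a table
# without a primary index yields an empty string.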
def build_set_clauses(table, table_cols, dest_row, src_row):
"""Build the SET clauses for an UPDATE statement
table[in] instance of Table class for table
dest_row[in] row of data for destination (to be changed)
src_row[in] row of data for source (to be changed to)
Returns string - WHERE clause or "" if no keys
"""
table.get_column_metadata()
# do SETs
set_str = ""
do_comma = False
for col_idx in range(0, len(table_cols)):
if dest_row[col_idx] != src_row[col_idx]:
# do comma
if do_comma:
set_str += ", "
else:
set_str = "SET "
do_comma = True
# Check for NULL for non-text fields that have no value in new row
if src_row[col_idx] is None:
set_str += "%s = %s" % (table_cols[col_idx], "NULL")
else:
set_str += "%s = %s" % (table_cols[col_idx],
to_sql(src_row[col_idx]))
return set_str
def transform_data(destination, source, operation, rows):
"""Transform data for tables.
This method will generate INSERT, UPDATE, and DELETE statements for
transforming data found to differ among tables.
destination[in] Table class instance of the destination
source[in] Table class instance of the source
operation[in] specify if INSERT, UPDATE, or DELETE
rows[in] rows for transformation as follows:
UPDATE - tuple (old, new)
DELETE - list to delete
INSERT - list to insert
Returns list - SQL statement(s) for transforming the data or a warning
if the columns differ between the tables
"""
statements = []
# Get column names quoted with backticks
dest_cols = destination.get_col_names(quote_backticks=True)
src_cols = source.get_col_names(quote_backticks=True)
# We cannot do the data changes if the columns are different in the
# destination and source!
if dest_cols != src_cols:
return ["WARNING: Cannot generate SQL UPDATE commands for "
"tables whose definitions are different. Check the "
"table definitions for changes."]
data_op = operation.upper()
if data_op == "INSERT":
for row in rows:
formatted_row = []
for col in row:
formatted_row.append(to_sql(col))
statements.append("INSERT INTO %s (%s) VALUES(%s);" %
(destination.q_table, ', '.join(dest_cols),
', '.join(formatted_row)))
elif data_op == "UPDATE":
for i in range(0, len(rows[0])):
row1 = rows[0][i]
row2 = rows[1][i]
sql_str = "UPDATE %s" % destination.q_table
sql_str += " %s" % build_set_clauses(source, src_cols, row1, row2)
sql_str += " %s" % build_pkey_where_clause(source, row2)
statements.append("%s;" % sql_str)
elif data_op == "DELETE":
for row in rows:
sql_str = "DELETE FROM %s " % destination.q_table
sql_str += build_pkey_where_clause(source, row)
statements.append("%s;" % sql_str)
else:
raise UtilError("Unknown data transformation option: %s." % data_op)
return statements
class SQLTransformer(object):
"""
    The SQLTransformer class provides a mechanism for generating SQL statements
for conforming an object to another for a specific database. For example,
it will generate the ALTER statement(s) for transforming a table definition
as well as the UPDATE statement(s) for transforming a row in the table.
Note: This class is designed to work with the output of the Database class
method get_db_objects with full INFORMATION_SCHEMA columns for the
object definition.
This class contains transformation methods for the objects supported.
Each object's ALTER statement is generated using the following steps.
Note: tables are a bit different due to their many parts but still follow
the general layout.
- a list of dictionaries structure is built to contain the parts of the
statement where each dictionary has fields for format ('fmt') that
contains the string format for building the value, column ('col') for
containing the column number for the value, and value ('val') which
is for holding the value.
- any special formatting, conditionals, etc. concerning the fields is
processed. In some cases this means filling the 'val' for the field.
- the structure values are filled
    - the statement is built by concatenating those fields where 'val' is
not empty.
You can tell the fill values phase to ignore filling the value by using
_IGNORE_COLUMN as the column number.
You can tell the build phase to include the field (say after special
processing has filled the value) by using _FORCE_COLUMN as the column
number.
"""
def __init__(self, destination_db, source_db, destination,
source, obj_type, verbosity, options=None):
"""Constructor
destination_db[in] destination Database instance
source_db[in] source Database instance
destination[in] the original object definition or data
source[in] the source object definition or data
obj_type[in] type of object
verbosity[in] verbosity level
options[in] Options dictionary
"""
self.destination_db = destination_db
self.source_db = source_db
self.destination = destination
self.source = source
self.obj_type = obj_type.upper()
self.verbosity = verbosity
self.dest_tbl = None
self.src_tbl = None
if options is None:
options = {}
self.skip_table_opts = options.get("skip_table_opts", False)
def transform_definition(self):
"""Transform an object definition
This method will transform an object definition to match the source
configuration. It returns the appropriate SQL statement(s) to
transform the object or None if no transformation is needed.
Note: the method will throw an exception if the transformation cannot
be completed or there is another error during processing
Returns list - SQL statement(s) for transforming the object
"""
trans_method = {
_DATABASE: self._transform_database,
_TABLE: self._transform_table,
_VIEW: self._transform_view,
_TRIG: self._transform_trigger,
_PROC: self._transform_routine,
_FUNC: self._transform_routine,
_EVENT: self._transform_event,
}
try:
return trans_method[self.obj_type]()
except IndexError:
raise UtilDBError("Unknown object type '%s' for transformation." %
self.obj_type)
def _transform_database(self):
"""Transform a database definition
This method will transform a database definition to match the source
configuration. It returns the ALTER DATABASE SQL statement to
transform the object or None if no transformation is needed.
Returns list - ALTER DATABASE statement for transforming the database
"""
statements = []
# build a list of the parts
statement_parts = [
# preamble
{'fmt': "%s", 'col': _IGNORE_COLUMN, 'val': "ALTER DATABASE"},
# object name
{'fmt': " %s", 'col': _IGNORE_COLUMN,
'val': self.destination[_DB_NAME]},
# charset
{'fmt': " CHARACTER SET %s", 'col': _DB_CHARSET, 'val': ""},
# collation
{'fmt': " COLLATE = %s", 'col': _DB_COLLATION, 'val': ""},
]
# if no changes, return None
if not self._fill_values(statement_parts, False):
return None
sql_stmt = "%s;" % self._build_statement(statement_parts)
statements.append(sql_stmt)
return statements
@staticmethod
def _convert_option_values(option_values):
"""Convert a list of option=value to a list of names and name, value
pairs.
This method takes a list like the following where each element is a
name=value string:
(a=1, b=3, c=5, d=4)
        and turns it into a tuple containing a list of names and a list of
name,value pairs as follows:
((a,b,c,d), ((a,1),(b,3),(c,5),(d,4)))
Value pairs that do not have a value are ignored. For example,
'a=3, b, c=2' will ignore 'b' but return a and c.
option_values[in] list of name=value strings
Returns tuple - (list of names, list of (name, value))
"""
names = []
name_values = []
for value_pair in option_values:
name_value = value_pair.split('=')
# Ignore any value pairs that do not have a value
if len(name_value[0]) > 0:
names.append(name_value[0].upper())
name_values.append(name_value)
return (names, name_values)
@staticmethod
def _find_value(name, name_values):
"""Find a value for a name in a list of tuple (name, value)
name[in] name of pair
name_values[in] list of tuples
Returns string - value at index of match or None
"""
name = name.upper()
for item in name_values:
if item[0].upper() == name:
try:
return item[1]
except IndexError:
return None
return None
def _parse_table_options(self, destination, source):
"""Parse the table options into a list and compare.
This method returns a comma-separated list of table options that
differ from the destination to the source.
destination[in] the original object definition or data
source[in] the source object definition or data
Returns string - comma-separated values for table options that differ
or None if options are found in the destination that
are not in the source. These, we do not know how
to remove or turn off without extensive, specialized
code.
"""
from mysql.utilities.common.dbcompare import get_common_lists
# Here we have a comma-separated list of options in the form
# name=value. To determine the inclusion/exclusion lists, we
# must compare on names only so we make a list for each of only
# the names.
dest_opts_names = []
dest_opts = [item.strip() for item in destination.split(',')]
dest_opts_names, dest_opts_val = self._convert_option_values(dest_opts)
dest_opts_names.sort()
src_opts = [item.strip() for item in source.split(',')]
src_opts_names, src_opts_val = self._convert_option_values(src_opts)
src_opts_names.sort()
in_both, in_dest_not_src, in_src_not_dest = \
get_common_lists(dest_opts_names, src_opts_names)
# Whoops! There are things set in the destination that aren't in the
# source so we don't know if these are Ok or if we need to do
# something special.
if len(in_dest_not_src) > 0:
return None
changes = []
# Now check for changes for both
for name in in_both:
dest_val = self._find_value(name, dest_opts_val)
src_val = self._find_value(name, src_opts_val)
if dest_val is not None and dest_val != src_val:
changes.append("%s=%s" % (name.upper(), src_val))
# Get values for those not in destination
for item in in_src_not_dest:
val = self._find_value(item, src_opts_val)
if val is not None:
changes.append("%s=%s" % (item.upper(), val))
return ', '.join(changes)
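    # Illustrative behaviour (hypothetical option strings): given destination
    # options "MAX_ROWS=100, CHECKSUM=1" and source options
    # "MAX_ROWS=500, CHECKSUM=1", only the differing pair "MAX_ROWS=500" is
    # returned; if the destination contains an option name that the source
    # lacks, None is returned because the change cannot be reversed safely.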
def _get_table_defns(self, destination, source):
"""Get the transform fpr the general options for a table
This method creates an ALTER TABLE statement for table definitions
that differ. The items covered include only those options described
in the reference manual as table_options and include the following:
engine, auto_increment, avg_row_count, checksum, collation,
comment, and create options
destination[in] the original object definition or data
source[in] the source object definition or data
Returns string - ALTER TABLE clause or None if no transform needed
"""
changes = self._check_columns([_TABLE_COMMENT], destination, source)
# build a list of the parts
statement_parts = [
# rename
{'fmt': "RENAME TO %s.%s \n", 'col': _IGNORE_COLUMN, 'val': ""},
# engine
{'fmt': "ENGINE=%s", 'col': _TABLE_ENGINE, 'val': ""},
# auto increment
{'fmt': "AUTO_INCREMENT=%s", 'col': _TABLE_AUTO_INCREMENT,
'val': ""},
# collation
{'fmt': "COLLATE=%s", 'col': _TABLE_COLLATION, 'val': ""},
# comment - always include to ensure comments can be removed
{'fmt': "COMMENT='%s'", 'col': _IGNORE_COLUMN,
'val': source[_TABLE_COMMENT]},
# create options - will be completed later
{'fmt': "%s", 'col': _IGNORE_COLUMN, 'val': ""},
]
dest_create = destination[_TABLE_CREATE_OPTIONS]
src_create = source[_TABLE_CREATE_OPTIONS]
if dest_create != src_create:
create = statement_parts[5]
opt_val = self._parse_table_options(dest_create, src_create)
if opt_val is None:
return ("# WARNING: the destination table contains options "
"that are not in the source.\n# Cannot generate ALTER "
"statement.")
else:
create['val'] = "%s" % opt_val
changes = True
# if no changes, return None
if not changes and not self._fill_values(statement_parts, False,
destination, source):
return None
# We need to check the comment again and include it if source == ''
if self._check_columns([_TABLE_COMMENT], destination, source) and \
source[_TABLE_COMMENT] == '':
statement_parts[4]['col'] = _FORCE_COLUMN
# Check for rename
if destination[_TABLE_NAME] != source[_TABLE_NAME]:
statement_parts[0]['val'] = (source[_DB_NAME], source[_TABLE_NAME])
# check and set commas
do_comma = False
for part in statement_parts:
if do_comma:
part['fmt'] = ', ' + part['fmt']
elif part['col'] == _FORCE_COLUMN or part['val'] != '':
do_comma = True
return self._build_statement(statement_parts)
@staticmethod
def _get_column_format(col_data):
"""Build the column data type format string
col_data[in] the row containing the column definition
        Returns string - column data type format
"""
if col_data is None:
return ""
col_fmt = "%(type)s%(null)s%(default)s%(extra)s%(comment)s"
values = {
'type': col_data[_COLUMN_TYPE],
'null': "",
'default': "",
'extra': "",
'comment': "",
}
if col_data[_COLUMN_IS_NULLABLE].upper() == "NO":
values['null'] = " NOT NULL"
else:
values['null'] = " NULL"
if col_data[_COLUMN_DEFAULT] is not None and \
len(col_data[_COLUMN_DEFAULT]) > 0:
def_val = col_data[_COLUMN_DEFAULT]
# add quotes if needed
if def_val.upper() != "CURRENT_TIMESTAMP":
def_val = to_sql(def_val)
values['default'] = " DEFAULT %s" % def_val
if len(col_data[_COLUMN_EXTRA]) > 0:
if col_data[_COLUMN_EXTRA].upper() != "AUTO_INCREMENT":
values['extra'] = " %s" % col_data[_COLUMN_EXTRA]
if len(col_data[_COLUMN_COMMENT]) > 0:
values['comment'] = " COMMENT '%s'" % col_data[_COLUMN_COMMENT]
return col_fmt % values
@staticmethod
def _get_column_position(destination_def, source_def, destination, source,
drop_cols, add_cols):
"""Get the column position in the list
destination_def[in] destination column definition
source_def[in] source column definition
destination[in] destination column definitions
source[in] source column definitions
drop_cols[in] list of columns to be dropped - used to
calculate position of existing columns by
eliminating those cols in destination that will be
dropped
add_cols[in] list of columns to be added - used to
calculate position of existing columns by
                                   skipping those cols in source that will be
                                   added
Returns string - 'BEFORE' or 'AFTER' for column position or "" if
position cannot be determined (add or drop column)
"""
# Converting ordinal position to index positions:
#
# - ordinal positions start counting at 1
# - list indexes start at 0
#
# So if you want to find the column that is one less than the ordinal
# position of the current column, you must subtract 1 then subtract 1
# again to convert it to the list index.
dest_loc_idx = None
src_loc_idx = int(source_def[_COLUMN_ORDINAL_POSITION]) - 1
if destination_def is not None:
dest_loc_idx = int(destination_def[_COLUMN_ORDINAL_POSITION]) - 1
# Check to see if previous column has been dropped. If it has,
# don't include the BEFORE|AFTER - it will be ordered correctly.
if dest_loc_idx is not None and dest_loc_idx - 1 >= 0 and \
destination[dest_loc_idx - 1][_COLUMN_NAME] in drop_cols:
return ""
# Check to see if previous column has been added. If it has,
# don't include the BEFORE|AFTER - it will be ordered correctly.
if (src_loc_idx - 1 >= 0
and source[src_loc_idx - 1][_COLUMN_NAME] in add_cols):
return ""
# compare ordinal position - if not the same find where it goes
if dest_loc_idx is None or dest_loc_idx != src_loc_idx:
if src_loc_idx == 0:
return " FIRST"
for col in source:
if src_loc_idx == int(col[_COLUMN_ORDINAL_POSITION]):
return " AFTER %s" % col[_COLUMN_NAME]
return ""
@staticmethod
def _find_column(name, columns):
"""Find a column in a list by name
name[in] name of the column
columns[in] list of column definitions
Returns - column definition or None if column not found
"""
for col_def in columns:
if name == col_def[_COLUMN_NAME]:
return col_def
return None
def _get_column_change(self, column, destination, source,
drop_cols, add_cols):
"""Identify if column differs and return the changes
column[in] column name and operation type
destination[in] column definitions for destination
source[in] column definitions for source
drop_cols[in] list of columns to be dropped - used to
calculate position of existing columns
add_cols[in] list of columns to be added - used to
calculate position of existing columns
Returns string - new changes for column or ""
"""
operation = column[1]
# Get column from the origins
destination_def = self._find_column(column[0], destination)
source_def = self._find_column(column[0], source)
# Here we look for columns that are set for checking the order but
# the extra data (null, etc.) is different. So we change it to
# a type change instead. Exclude key column in compare.
if operation == _CHANGE_COL_ORDER and \
destination_def[:_COLUMN_KEY] != source_def[:_COLUMN_KEY]:
operation = _CHANGE_COL_TYPE
# Check for drop column
if operation == _DROP_COL:
colstr = " DROP COLUMN %s" % destination_def[_COLUMN_NAME]
else:
# Determine position and get the type format string
col_pos = self._get_column_position(destination_def, source_def,
destination, source,
drop_cols, add_cols)
col_fmt = self._get_column_format(source_def)
# Check for order changes
if operation == _CHANGE_COL_ORDER:
if len(col_pos) > 0:
colstr = " CHANGE COLUMN %s %s %s%s" % \
(source_def[_COLUMN_NAME],
source_def[_COLUMN_NAME],
col_fmt, col_pos)
else:
colstr = "" # No change needed here
# Add or change column
elif operation == _ADD_COL:
colstr = " ADD COLUMN %s %s%s" % (source_def[_COLUMN_NAME],
col_fmt, col_pos)
else: # must be change
colstr = " CHANGE COLUMN %s %s " % \
(destination_def[_COLUMN_NAME],
destination_def[_COLUMN_NAME])
colstr += "%s%s" % (col_fmt, col_pos)
return colstr
def _get_columns(self, destination, source):
"""Get the column definition changes
        This method loops through the columns and, if they differ, builds ALTER
        statements for transforming the columns of the destination table to the
source table.
destination[in] the original object definition or data
source[in] the source object definition or data
Returns string - ALTER statement or None if no column differences.
"""
from mysql.utilities.common.dbcompare import get_common_lists
drop_clauses = []
add_clauses = []
# Build lists with minimal matching data (column name and type) for
# destination and source. Then do the compare. Result is as follows:
#
# - those in both (name, type) will need to be checked for order
# of cols to generate CHANGE COLUMN x x <type> BEFORE|AFTER x
# - those in destination but not source will be dropped unless the
# name appears in source but not destination to generate
        #   DROP COLUMN x
# - those in destination but not source where the name does appear in
# source is a change of type to generate CHANGE COLUMN x x <type>
# - those in source but not destination that don't match by name in
# destination but not source are new columns to generate
# ADD COLUMN x <type>
# - those columns that match on both name and type need to be
# checked for order changes to generate the
# CHANGE COLUMN x BEFORE|AFTER
# - we need to check those that the column order changes to see
# if they are actually extra col def changes
dest_min = [item[1:3] for item in destination] # name, type
src_min = [item[1:3] for item in source] # name, type
# find matches by name + type
# <both_min>, <dest_src_min>, <src_dest_min> = get_common_lists
(both_min, _, _,) = get_common_lists(dest_min, src_min)
dest_src_names = [item[0] for item in dest_min] # only name
src_dest_names = [item[0] for item in src_min] # only name
# find matches by name only
both_names = [item[0] for item in both_min] # only name
both_check, dest_drop, src_new = get_common_lists(dest_src_names,
src_dest_names)
# find matches by name but not type
both_change_type = list(set(both_check) - set(both_names))
# remove type changes and form list for checking order
both_change_order = list(set(both_names) - set(both_change_type))
column_drops = []
column_changes = [] # a list of tuples in form (col_name, operation)
# Form drops
for col in dest_drop:
column_drops.append((col, _DROP_COL))
# Build the drop statements
for col in column_drops:
change_str = self._get_column_change(col, destination, source,
dest_drop, src_new)
if len(change_str) > 0:
# if first is specified, push to front of list
if change_str.endswith(" FIRST"):
drop_clauses.insert(0, change_str)
else:
drop_clauses.append(change_str)
# Form change type
for col in both_change_type:
column_changes.append((col, _CHANGE_COL_TYPE))
# Form add column
for col in src_new:
column_changes.append((col, _ADD_COL))
# Form change order
for col in both_change_order:
column_changes.append((col, _CHANGE_COL_ORDER))
# Build the add/change statements
for col in column_changes:
change_str = self._get_column_change(col, destination, source,
dest_drop, src_new)
if len(change_str) > 0:
# if first is specified, push to front of list
if change_str.endswith(" FIRST"):
add_clauses.insert(0, change_str)
else:
add_clauses.append(change_str)
return (drop_clauses, add_clauses)
def _get_foreign_keys(self, src_db, src_name, dest_db, dest_name):
"""Get the foreign key constraints
This method returns the table foreign keys via ALTER TABLE clauses
gathered from the Table class methods.
src_db[in] database name for source table
src_name[in] table name for source table
dest_db[in] database name for destination table
dest_name[in] table name for destination table
Returns tuple - (drop, add/changes)
"""
from mysql.utilities.common.table import Table
from mysql.utilities.common.dbcompare import get_common_lists
# Get the Table instances
self.dest_tbl = Table(self.destination_db.source, "%s.%s" %
(dest_db, dest_name))
self.src_tbl = Table(self.source_db.source, "%s.%s" %
(src_db, src_name))
drop_constraints = []
add_constraints = []
# Now we do foreign keys
dest_fkeys = self.dest_tbl.get_tbl_foreign_keys()
src_fkeys = self.src_tbl.get_tbl_foreign_keys()
# Now we determine the foreign keys we need to add and those to drop
# <both_min>, <dest_src_min>, <src_dest_min> = get_common_lists
_, drop_rows, add_rows = get_common_lists(dest_fkeys, src_fkeys)
# Generate DROP foreign key clauses
for fkey in drop_rows:
drop_constraints.append(" DROP FOREIGN KEY %s" % fkey[0])
# if fkey[0] not in drop_idx_recorded:
# constraints.append(" DROP INDEX %s" % fkey[0])
# Generate Add foreign key clauses
clause_fmt = "ADD CONSTRAINT %s FOREIGN KEY(%s) REFERENCES " + \
"`%s`.`%s`(%s)"
for fkey in add_rows:
add_constraints.append(clause_fmt % fkey)
return (drop_constraints, add_constraints)
@staticmethod
def _get_index_sql_clauses(rows):
"""Return the ALTER TABLE index clauses for the table.
This method returns the SQL index clauses for use in ALTER or CREATE
TABLE commands for defining the indexes for the table.
rows[in] result set of index definitions
Returns list - list of SQL index clause statements or
[] if no indexes
"""
index_clauses = []
if rows != []:
pri_key_cols = []
unique_indexes = []
unique_key_cols = []
unique_name = None
unique_method = None
unique_setting = None
for key in rows:
if key[2] == 'PRIMARY':
q_key = quote_with_backticks(key[4])
pri_key_cols.append(q_key)
else:
if unique_name is None:
unique_name = key[2]
unique_method = key[10]
unique_setting = key[1]
unique_key_cols.append(key[4])
elif unique_name == key[2]:
unique_key_cols.append(key[4])
else:
unique_indexes.append((unique_name, unique_method,
unique_setting,
unique_key_cols))
unique_key_cols = []
unique_name = key[2]
unique_method = key[10]
unique_setting = key[1]
unique_key_cols.append(key[4])
# add the last one
if unique_name is not None:
unique_indexes.append((unique_name, unique_method,
unique_setting,
unique_key_cols))
# Build SQL statement clause
if len(pri_key_cols) > 0:
index_clauses.append(" ADD PRIMARY KEY(%s)" %
','.join(pri_key_cols))
if len(unique_indexes) > 0:
for idx in unique_indexes:
create_idx = " ADD "
if int(idx[2]) != 1:
create_idx += "UNIQUE "
if idx[1] == "FULLTEXT":
create_idx += "FULLTEXT "
if (idx[1] == "RTREE"):
using = " USING %s" % (idx[1])
else:
using = ""
create_idx += "INDEX %s%s (%s)" % \
(idx[0], using,
','.join(idx[3]))
index_clauses.append(create_idx)
return index_clauses
def _get_indexes(self, src_db, src_name, dest_db, dest_name):
"""Get the index constraints
This method returns the table primary keys, and other indexes via
ALTER TABLE clauses gathered from the Table class methods.
src_db[in] database name for source table
src_name[in] table name for source table
dest_db[in] database name for destination table
dest_name[in] table name for destination table
Returns tuple - (drop, add/changes)
"""
from mysql.utilities.common.table import Table
from mysql.utilities.common.dbcompare import get_common_lists
# Get the Table instances
self.dest_tbl = Table(self.destination_db.source, "%s.%s" %
(dest_db, dest_name))
self.src_tbl = Table(self.source_db.source, "%s.%s" %
(src_db, src_name))
drop_indexes = []
add_indexes = []
# Get the list of indexes
# Do not compare with the name of the tables
dest_idx = [('',) + tuple(idx[1:])
for idx in self.dest_tbl.get_tbl_indexes()]
src_idx = [('',) + tuple(idx[1:])
for idx in self.src_tbl.get_tbl_indexes()]
# Now we determine the indexes we need to add and those to drop
_, drop_idx, add_idx = get_common_lists(dest_idx, src_idx)
if not drop_idx and not add_idx:
return ([], [])
# Generate DROP index clauses
drop_idx_recorded = [] # used to avoid duplicate index drops
for index in drop_idx:
if index[2] == "PRIMARY":
drop_indexes.append(" DROP PRIMARY KEY")
elif index[2] not in drop_idx_recorded:
drop_indexes.append(" DROP INDEX %s" % index[2])
drop_idx_recorded.append(index[2])
# Generate ADD index clauses
if len(add_idx) > 0:
add_indexes.extend(self._get_index_sql_clauses(add_idx))
return (drop_indexes, add_indexes)
@staticmethod
def _check_for_partitions(destination_row, source_row):
"""Determine if there are transformations involving partitions
This method returns TRUE if the destination and source differ in
partitioning configurations
destination_row[in] the original object definition or data
source_row[in] the source object definition or data
Returns bool - True = differences found, False = no differences
"""
#
# TODO: Complete this operation with a new worklog.
# This release does not support transformation of partitions.
part_changes_found = False
if len(destination_row) != len(source_row):
part_changes_found = True
elif len(destination_row) == 0:
return None
elif len(destination_row) == 1:
if not (destination_row[0][3] is None
and source_row[0][3] is None):
part_changes_found = True
else:
part_stop = len(destination_row)
row_stop = len(destination_row[0])
for i in range(0, part_stop):
for j in range(0, row_stop):
if destination_row[i][j] != source_row[i][j]:
part_changes_found = True
break
return part_changes_found
def _transform_table(self):
"""Transform a table definition
This method will transform a table definition to match the source
configuration. It returns the ALTER TABLE SQL statement to
transform the object or None if no transformation is needed.
Note: The incoming lists contain a tuple defined as:
(table definitions, columns, partitions, constraints)
for destination and source.
Returns list - ALTER TABLE statements for transforming the table
"""
statements = []
# Collect a list of all of the ALTER clauses. Order is important in
# building an ALTER TABLE statement. For safety (and correct execution)
# we must order the clauses as follows:
#
# - drop foreign key constraints
# - drop indexes
# - drop columns
# - add/change columns
# - add/change indexes
# - add/change foreign keys
# - general table changes
#
# Note: partition changes not supported by this release
src_db_name = self.source[_TABLE_DEF][_TABLE_DB]
src_tbl_name = self.source[_TABLE_DEF][_TABLE_NAME]
dest_db_name = self.destination[_TABLE_DEF][_TABLE_DB]
dest_tbl_name = self.destination[_TABLE_DEF][_TABLE_NAME]
        # Quote identifiers with backticks
q_src_db_name = quote_with_backticks(src_db_name)
q_src_tbl_name = quote_with_backticks(src_tbl_name)
q_dest_db_name = quote_with_backticks(dest_db_name)
q_dest_tbl_name = quote_with_backticks(dest_tbl_name)
# build a list of the parts
statement_parts = [
# preamble
{'fmt': "%s", 'col': _IGNORE_COLUMN, 'val': "ALTER TABLE"},
# object name
{'fmt': " %s.%s", 'col': _IGNORE_COLUMN,
'val': (q_dest_db_name, q_dest_tbl_name)},
# alter clauses - will be completed later
{'fmt': " \n%s", 'col': _IGNORE_COLUMN, 'val': ""},
]
# For foreign key changes, we need two collections: drop statements,
# add and change statements. Method returns tuple of (drop, add).
fkeys = self._get_foreign_keys(q_src_db_name, q_src_tbl_name,
q_dest_db_name, q_dest_tbl_name)
# For index changes, we need two collections: drop statements, add and
# change statements. Method returns tuple of (drop, add).
indexes = self._get_indexes(q_src_db_name, q_src_tbl_name,
q_dest_db_name, q_dest_tbl_name)
# For column changes, we need two collections: drop statements, add and
# change statements. Method returns tuple of (drop, add/change).
columns = self._get_columns(self.destination[_COLUMN_DEF],
self.source[_COLUMN_DEF])
# Now add drops then add/changes
for i in range(0, 2):
statements.extend(fkeys[i])
statements.extend(indexes[i])
statements.extend(columns[i])
# General definition returns a single string of the option changes
if not self.skip_table_opts:
gen_defn = self._get_table_defns(self.destination[_TABLE_DEF],
self.source[_TABLE_DEF])
else:
gen_defn = None
if gen_defn is not None:
statements.append(gen_defn)
# Form the SQL command.
statement_parts[2]['val'] = ', \n'.join(statements)
sql_stmts = ["%s;" % self._build_statement(statement_parts)]
# Currently, we check partitions last because this code will
        # generate a warning message. Later, once this code is complete,
# it can be moved where it belongs in the order of creation of
# the ALTER TABLE statement
if self._check_for_partitions(self.destination[_PART_DEF],
self.source[_PART_DEF]):
sql_stmts.append("# WARNING: Partition transformation is not "
"supported in this release.\n# Please check "
"the table definitions for partition changes.")
return sql_stmts
def _transform_view(self):
"""Transform a view definition
This method will transform a view definition to match the source
configuration. It returns the CREATE OR ALTER VIEW SQL statement to
transform the object or None if no transformation is needed.
Returns list - ALTER VIEW statement for transforming the view
"""
statements = []
# check for create
do_create = self._check_columns([_VIEW_CHECK])
# build a list of the parts
statement_parts = [
# preamble
{'fmt': "%s", 'col': _IGNORE_COLUMN,
'val': "CREATE" if do_create else "ALTER"},
# definer
{'fmt': " DEFINER=%s", 'col': _VIEW_DEFINER, 'val': ""},
# security
{'fmt': " SQL SECURITY %s", 'col': _VIEW_SECURITY, 'val': ""},
# object type and name
{'fmt': " VIEW %s.%s", 'col': _IGNORE_COLUMN,
'val': (self.destination[_VIEW_DB],
self.destination[_VIEW_NAME])},
# definition
{'fmt': " AS \n %s", 'col': _VIEW_BODY, 'val': ""},
# check option (will be updated later)
{'fmt': "%s", 'col': _IGNORE_COLUMN, 'val': ""}
]
changes = False
# view check option is special - we have to handle that separately
if self.destination[_VIEW_CHECK] != self.source[_VIEW_CHECK]:
if self.source[_VIEW_CHECK].upper() != 'NONE':
check = statement_parts[5]
check['val'] = " WITH %s CHECK OPTION" % \
self.source[_VIEW_CHECK]
changes = True
# if no changes, return None
if not changes and not self._fill_values(statement_parts, do_create):
return None
# check to see if definer or security or check option have changed and
# if so add definition (always needed if these change)
if self._check_columns([_VIEW_DEFINER, _VIEW_SECURITY, _VIEW_CHECK]):
statement_parts[4]['val'] = self.source[_VIEW_BODY]
# form the drop if we do a create
if do_create:
statements.append("DROP VIEW IF EXISTS `%s`.`%s`;" %
(self.destination[_VIEW_DB],
self.destination[_VIEW_NAME]))
sql_stmt = "%s;" % self._build_statement(statement_parts)
statements.append(sql_stmt)
return statements
def _transform_trigger(self):
"""Transform a trigger definition
This method will transform a trigger definition to match the source
configuration. It returns the appropriate SQL statement(s) to
transform the object or None if no transformation is needed.
Returns list - SQL statement(s) for transforming the trigger
"""
statements = []
# build a list of the parts
statement_parts = [
# preamble
{'fmt': "%s", 'col': _IGNORE_COLUMN, 'val': "CREATE"},
# definer
{'fmt': " DEFINER=%s", 'col': _TRIGGER_DEFINER, 'val': ""},
# object name
{'fmt': " TRIGGER %s.%s", 'col': _IGNORE_COLUMN,
'val': (self.destination[_TRIGGER_DB],
self.destination[_TRIGGER_NAME])},
# trigger timing
{'fmt': " %s", 'col': _TRIGGER_TIME, 'val': ""},
# trigger event
{'fmt': " %s", 'col': _TRIGGER_EVENT, 'val': ""},
# trigger table
{'fmt': " ON %s." % self.destination[_TRIGGER_DB] +
"%s FOR EACH ROW",
'col': _TRIGGER_TABLE, 'val': ""},
# trigger body
{'fmt': " %s;", 'col': _TRIGGER_BODY, 'val': ""},
]
# Triggers don't have ALTER SQL so we just pass back a drop + create.
# if no changes, return None
if not self._fill_values(statement_parts, True):
return None
statements.append("DROP TRIGGER IF EXISTS `%s`.`%s`;" %
(self.destination[_TRIGGER_DB],
self.destination[_TRIGGER_NAME]))
sql_stmt = self._build_statement(statement_parts)
statements.append(sql_stmt)
return statements
def _transform_routine(self):
"""Transform a routine definition
This method will transform a routine (FUNCTION or PROCEDURE) definition
to match the source configuration. It returns the ALTER [FUNCTION |
PROCEDURE] SQL statement to transform the object or None if no
transformation is needed.
Returns list - [CREATE|ALTER] [FUNCTION|PROCEDURE] statement for
transforming the routine
"""
statements = []
# check for create
do_create = self._check_columns([_ROUTINE_BODY,
_ROUTINE_DEFINER,
_ROUTINE_PARAMS])
# Quote destination db and routine names with backticks
q_dest_db = quote_with_backticks(self.destination[_ROUTINE_DB])
q_dest_routine = quote_with_backticks(self.destination[_ROUTINE_NAME])
# build a list of the parts
statement_parts = [
# delimiter
{'fmt': "%s", 'col': _IGNORE_COLUMN, 'val': "DELIMITER //\n"},
# preamble
{'fmt': "%s", 'col': _IGNORE_COLUMN,
'val': "CREATE" if do_create else "ALTER"},
# definer
{'fmt': " DEFINER=%s", 'col': _ROUTINE_DEFINER,
'val': ""},
# object type and name
{'fmt': " %s %s.%s", 'col': _IGNORE_COLUMN,
'val': (self.obj_type.upper(), q_dest_db, q_dest_routine)},
# parameters
{'fmt': " %s", 'col': _IGNORE_COLUMN, 'val': ""},
# returns (Functions only)
{'fmt': " RETURNS %s", 'col': _IGNORE_COLUMN, 'val': ""},
# access method
{'fmt': " %s", 'col': _ROUTINE_SQL_DATA_ACCESS, 'val': ""},
# deterministic (Functions only)
{'fmt': " %s", 'col': _IGNORE_COLUMN, 'val': ""},
# security
{'fmt': " SQL SECURITY %s", 'col': _ROUTINE_SECURITY_TYPE,
'val': ""},
# comment
{'fmt': " COMMENT '%s'", 'col': _ROUTINE_COMMENT, 'val': ""},
# body
{'fmt': " %s", 'col': _ROUTINE_BODY, 'val': ""},
# reset delimiter
{'fmt': "%s", 'col': _IGNORE_COLUMN,
'val': "//\nDELIMITER ;\n"},
]
# if no changes, return None
if not self._fill_values(statement_parts, do_create):
return None
# Add parameters and DEFINER if CREATE statement.
if do_create:
statement_parts[4]['val'] = \
'({0})'.format(self.source[_ROUTINE_PARAMS])
# Quote DEFINER with backticks
statement_parts[2]['val'] = \
quote_with_backticks_definer(self.source[_ROUTINE_DEFINER])
# Add the returns for functions
# Only when doing create or modifications to the body
if self.obj_type.upper() == "FUNCTION":
if (do_create or
self.destination[_ROUTINE_BODY] != self.source[_ROUTINE_BODY]):
statement_parts[5]['val'] = self.source[_ROUTINE_RETURNS]
# Add deterministic
if do_create:
if self.source[_ROUTINE_IS_DETERMINISTIC] == "YES":
statement_parts[7]['val'] = "DETERMINISTIC"
else:
statement_parts[7]['val'] = "NOT DETERMINISTIC"
# form the drop if we do a create
if do_create:
statements.append(
"DROP {0} IF EXISTS {1}.{2};".format(
self.obj_type.upper(), q_dest_db, q_dest_routine
)
)
statements.append(self._build_statement(statement_parts))
return statements
def _transform_event(self):
"""Transform a event definition
This method will transform a event definition to match the source
configuration. It returns the ALTER EVENT SQL statement to
transform the object or None if no transformation is needed.
Notes:
The DEFINER does not compare properly for SHOW CREATE EVENT
comparison.
The RENAME cannot be processed because it requires a different
name and mysqldiff compares on like names.
Returns list - ALTER EVENT statement for transforming the event
"""
statements = []
# build a list of the parts
statement_parts = [
# preamble
{'fmt': "%s", 'col': _IGNORE_COLUMN, 'val': "ALTER"},
# definer
{'fmt': " DEFINER=%s", 'col': _EVENT_DEFINER, 'val': ""},
# type
{'fmt': " %s", 'col': _IGNORE_COLUMN, 'val': "EVENT"},
# object name
{'fmt': " %s.%s", 'col': _IGNORE_COLUMN,
'val': (self.destination[_EVENT_DB],
self.destination[_EVENT_NAME])},
# schedule - will be filled in later
{'fmt': " %s", 'col': _IGNORE_COLUMN, 'val': ""},
# complete
{'fmt': " ON COMPLETION %s", 'col': _EVENT_ON_COMPLETION,
'val': ""},
# rename
{'fmt': " RENAME TO %s", 'col': _EVENT_NAME, 'val': ""},
# status
{'fmt': " %s", 'col': _EVENT_STATUS,
'val': self.source[_EVENT_STATUS]},
# event body
{'fmt': " DO %s", 'col': _EVENT_BODY, 'val': ""},
]
# We can only do the columns we know about and must ignore the others
# like STARTS which may be Ok to differ.
changes = self._check_columns([_EVENT_ON_COMPLETION, _EVENT_STATUS,
_EVENT_BODY, _EVENT_NAME, _EVENT_ENDS,
_EVENT_INTERVAL_FIELD, _EVENT_STARTS,
_EVENT_INTERVAL_VALUE, _EVENT_TYPE])
        # We do the schedule separately because it requires additional checks
if changes:
schedule = statement_parts[4]
schedule['val'] = "ON SCHEDULE"
if self.source[_EVENT_TYPE].upper() == "RECURRING":
schedule['val'] += " EVERY %s" % \
self.source[_EVENT_INTERVAL_VALUE]
schedule['val'] += " %s" % \
self.source[_EVENT_INTERVAL_FIELD].upper()
if self.source[_EVENT_STARTS] is not None:
schedule['val'] += " STARTS '%s'" % self.source[_EVENT_STARTS]
if self.source[_EVENT_ENDS] is not None:
schedule['val'] += " ENDS '%s'" % self.source[_EVENT_ENDS]
# if no changes, return None
if not changes:
return None
self._fill_values(statement_parts, False)
# We must fix the status value
status = statement_parts[7]
if status['val'].upper() == "DISABLED":
status['val'] = "DISABLE"
elif status['val'].upper() == "ENABLED":
status['val'] = "ENABLE"
elif status['val'].upper() == "SLAVESIDE_DISABLED":
status['val'] = "DISABLE ON SLAVE"
sql_stmt = "%s;" % self._build_statement(statement_parts)
statements.append(sql_stmt)
return statements
def _check_columns(self, col_list, destination=None, source=None):
"""Check for special column changes to trigger a CREATE
        This method checks a specific list of columns to see if the values
        differ between the destination and the source. If they do, the method
        returns True, otherwise it returns False.
col_list[in] a list of column numbers to check
destination[in] If not None, use this list for destination
(default = None)
source[in] If not None, use this list for source
(default = None)
Returns bool - True = there are differences, False = no differences
"""
if destination is None:
destination = self.destination
if source is None:
source = self.source
for column_num in col_list:
if destination[column_num] != source[column_num]:
return True
return False
def _fill_values(self, stmt_parts, create=False,
destination=None, source=None):
"""Fill the structure with values
        This method loops through all of the column dictionaries, filling in
        the value for any that differ between the destination and the source. If
create is True, it will also fill in the values from the source to
permit the completion of a CREATE statement.
stmt_parts[in] a list of column dictionaries
create[in] if True, fill in all values
if False, fill in only those values that differ
(default = False)
destination[in] If not None, use this list for destination
(default = None)
source[in] If not None, use this list for source
(default = None)
Returns bool - True if changes found
"""
if destination is None:
destination = self.destination
if source is None:
source = self.source
changes_found = False
for part in stmt_parts:
col = part['col']
if col != _IGNORE_COLUMN:
if source[col] is not None and destination[col] != source[col]:
part['val'] = source[col]
changes_found = True
elif create:
part['val'] = destination[col]
return changes_found
@staticmethod
def _build_statement(stmt_parts):
"""Build the object definition statement
This method will build a completed statement based on the list of parts
provided.
stmt_parts[in] a list of column dictionaries
Returns string - the object definition string
"""
stmt_values = []
for part in stmt_parts:
if part['col'] == _FORCE_COLUMN or part['val'] != "":
stmt_values.append(part['fmt'] % part['val'])
return ''.join(stmt_values)
| gpl-2.0 | -7,321,135,944,833,890,000 | 39.288802 | 79 | 0.560101 | false |
Mymoza/trello-similar-labels | tests/Test_TrelloWrapper.py | 1 | 1090 | import TrelloWrapper as wrapper
import unittest
class PrimesTestCase(unittest.TestCase):
"""Tests for TrelloWrapper."""
""" TESTS FOR LEVENSHTEIN DISTANCE """
def test_0_levenshtein_distance(self):
"""Is 0 successfully determined in the levenshtein distance?"""
# Two identical words should return 0
test = wrapper.levenshtein_distance("unito", "unito")
self.assertEqual(test,0, "Unito and Unito should return 0")
def test_2_levenshtein_distance(self):
"""Is 2 successfully determined in the levenshtein distance?"""
# Delete F, Add N at the end
test = wrapper.levenshtein_distance("flaw", "lawn")
self.assertEqual(test, 2, "flaw and lawn should return 2")
def test_3_levenshtein_distance(self):
"""Is 3 successfully determined in the levenshtein distance?"""
test = wrapper.levenshtein_distance("kitten", "sitting")
self.assertEqual(test,3, "Kitten and Sitting should return 3")
""" END TESTS FOR LEVENSHTEIN DISTANCE """
if __name__ == '__main__':
unittest.main() | mit | -5,889,056,139,202,562,000 | 34.193548 | 71 | 0.666055 | false |
lgarren/spack | var/spack/repos/builtin/packages/r-annotationfilter/package.py | 1 | 1981 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAnnotationfilter(RPackage):
"""This package provides class and other infrastructure to implement
filters for manipulating Bioconductor annotation resources. The
filters will be used by ensembldb, Organism.dplyr, and other
packages."""
homepage = "https://bioconductor.org/packages/AnnotationFilter/"
url = "https://git.bioconductor.org/packages/AnnotationFilter"
list_url = homepage
version('1.0.0', git='https://git.bioconductor.org/packages/AnnotationFilter', commit='a9f79b26defe3021eea60abe16ce1fa379813ec9')
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-lazyeval', type=('build', 'run'))
depends_on('[email protected]:3.4.9', when='@1.0.0')
| lgpl-2.1 | -4,270,355,889,196,540,000 | 46.166667 | 133 | 0.683998 | false |
fugitifduck/exabgp | lib/exabgp/bgp/message/update/nlri/evpn/nlri.py | 1 | 2044 | """
evpn.py
Created by Thomas Morin on 2014-06-23.
Copyright (c) 2014-2015 Orange. All rights reserved.
"""
from struct import pack
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
# ========================================================================= EVPN
# +-----------------------------------+
# | Route Type (1 octet) |
# +-----------------------------------+
# | Length (1 octet) |
# +-----------------------------------+
# | Route Type specific (variable) |
# +-----------------------------------+
class EVPN (object):
registered_evpn = dict()
# NEED to be defined in the subclasses
CODE = -1
NAME = 'unknown'
SHORT_NAME = 'unknown'
# lower case to match the class Address API
afi = AFI(AFI.l2vpn)
safi = SAFI(SAFI.evpn)
def __init__ (self, packed):
self.packed = packed
def _prefix (self):
return "evpn:%s:" % (self.registered_evpn.get(self.CODE,self).SHORT_NAME.lower())
def __str__ (self):
return "evpn:%s:%s" % (self.registered_evpn.get(self.CODE,self).SHORT_NAME.lower(),'0x' + ''.join('%02x' % ord(_) for _ in self.packed))
def __repr__ (self):
return str(self)
def pack (self):
return pack('!BB',self.CODE,len(self.packed)) + self.packed
def __len__ (self):
return len(self.packed) + 2
# For subtype 2 (MAC/IP advertisement route),
# we will have to ignore a part of the route, so this method will be overridden
def __cmp__ (self, other):
if not isinstance(other,EVPN):
return -1
if self.CODE != other.CODE:
return -1
if self.packed != other.packed:
return -1
return 0
def __hash__ (self):
return hash("%s:%s:%s:%s" % (self.afi,self.safi,self.CODE,self.packed))
@staticmethod
def register_evpn (klass):
EVPN.registered_evpn[klass.CODE] = klass
@classmethod
def unpack (cls, data):
code = ord(data[0])
length = ord(data[1])
if code in cls.registered_evpn:
return cls.registered_evpn[code].unpack(data[length+1:])
klass = cls(data[length+1:])
klass.CODE = code
return klass
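# Editorial note -- illustrative only, not part of the original module: a
# concrete route type would subclass EVPN, set CODE/NAME/SHORT_NAME and
# register itself, for example (hypothetical subtype shown for illustration):
#
#   class EthernetAD (EVPN):
#       CODE = 1
#       NAME = 'Ethernet A-D'
#       SHORT_NAME = 'EthernetAD'
#   EVPN.register_evpn(EthernetAD)
#
# pack() then emits the two-octet header drawn at the top of this file
# (route type, length) followed by the type-specific payload, and unpack()
# dispatches on the registered CODE.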
| bsd-3-clause | -7,025,846,381,177,260,000 | 23.926829 | 138 | 0.576321 | false |
quantumlib/Cirq | cirq-google/cirq_google/engine/engine_job_test.py | 1 | 20894 | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from unittest import mock
import pytest
from google.protobuf.text_format import Merge
import cirq
import cirq_google as cg
from cirq_google.api import v1, v2
from cirq_google.engine.client.quantum_v1alpha1 import types as qtypes
from cirq_google.engine.engine import EngineContext
def _to_any(proto):
any_proto = qtypes.any_pb2.Any()
any_proto.Pack(proto)
return any_proto
@pytest.fixture(scope='session', autouse=True)
def mock_grpc_client():
with mock.patch(
'cirq_google.engine.engine_client.quantum.QuantumEngineServiceClient'
) as _fixture:
yield _fixture
def test_engine():
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
assert job.engine().project_id == 'a'
def test_program():
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
assert job.program().project_id == 'a'
assert job.program().program_id == 'b'
def test_create_time():
job = cg.EngineJob(
'a',
'b',
'steve',
EngineContext(),
_job=qtypes.QuantumJob(create_time=qtypes.timestamp_pb2.Timestamp(seconds=1581515101)),
)
assert job.create_time() == datetime.datetime(2020, 2, 12, 13, 45, 1)
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job')
def test_update_time(get_job):
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
get_job.return_value = qtypes.QuantumJob(
update_time=qtypes.timestamp_pb2.Timestamp(seconds=1581515101)
)
assert job.update_time() == datetime.datetime(2020, 2, 12, 13, 45, 1)
get_job.assert_called_once_with('a', 'b', 'steve', False)
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job')
def test_description(get_job):
job = cg.EngineJob(
'a', 'b', 'steve', EngineContext(), _job=qtypes.QuantumJob(description='hello')
)
assert job.description() == 'hello'
get_job.return_value = qtypes.QuantumJob(description='hello')
assert cg.EngineJob('a', 'b', 'steve', EngineContext()).description() == 'hello'
get_job.assert_called_once_with('a', 'b', 'steve', False)
@mock.patch('cirq_google.engine.engine_client.EngineClient.set_job_description')
def test_set_description(set_job_description):
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
set_job_description.return_value = qtypes.QuantumJob(description='world')
assert job.set_description('world').description() == 'world'
set_job_description.assert_called_with('a', 'b', 'steve', 'world')
set_job_description.return_value = qtypes.QuantumJob(description='')
assert job.set_description('').description() == ''
set_job_description.assert_called_with('a', 'b', 'steve', '')
def test_labels():
job = cg.EngineJob(
'a', 'b', 'steve', EngineContext(), _job=qtypes.QuantumJob(labels={'t': '1'})
)
assert job.labels() == {'t': '1'}
@mock.patch('cirq_google.engine.engine_client.EngineClient.set_job_labels')
def test_set_labels(set_job_labels):
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
set_job_labels.return_value = qtypes.QuantumJob(labels={'a': '1', 'b': '1'})
assert job.set_labels({'a': '1', 'b': '1'}).labels() == {'a': '1', 'b': '1'}
set_job_labels.assert_called_with('a', 'b', 'steve', {'a': '1', 'b': '1'})
set_job_labels.return_value = qtypes.QuantumJob()
assert job.set_labels({}).labels() == {}
set_job_labels.assert_called_with('a', 'b', 'steve', {})
@mock.patch('cirq_google.engine.engine_client.EngineClient.add_job_labels')
def test_add_labels(add_job_labels):
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qtypes.QuantumJob(labels={}))
assert job.labels() == {}
add_job_labels.return_value = qtypes.QuantumJob(
labels={
'a': '1',
}
)
assert job.add_labels({'a': '1'}).labels() == {'a': '1'}
add_job_labels.assert_called_with('a', 'b', 'steve', {'a': '1'})
add_job_labels.return_value = qtypes.QuantumJob(labels={'a': '2', 'b': '1'})
assert job.add_labels({'a': '2', 'b': '1'}).labels() == {'a': '2', 'b': '1'}
add_job_labels.assert_called_with('a', 'b', 'steve', {'a': '2', 'b': '1'})
@mock.patch('cirq_google.engine.engine_client.EngineClient.remove_job_labels')
def test_remove_labels(remove_job_labels):
job = cg.EngineJob(
'a', 'b', 'steve', EngineContext(), _job=qtypes.QuantumJob(labels={'a': '1', 'b': '1'})
)
assert job.labels() == {'a': '1', 'b': '1'}
remove_job_labels.return_value = qtypes.QuantumJob(
labels={
'b': '1',
}
)
assert job.remove_labels(['a']).labels() == {'b': '1'}
remove_job_labels.assert_called_with('a', 'b', 'steve', ['a'])
remove_job_labels.return_value = qtypes.QuantumJob(labels={})
assert job.remove_labels(['a', 'b', 'c']).labels() == {}
remove_job_labels.assert_called_with('a', 'b', 'steve', ['a', 'b', 'c'])
def test_processor_ids():
job = cg.EngineJob(
'a',
'b',
'steve',
EngineContext(),
_job=qtypes.QuantumJob(
scheduling_config=qtypes.SchedulingConfig(
processor_selector=qtypes.SchedulingConfig.ProcessorSelector(
processor_names=['projects/a/processors/p']
)
)
),
)
assert job.processor_ids() == ['p']
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job')
def test_status(get_job):
qjob = qtypes.QuantumJob(
execution_status=qtypes.ExecutionStatus(state=qtypes.ExecutionStatus.State.RUNNING)
)
get_job.return_value = qjob
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
assert job.status() == 'RUNNING'
get_job.assert_called_once()
def test_failure():
job = cg.EngineJob(
'a',
'b',
'steve',
EngineContext(),
_job=qtypes.QuantumJob(
execution_status=qtypes.ExecutionStatus(
state=qtypes.ExecutionStatus.State.FAILURE,
failure=qtypes.ExecutionStatus.Failure(
error_code=qtypes.ExecutionStatus.Failure.Code.SYSTEM_ERROR,
error_message='boom',
),
)
),
)
assert job.failure() == ('SYSTEM_ERROR', 'boom')
def test_failure_with_no_error():
job = cg.EngineJob(
'a',
'b',
'steve',
EngineContext(),
_job=qtypes.QuantumJob(
execution_status=qtypes.ExecutionStatus(
state=qtypes.ExecutionStatus.State.SUCCESS,
)
),
)
assert not job.failure()
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job')
def test_get_repetitions_and_sweeps(get_job):
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
get_job.return_value = qtypes.QuantumJob(
run_context=_to_any(
v2.run_context_pb2.RunContext(
parameter_sweeps=[v2.run_context_pb2.ParameterSweep(repetitions=10)]
)
)
)
assert job.get_repetitions_and_sweeps() == (10, [cirq.UnitSweep])
get_job.assert_called_once_with('a', 'b', 'steve', True)
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job')
def test_get_repetitions_and_sweeps_v1(get_job):
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
get_job.return_value = qtypes.QuantumJob(
run_context=_to_any(
v1.program_pb2.RunContext(
parameter_sweeps=[v1.params_pb2.ParameterSweep(repetitions=10)]
)
)
)
with pytest.raises(ValueError, match='v1 RunContext is not supported'):
job.get_repetitions_and_sweeps()
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job')
def test_get_repetitions_and_sweeps_unsupported(get_job):
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
get_job.return_value = qtypes.QuantumJob(
run_context=qtypes.any_pb2.Any(type_url='type.googleapis.com/unknown.proto')
)
with pytest.raises(ValueError, match='unsupported run_context type: unknown.proto'):
job.get_repetitions_and_sweeps()
def test_get_processor():
qjob = qtypes.QuantumJob(
execution_status=qtypes.ExecutionStatus(processor_name='projects/a/processors/p')
)
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
assert job.get_processor().processor_id == 'p'
def test_get_processor_no_processor():
qjob = qtypes.QuantumJob(execution_status=qtypes.ExecutionStatus())
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
assert not job.get_processor()
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_calibration')
def test_get_calibration(get_calibration):
qjob = qtypes.QuantumJob(
execution_status=qtypes.ExecutionStatus(
calibration_name='projects/a/processors/p/calibrations/123'
)
)
calibration = qtypes.QuantumCalibration(
data=_to_any(
Merge(
"""
timestamp_ms: 123000,
metrics: [{
name: 'xeb',
targets: ['0_0', '0_1'],
values: [{
double_val: .9999
}]
}, {
name: 't1',
targets: ['0_0'],
values: [{
double_val: 321
}]
}, {
name: 'globalMetric',
values: [{
int32_val: 12300
}]
}]
""",
v2.metrics_pb2.MetricsSnapshot(),
)
)
)
get_calibration.return_value = calibration
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
assert list(job.get_calibration()) == ['xeb', 't1', 'globalMetric']
get_calibration.assert_called_once_with('a', 'p', 123)
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_calibration')
def test_calibration__with_no_calibration(get_calibration):
job = cg.EngineJob(
'a',
'b',
'steve',
EngineContext(),
_job=qtypes.QuantumJob(
name='projects/project-id/programs/test/jobs/test',
execution_status={'state': 'SUCCESS'},
),
)
calibration = job.get_calibration()
assert not calibration
assert not get_calibration.called
@mock.patch('cirq_google.engine.engine_client.EngineClient.cancel_job')
def test_cancel(cancel_job):
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
job.cancel()
cancel_job.assert_called_once_with('a', 'b', 'steve')
@mock.patch('cirq_google.engine.engine_client.EngineClient.delete_job')
def test_delete(delete_job):
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
job.delete()
delete_job.assert_called_once_with('a', 'b', 'steve')
RESULTS = qtypes.QuantumResult(
result=_to_any(
Merge(
"""
sweep_results: [{
repetitions: 4,
parameterized_results: [{
params: {
assignments: {
key: 'a'
value: 1
}
},
measurement_results: {
key: 'q'
qubit_measurement_results: [{
qubit: {
id: '1_1'
}
results: '\006'
}]
}
},{
params: {
assignments: {
key: 'a'
value: 2
}
},
measurement_results: {
key: 'q'
qubit_measurement_results: [{
qubit: {
id: '1_1'
}
results: '\005'
}]
}
}]
}]
""",
v2.result_pb2.Result(),
)
)
)
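# Editorial note -- assumption stated for illustration: the packed byte strings
# above are little-endian bit-packed measurement results. With 4 repetitions,
# '\006' == 0b110 unpacks LSB-first to the bits 0,1,1,0 and '\005' == 0b101 to
# 1,0,1,0, which is why the tests below expect 'q=0110' and 'q=1010'.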
BATCH_RESULTS = qtypes.QuantumResult(
result=_to_any(
Merge(
"""
results: [{
sweep_results: [{
repetitions: 3,
parameterized_results: [{
params: {
assignments: {
key: 'a'
value: 1
}
},
measurement_results: {
key: 'q'
qubit_measurement_results: [{
qubit: {
id: '1_1'
}
results: '\006'
}]
}
},{
params: {
assignments: {
key: 'a'
value: 2
}
},
measurement_results: {
key: 'q'
qubit_measurement_results: [{
qubit: {
id: '1_1'
}
results: '\007'
}]
}
}]
}],
},{
sweep_results: [{
repetitions: 4,
parameterized_results: [{
params: {
assignments: {
key: 'a'
value: 3
}
},
measurement_results: {
key: 'q'
qubit_measurement_results: [{
qubit: {
id: '1_1'
}
results: '\013'
}]
}
},{
params: {
assignments: {
key: 'a'
value: 4
}
},
measurement_results: {
key: 'q'
qubit_measurement_results: [{
qubit: {
id: '1_1'
}
results: '\011'
}]
}
}]
}]
}]
""",
v2.batch_pb2.BatchResult(),
)
)
)
CALIBRATION_RESULT = qtypes.QuantumResult(
result=_to_any(
Merge(
"""
results: [{
code: ERROR_CALIBRATION_FAILED
error_message: 'uh oh'
token: 'abc'
valid_until_ms: 1234567891000
metrics: {
timestamp_ms: 1234567890000,
metrics: [{
name: 'theta',
targets: ['0_0', '0_1'],
values: [{
double_val: .9999
}]
}]
}
}]
""",
v2.calibration_pb2.FocusedCalibrationResult(),
)
)
)
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_results')
def test_results(get_job_results):
qjob = qtypes.QuantumJob(
execution_status=qtypes.ExecutionStatus(state=qtypes.ExecutionStatus.State.SUCCESS)
)
get_job_results.return_value = RESULTS
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
data = job.results()
assert len(data) == 2
assert str(data[0]) == 'q=0110'
assert str(data[1]) == 'q=1010'
get_job_results.assert_called_once_with('a', 'b', 'steve')
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_results')
def test_results_iter(get_job_results):
qjob = qtypes.QuantumJob(
execution_status=qtypes.ExecutionStatus(state=qtypes.ExecutionStatus.State.SUCCESS)
)
get_job_results.return_value = RESULTS
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
results = [str(r) for r in job]
assert len(results) == 2
assert results[0] == 'q=0110'
assert results[1] == 'q=1010'
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_results')
def test_results_getitem(get_job_results):
qjob = qtypes.QuantumJob(
execution_status=qtypes.ExecutionStatus(state=qtypes.ExecutionStatus.State.SUCCESS)
)
get_job_results.return_value = RESULTS
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
assert str(job[0]) == 'q=0110'
assert str(job[1]) == 'q=1010'
with pytest.raises(IndexError):
_ = job[2]
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_results')
def test_batched_results(get_job_results):
qjob = qtypes.QuantumJob(
execution_status=qtypes.ExecutionStatus(state=qtypes.ExecutionStatus.State.SUCCESS)
)
get_job_results.return_value = BATCH_RESULTS
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
data = job.results()
assert len(data) == 4
assert str(data[0]) == 'q=011'
assert str(data[1]) == 'q=111'
assert str(data[2]) == 'q=1101'
assert str(data[3]) == 'q=1001'
get_job_results.assert_called_once_with('a', 'b', 'steve')
data = job.batched_results()
assert len(data) == 2
assert len(data[0]) == 2
assert len(data[1]) == 2
assert str(data[0][0]) == 'q=011'
assert str(data[0][1]) == 'q=111'
assert str(data[1][0]) == 'q=1101'
assert str(data[1][1]) == 'q=1001'
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_results')
def test_batched_results_not_a_batch(get_job_results):
qjob = qtypes.QuantumJob(
execution_status=qtypes.ExecutionStatus(state=qtypes.ExecutionStatus.State.SUCCESS)
)
get_job_results.return_value = RESULTS
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
with pytest.raises(ValueError, match='batched_results'):
job.batched_results()
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_results')
def test_calibration_results(get_job_results):
qjob = qtypes.QuantumJob(
execution_status=qtypes.ExecutionStatus(state=qtypes.ExecutionStatus.State.SUCCESS)
)
get_job_results.return_value = CALIBRATION_RESULT
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
data = job.calibration_results()
get_job_results.assert_called_once_with('a', 'b', 'steve')
assert len(data) == 1
assert data[0].code == v2.calibration_pb2.ERROR_CALIBRATION_FAILED
assert data[0].error_message == 'uh oh'
assert data[0].token == 'abc'
assert data[0].valid_until.timestamp() == 1234567891
assert len(data[0].metrics)
assert data[0].metrics['theta'] == {(cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)): [0.9999]}
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_results')
def test_calibration_defaults(get_job_results):
qjob = qtypes.QuantumJob(
execution_status=qtypes.ExecutionStatus(state=qtypes.ExecutionStatus.State.SUCCESS)
)
result = v2.calibration_pb2.FocusedCalibrationResult()
result.results.add()
get_job_results.return_value = qtypes.QuantumResult(result=_to_any(result))
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
data = job.calibration_results()
get_job_results.assert_called_once_with('a', 'b', 'steve')
assert len(data) == 1
assert data[0].code == v2.calibration_pb2.CALIBRATION_RESULT_UNSPECIFIED
assert data[0].error_message is None
assert data[0].token is None
assert data[0].valid_until is None
assert len(data[0].metrics) == 0
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_results')
def test_calibration_results_not_a_calibration(get_job_results):
qjob = qtypes.QuantumJob(
execution_status=qtypes.ExecutionStatus(state=qtypes.ExecutionStatus.State.SUCCESS)
)
get_job_results.return_value = RESULTS
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
with pytest.raises(ValueError, match='calibration results'):
job.calibration_results()
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job_results')
def test_results_len(get_job_results):
qjob = qtypes.QuantumJob(
execution_status=qtypes.ExecutionStatus(state=qtypes.ExecutionStatus.State.SUCCESS)
)
get_job_results.return_value = RESULTS
job = cg.EngineJob('a', 'b', 'steve', EngineContext(), _job=qjob)
assert len(job) == 2
@mock.patch('cirq_google.engine.engine_client.EngineClient.get_job')
@mock.patch('time.sleep', return_value=None)
def test_timeout(patched_time_sleep, get_job):
qjob = qtypes.QuantumJob(
execution_status=qtypes.ExecutionStatus(state=qtypes.ExecutionStatus.State.RUNNING)
)
get_job.return_value = qjob
job = cg.EngineJob('a', 'b', 'steve', EngineContext(timeout=500))
with pytest.raises(RuntimeError, match='Timed out'):
job.results()
def test_str():
job = cg.EngineJob('a', 'b', 'steve', EngineContext())
assert str(job) == 'EngineJob(project_id=\'a\', program_id=\'b\', job_id=\'steve\')'
| apache-2.0 | -871,607,955,485,668,900 | 30.85061 | 95 | 0.582177 | false |
vherman3/AxonSegmentation | AxonDeepSeg/learn_model.py | 1 | 12287 | import tensorflow as tf
import math
import numpy as np
import os
import pickle
import time
from learning.input_data import input_data
import sys
def learn_model(trainingset_path, model_path, model_restored_path = None, learning_rate = None, verbose = 1):
if not learning_rate :
learning_rate = 0.0005
# Divers variables
Loss = []
Epoch = []
Accuracy = []
Report = ''
verbose = 1
# Training or Predicting
restore = True
# Results and Models
folder_model = model_path
if not os.path.exists(folder_model):
os.makedirs(folder_model)
display_step = 100
save_step = 600
# Network Parameters
image_size = 256
n_input = image_size * image_size
n_classes = 2
dropout = 0.75
depth = 6
hyperparameters = {'depth': depth,'dropout': dropout, 'image_size': image_size,
'model_restored_path': model_restored_path, 'restore': restore}
with open(folder_model+'/hyperparameters.pkl', 'wb') as handle :
pickle.dump(hyperparameters, handle)
# Optimization Parameters
batch_size = 1
training_iters = 500000
epoch_size = 200
Report += '\n\n---Savings---'
Report += '\n Model saved in : '+ folder_model
Report += '\n\n---PARAMETERS---\n'
Report += 'learning_rate : '+ str(learning_rate)+'; \n batch_size : ' + str(batch_size) +';\n depth : ' + str(depth) \
+';\n epoch_size: ' + str(epoch_size)+';\n dropout : ' + str(dropout)+';\n restore : ' + str(restore)\
+';\n (if model restored) restored_model :' + str(model_restored_path)
data_train = input_data(trainingset_path=trainingset_path, type='train')
data_test = input_data(trainingset_path=trainingset_path, type='test')
# Graph input
x = tf.placeholder(tf.float32, shape=(batch_size, image_size, image_size))
y = tf.placeholder(tf.float32, shape=(batch_size*n_input, n_classes))
keep_prob = tf.placeholder(tf.float32)
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
# Conv2D wrapper, with bias and relu activation
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool2d(x, k=2):
# MaxPool2D wrapper
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
padding='SAME')
# Create model
def conv_net(x, weights, biases, dropout, image_size = image_size):
# Reshape input picture
x = tf.reshape(x, shape=[-1, image_size, image_size, 1])
data_temp = x
data_temp_size = [image_size]
relu_results = []
# contraction
for i in range(depth):
conv1 = conv2d(data_temp, weights['wc1'][i], biases['bc1'][i])
conv2 = conv2d(conv1, weights['wc2'][i], biases['bc2'][i])
relu_results.append(conv2)
conv2 = maxpool2d(conv2, k=2)
data_temp_size.append(data_temp_size[-1]/2)
data_temp = conv2
conv1 = conv2d(data_temp, weights['wb1'], biases['bb1'])
conv2 = conv2d(conv1, weights['wb2'], biases['bb2'])
data_temp_size.append(data_temp_size[-1])
data_temp = conv2
# expansion
for i in range(depth):
data_temp = tf.image.resize_images(data_temp, data_temp_size[-1] * 2, data_temp_size[-1] * 2)
upconv = conv2d(data_temp, weights['upconv'][i], biases['upconv'][i])
data_temp_size.append(data_temp_size[-1]*2)
upconv_concat = tf.concat(concat_dim=3, values=[tf.slice(relu_results[depth-i-1], [0, 0, 0, 0],
[-1, data_temp_size[depth-i-1], data_temp_size[depth-i-1], -1]), upconv])
conv1 = conv2d(upconv_concat, weights['we1'][i], biases['be1'][i])
conv2 = conv2d(conv1, weights['we2'][i], biases['be2'][i])
data_temp = conv2
finalconv = tf.nn.conv2d(conv2, weights['finalconv'], strides=[1, 1, 1, 1], padding='SAME')
final_result = tf.reshape(finalconv, tf.TensorShape([finalconv.get_shape().as_list()[0] * data_temp_size[-1] * data_temp_size[-1], 2]))
return final_result
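    # Editorial note: conv_net above follows a U-Net style encoder/decoder --
    # `depth` levels of two 3x3 convolutions plus max-pooling on the way down,
    # a two-convolution bottleneck, then resize + 2x2 convolution, concatenation
    # with the matching contraction feature map and two more convolutions on
    # the way up, ending in a 1x1 convolution onto the two output classes.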
weights = {'wc1':[],'wc2':[],'we1':[],'we2':[],'upconv':[],'finalconv':[],'wb1':[], 'wb2':[]}
biases = {'bc1':[],'bc2':[],'be1':[],'be2':[],'finalconv_b':[],'bb1':[], 'bb2':[],'upconv':[]}
# Contraction
for i in range(depth):
if i == 0:
num_features_init = 1
num_features = 64
else:
num_features = num_features_init * 2
# Store layers weight & bias
weights['wc1'].append(tf.Variable(tf.random_normal([3, 3, num_features_init, num_features], stddev=math.sqrt(2.0/(9.0*float(num_features_init)))), name = 'wc1-%s'%i))
weights['wc2'].append(tf.Variable(tf.random_normal([3, 3, num_features, num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name = 'wc2-%s'%i))
biases['bc1'].append(tf.Variable(tf.random_normal([num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name='bc1-%s'%i))
biases['bc2'].append(tf.Variable(tf.random_normal([num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name='bc2-%s'%i))
image_size = image_size/2
num_features_init = num_features
num_features = num_features_init*2
weights['wb1']= tf.Variable(tf.random_normal([3, 3, num_features_init, num_features], stddev=math.sqrt(2.0/(9.0*float(num_features_init)))),name='wb1-%s'%i)
weights['wb2']= tf.Variable(tf.random_normal([3, 3, num_features, num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name='wb2-%s'%i)
    biases['bb1']= tf.Variable(tf.random_normal([num_features]), name='bb1-%s'%i)
biases['bb2']= tf.Variable(tf.random_normal([num_features]), name='bb2-%s'%i)
num_features_init = num_features
for i in range(depth):
num_features = num_features_init/2
weights['upconv'].append(tf.Variable(tf.random_normal([2, 2, num_features_init, num_features]), name='upconv-%s'%i))
biases['upconv'].append(tf.Variable(tf.random_normal([num_features]), name='bupconv-%s'%i))
weights['we1'].append(tf.Variable(tf.random_normal([3, 3, num_features_init, num_features], stddev=math.sqrt(2.0/(9.0*float(num_features_init)))), name='we1-%s'%i))
weights['we2'].append(tf.Variable(tf.random_normal([3, 3, num_features, num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name='we2-%s'%i))
biases['be1'].append(tf.Variable(tf.random_normal([num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name='be1-%s'%i))
biases['be2'].append(tf.Variable(tf.random_normal([num_features], stddev=math.sqrt(2.0/(9.0*float(num_features)))), name='be2-%s'%i))
num_features_init = num_features
weights['finalconv']= tf.Variable(tf.random_normal([1, 1, num_features, n_classes]), name='finalconv-%s'%i)
biases['finalconv_b']= tf.Variable(tf.random_normal([n_classes]), name='bfinalconv-%s'%i)
# Construct model
pred = conv_net(x, weights, biases, keep_prob)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
tf.scalar_summary('Loss', cost)
index = tf.Variable(0, trainable=False)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
mask = tf.argmax(pred, 1)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
init = tf.initialize_all_variables()
saver = tf.train.Saver(tf.all_variables())
summary_op = tf.merge_all_summaries()
# Launch the graph
Report += '\n\n---Intermediary results---\n'
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
last_epoch = 0
if model_restored_path :
folder_restored_model = model_restored_path
saver.restore(sess, folder_restored_model+"/model.ckpt")
file = open(folder_restored_model+'/evolution.pkl','r')
evolution_restored = pickle.load(file)
last_epoch = evolution_restored["steps"][-1]
else:
sess.run(init)
print 'training start'
step = 1
epoch = 1 + last_epoch
while step * batch_size < training_iters:
batch_x, batch_y = data_train.next_batch(batch_size, rnd = True, augmented_data= True)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
keep_prob: dropout})
if step % display_step == 0:
# Calculate batch loss and accuracy
loss, acc, p = sess.run([cost, accuracy, pred], feed_dict={x: batch_x,
y: batch_y,
keep_prob: 1., index: step*batch_size})
prediction = data_train.read_batch(p, batch_size)[0, :, :, 0]
ground_truth = data_train.read_batch(batch_y, batch_size)[0, :, :, 0]
if verbose == 2:
outputs = "Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
"{:.6f}".format(loss) + ", Training Accuracy= " + \
"{:.5f}".format(acc)
print outputs
if step % epoch_size == 0 :
start = time.time()
A = []
L = []
print epoch
data_test.set_batch_start()
print data_test.batch_start
for i in range(data_test.set_size):
batch_x, batch_y = data_test.next_batch(batch_size, rnd=False, augmented_data= False)
loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x, y: batch_y, keep_prob: 1.})
A.append(acc)
L.append(loss)
if verbose >= 1:
print '--\nAccuracy on patch'+str(i)+': '+str(acc)
print 'Loss on patch'+str(i)+': '+str(loss)
Accuracy.append(np.mean(A))
Loss.append(np.mean(L))
Epoch.append(epoch)
output_2 = '\n----\n Epoch: ' + str(epoch)
output_2+= '\n Accuracy: ' + str(np.mean(A))+';'
output_2+= '\n Loss: ' + str(np.mean(L))+';'
print '\n\n----Scores on test:---' + output_2
Report+= output_2
epoch+=1
if step % save_step == 0:
evolution = {'loss': Loss, 'steps': Epoch, 'accuracy': Accuracy}
with open(folder_model+'/evolution.pkl', 'wb') as handle:
pickle.dump(evolution, handle)
save_path = saver.save(sess, folder_model+"/model.ckpt")
print("Model saved in file: %s" % save_path)
file = open(folder_model+"/report.txt", 'w')
file.write(Report)
file.close()
step += 1
save_path = saver.save(sess, folder_model+"/model.ckpt")
evolution = {'loss': Loss, 'steps': Epoch, 'accuracy': Accuracy}
with open(folder_model+'/evolution.pkl', 'wb') as handle :
pickle.dump(evolution, handle)
print("Model saved in file: %s" % save_path)
print "Optimization Finished!"
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--path_training", required=True, help="")
ap.add_argument("-m", "--path_model", required=True, help="")
ap.add_argument("-m_init", "--path_model_init", required=False, help="")
ap.add_argument("-lr", "--learning_rate", required=False, help="")
args = vars(ap.parse_args())
path_training = args["path_training"]
path_model = args["path_model"]
path_model_init = args["path_model_init"]
learning_rate = args["learning_rate"]
if learning_rate :
learning_rate = float(args["learning_rate"])
else : learning_rate = None
learn_model(path_training, path_model, path_model_init, learning_rate) | mit | 6,847,251,026,175,072,000 | 39.96 | 172 | 0.567348 | false |
WebAssembly/sexpr-wasm-prototype | test/update-spec-tests.py | 1 | 3609 | #!/usr/bin/env python3
#
# Copyright 2016 WebAssembly Community Group participants
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DIR = SCRIPT_DIR
REPO_ROOT_DIR = os.path.dirname(SCRIPT_DIR)
TESTSUITE_DIR = os.path.join(REPO_ROOT_DIR, 'third_party', 'testsuite')
SPEC_TEST_DIR = os.path.join(TEST_DIR, 'spec')
WASM2C_SPEC_TEST_DIR = os.path.join(TEST_DIR, 'wasm2c', 'spec')
options = None
def GetFilesWithExtension(src_dir, want_ext):
result = set()
if os.path.exists(src_dir):
for filename in os.listdir(src_dir):
name, ext = os.path.splitext(filename)
if ext == want_ext:
result.add(name)
return result
def ProcessDir(wabt_test_dir, testsuite_dir, tool, flags=None):
testsuite_tests = GetFilesWithExtension(testsuite_dir, '.wast')
wabt_tests = GetFilesWithExtension(wabt_test_dir, '.txt')
for removed_test_name in wabt_tests - testsuite_tests:
test_filename = os.path.join(wabt_test_dir, removed_test_name + '.txt')
if options.verbose:
print('Removing %s' % test_filename)
os.remove(test_filename)
for added_test_name in testsuite_tests - wabt_tests:
wast_filename = os.path.join(
os.path.relpath(testsuite_dir, REPO_ROOT_DIR),
added_test_name + '.wast')
test_filename = os.path.join(wabt_test_dir, added_test_name + '.txt')
if options.verbose:
print('Adding %s' % test_filename)
test_dirname = os.path.dirname(test_filename)
if not os.path.exists(test_dirname):
os.makedirs(test_dirname)
with open(test_filename, 'w') as f:
f.write(';;; TOOL: %s\n' % tool)
f.write(';;; STDIN_FILE: %s\n' % wast_filename)
if flags:
f.write(';;; ARGS*: %s\n' % flags)
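# Editorial note -- illustrative only (the exact .wast filename depends on the
# checked-out testsuite): a generated stub for a proposal test would contain
# something like
#
#   ;;; TOOL: run-interp-spec
#   ;;; STDIN_FILE: third_party/testsuite/proposals/multi-value/block.wast
#   ;;; ARGS*: --enable-multi-value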
def ProcessProposalDir(name, flags=None):
ProcessDir(os.path.join(SPEC_TEST_DIR, name),
os.path.join(TESTSUITE_DIR, 'proposals', name),
'run-interp-spec',
flags)
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', help='print more diagnotic messages.',
action='store_true')
global options
options = parser.parse_args(args)
ProcessDir(SPEC_TEST_DIR, TESTSUITE_DIR, 'run-interp-spec')
ProcessDir(WASM2C_SPEC_TEST_DIR, TESTSUITE_DIR, 'run-spec-wasm2c')
ProcessProposalDir('multi-value', '--enable-multi-value')
ProcessProposalDir('mutable-global') # Already enabled by default.
ProcessProposalDir('nontrapping-float-to-int-conversions',
'--enable-saturating-float-to-int')
ProcessProposalDir('sign-extension-ops', '--enable-sign-extension')
ProcessProposalDir('bulk-memory-operations', '--enable-bulk-memory')
ProcessProposalDir('reference-types', '--enable-reference-types')
ProcessProposalDir('simd', '--enable-simd')
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| apache-2.0 | 5,995,394,094,434,507,000 | 34.732673 | 81 | 0.651704 | false |
openconfig/oc-pyang | openconfig_pyang/plugins/util/html_helper_test.py | 1 | 2086 | """
Copyright 2015 Google, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Test data for html_helper
"""
import html_helper
def main():
a_list = ['red', 'green', 'blue', 'orange']
text = 'a line of text'
attrs = {"class":"my-css-class", "id":"element-id"}
tag = "span"
label = "label:"
paragraph = "Lorem ipsum dolor sit amet, consectetur adipiscing\
elit. Nunc maximus, dui non sollicitudin sollicitudin, leo nibh\
luctus orci, varius maximus lacus nulla eget nibh. Nulla faucibus\
purus nulla, eu molestie massa cursus vitae. Vestibulum metus purus,\
tempus sed risus ac, lobortis efficitur lorem."
ht = html_helper.HTMLHelper()
print ht.h1(text)
print ht.h1(text, attrs)
print "\n"
print ht.h2(text)
print ht.h2(text, attrs)
print "\n"
print ht.h3(text)
print ht.h3(text, attrs)
print "\n"
print ht.h4(text)
print ht.h4(text, attrs)
print "\n"
print ht.h5(text)
print ht.h5(text, attrs)
print "\n"
print ht.h6(text)
print ht.h6(text, attrs)
print ht.h1(text, attrs, 5, True)
print ht.h1(text, attrs, 2, False)
print ht.h(8,text,attrs)
print ht.h(-1,text,attrs)
print ht.hr()
print ht.add_tag (tag, text, attrs)
print ht.add_tag (tag, text)
print "\n"
print ht.para(paragraph, attrs)
print "\n"
print ht.para(ht.add_tag(tag,label) + paragraph)
print "\n"
print ht.open_tag("div")
print ht.para(paragraph)
print ht.close_tag()
# print md.ol(a_list)
# print md.ul(a_list)
# print md.hr()
# print md.i(text)
# print md.b(text)
# print md.code(text)
if __name__ == '__main__':
main( )
| apache-2.0 | 2,804,690,863,647,126,000 | 21.923077 | 72 | 0.678811 | false |
blackjade/aci2xml | aci2xml.py | 1 | 4226 |
#
# Copyright (c) 2015 Fluke Networks.
# All rights reserved.
# No part of this source code may be copied, used, or modified
# without the express written consent of Fluke Networks.
#
# aci2xml.py: Convert the policy manager related section in a *.aci
# file to xml. For example, these lines:
# [\PolicyManager\Alarm0]
# Enable=D_1
# Count_Of_Threshold=D_1
# [\PolicyManager\Alarm0\Threshold0]
# Severity=D_2
# SeverityScore=D_100
# Action=S_Beep
# GroupType=D_2
# SSIDGroupCount=D_1
# SSIDGroup=S_MyWLAN
# ACLGroupCount=D_2
# ACLGroups=S_0,1
# Will be converted to this:
# <Alarm0>
# <AlarmEnabled>1</AlarmEnabled>
# <ThresholdCount>1</ThresholdCount>
# <Threshold0>
# <Severity>2</Severity>
# <Action>Beep</Action>
# <ThresholdInt>0</ThresholdInt>
# <ThresholdString/>
# <GroupType>2</GroupType>
# <IntArray_Count>0</IntArray_Count>
# <IntArray/>
# <FrameCount>50</FrameCount>
# <SignalStrength>15</SignalStrength>
# <IntMap_Count>0</IntMap_Count>
# <IntMap/>
# <SSIDGroups_Count>1</SSIDGroups_Count>
# <SSIDGroups>MyWLAN</SSIDGroups>
# <ACLGroups_Count>1</ACLGroups_Count>
# <ACLGroups>0</ACLGroups>
# </Threshold0>
# </Alarm0>
import os, argparse
import json
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import tostring
import xml.dom.minidom as minidom
def dictToXmlElement(tag, xmlDict):
'''
Convert a dict to xml element
'''
if not xmlDict or not isinstance(xmlDict, dict):
return None
elem = Element(tag)
for key, val in xmlDict.items():
if isinstance(val, dict):
# The next level is also a dict. recursive call to convert any depth
child = dictToXmlElement(key, val)
else:
child = Element(key)
child.text = str(val)
elem.append(child)
return elem
def readAci(fileName):
xmlRoot = dict()
with open(fileName) as f:
currNode = None
for s in f:
s = s.strip()
#print s
if s.startswith('[\\') and s.endswith(']'):
s = s[1:-1].strip()
if s == "":
currNode = None
continue
xmlKeys = s.split('\\')
currNode = xmlRoot
for key in xmlKeys:
if key == "":
continue
if not key in currNode:
currNode[key] = dict()
currNode = currNode[key]
elif '=' in s:
if currNode == None:
print s
else:
pos = s.find('=')
key = s[0:pos]
value = s[pos+3:]
currNode[key] = value
return xmlRoot
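# Editorial note -- illustrative only: for the sample section shown in the
# header comment, readAci builds a nested dict roughly like
#   {'PolicyManager': {'Alarm0': {'Enable': '1',
#                                 'Count_Of_Threshold': '1',
#                                 'Threshold0': {'Severity': '2', ...}}}}
# i.e. the two-character type prefix ("D_"/"S_") is stripped from each value.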
def writePolicyManagerXml(xmlDict, fileName):
'''
    Convert the simple dict produced by readAci into an xml tree
'''
if 'PolicyManager' in xmlDict:
xmlElem = dictToXmlElement('PolicyManager', xmlDict['PolicyManager'])
xmlString = tostring(xmlElem)
reparsed = minidom.parseString(xmlString)
with open(fileName, 'wb') as f:
reparsed.writexml(f, indent="\t", addindent="\t", newl="\n")
print 'Policy written to:', fileName
def main():
#parser = argparse.ArgumentParser(description='Convert the policy manager related section in a .aci file to xml file.')
#parser.add_argument('aciFile', type=str, help='ACI file name', nargs='?', default='./config/Default.aci')
#parser.add_argument('xmlFile', type=str, help='XML file name', nargs='?', default='./config/Default.xml')
#args = parser.parse_args()
aciFile = './config/Default.aci'
xmlFile = './config/Default.xml'
print 'Converting', aciFile, '->', xmlFile
xmlDict = readAci(aciFile)
if not xmlDict:
        print 'Cannot open the aci file or it is empty:', aciFile
writePolicyManagerXml(xmlDict, xmlFile)
print 'Done!'
if __name__ == '__main__':
main()
| apache-2.0 | 7,077,029,358,062,470,000 | 30.015152 | 123 | 0.559158 | false |
freundTech/deepl-cli | deepl/__main__.py | 1 | 2176 | import argparse
import locale
import sys
from deepl import translator
def print_results(result, extra_data, verbose=False):
if verbose:
print("Translated from {} to {}".format(extra_data["source"], extra_data["target"]))
print(result)
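# Editorial note -- illustrative invocations only, using the -s/--source,
# -t/--target and -i/--interactive options parsed in main() below:
#
#   python -m deepl -s EN -t DE "Hello world"   # translate the arguments
#   cat file.txt | python -m deepl -t DE        # translate stdin
#   python -m deepl -i -t FR                    # force interactive mode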
def main():
parser = argparse.ArgumentParser(description="Translate text to other languages using deepl.com")
parser.add_argument("-s", "--source", help="Source language", metavar="lang")
parser.add_argument("-t", "--target", help="Target language", metavar="lang")
parser.add_argument("-i", "--interactive", help="Force interactive mode", action="store_true")
parser.add_argument("-v", "--verbose", help="Print additional information", action="store_true")
parser.add_argument("text", nargs='*')
args = parser.parse_args()
locale_ = locale.getdefaultlocale()
preferred_langs = [locale_[0].split("_")[0].upper()]
if not args.source is None:
source = args.source.upper()
else:
source = 'auto'
if not args.target is None:
target = args.target.upper()
else:
target = None
if len(args.text) == 0 or args.interactive:
if sys.stdin.isatty() or args.interactive:
print("Please input text to translate")
while True:
text = input("> ")
result, extra_data = translator.translate(text, source, target, preferred_langs)
print_results(result, extra_data, args.verbose)
if extra_data["source"] not in preferred_langs:
preferred_langs.append(extra_data["source"])
if extra_data["target"] not in preferred_langs:
preferred_langs.append(extra_data["target"])
else:
text = sys.stdin.read()
result, extra_data = translator.translate(text, source, target, preferred_langs)
print_results(result, extra_data, args.verbose)
else:
text = " ".join(args.text)
result, extra_data = translator.translate(text, source, target, preferred_langs)
print_results(result, extra_data, args.verbose)
if __name__ == "__main__":
main() | mit | -357,198,274,125,609,200 | 35.283333 | 101 | 0.617188 | false |
fredericklussier/ObservablePy | tests/testObserverStore.py | 1 | 5291 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import unittest
from observablePy.ObserverStore import ObserverStore
from observablePy.ObserverTypeEnum import observerTypeEnum
class ObserverStoreTests(unittest.TestCase):
"""
setUp each test
"""
def setUp(self):
self.observers = ObserverStore()
"""
tearDown each test
"""
def tearDown(self):
self.observers = None
"""
Init
"""
def testInit_ShouldInitiateValue(self):
# Arrange see setUp
# Action see setUp
# Assert
self.assertFalse(self.observers.hasObservers())
"""
add
"""
def testAdd_UsingSingleElement_ShouldAdd(self):
# Arrange
def changeHandle():
print("Changes")
# Action
self.observers.add("voltage", changeHandle)
# Assert
self.assertTrue(self.observers.hasObservers())
self.assertEqual(
self.observers._observers,
[{"observing": "voltage",
"type": observerTypeEnum.element,
"call": changeHandle}])
def testAdd_UsingMultiElements_ShouldAdd(self):
# Arrange
def changeHandle():
print("Changes")
# Action
self.observers.add(["voltage", "level"], changeHandle)
# Assert
self.assertTrue(self.observers.hasObservers())
self.assertEqual(
self.observers._observers,
[{"observing": ["voltage", "level"],
"type": observerTypeEnum.listOfElements,
"call": changeHandle}])
def testAdd_UsingAllElements_ShouldAdd(self):
# Arrange
def changeHandle():
print("Changes")
# Action
self.observers.add("*", changeHandle)
# Assert
self.assertTrue(self.observers.hasObservers())
self.assertEqual(
self.observers._observers,
[{"observing": "*",
"type": observerTypeEnum.state,
"call": changeHandle}])
    def testAdd_UsingUnknownElement_ShouldRaiseError(self):
# Arrange
def changeHandle():
print("Changes")
        # Action and Assert
with self.assertRaises(TypeError):
self.observers.add(12, changeHandle)
def testAdd_WhenNotCallable_ShoulRaiseError(self):
# Arrange
def changeHandle():
print("Changes")
# Action and Assert
with self.assertRaises(TypeError):
# str is not callable
self.observers.add("voltage", "changeHandle")
"""
remove
"""
def testRemove_ShouldRemove(self):
# Arrange
def changeHandle():
print("Changes")
self.observers.add("voltage", changeHandle)
self.observers.add("level", changeHandle)
self.observers.add("plugged", changeHandle)
# Action
self.observers.remove("plugged", changeHandle)
# Assert
self.assertTrue(self.observers.hasObservers())
self.assertEqual(
self.observers.getObservers(),
[{
"observing": "voltage",
"call": changeHandle
}, {
"observing": "level",
"call": changeHandle
}])
"""
removeAll
"""
def testRemoveAll_ShouldRemoveAll(self):
# Arrange
def changeHandle():
print("Changes")
self.observers.add("voltage", changeHandle)
self.observers.add("level", changeHandle)
self.observers.add("plugged", changeHandle)
# Action
self.observers.removeAll()
# Assert
self.assertFalse(self.observers.hasObservers())
"""
iteration generator
"""
def testIteration_ShouldIter(self):
# Arrange
def changeHandle():
print("Changes")
self.observers.add("voltage", changeHandle)
self.observers.add("level", changeHandle)
self.observers.add("plugged", changeHandle)
# Action
index = 0
observers = self.observers.iterationGenerator()
for observer in observers:
# Assert
self.assertEqual(
observer["observing"],
["voltage", "level", "plugged"][index])
index += 1
def testIteration_UsingArray_ShouldIter(self):
# Arrange
def changeHandle():
print("Changes")
self.observers.add("voltage", changeHandle)
self.observers.add(["level", "voltage"], changeHandle)
self.observers.add("plugged", changeHandle)
# Action
actualResult = len(list(self.observers.iterationGenerator("voltage")))
# Assert
self.assertEqual(actualResult, 2)
def testIteration_UsingAll_ShouldIter(self):
# Arrange
def changeHandle():
print("Changes")
self.observers.add("voltage", changeHandle)
self.observers.add(["level", "voltage"], changeHandle)
self.observers.add("*", changeHandle)
self.observers.add("plugged", changeHandle)
# Action
actualResult = len(list(self.observers.iterationGenerator("voltage")))
# Assert
self.assertEqual(actualResult, 3)
| mit | 4,673,484,907,610,565,000 | 25.193069 | 78 | 0.571726 | false |
AvengerMoJo/DeepSea | srv/modules/runners/upgrade.py | 1 | 3678 | # -*- coding: utf-8 -*-
# pylint: disable=modernize-parse-error
"""
Verify that an automated upgrade is possible
"""
from __future__ import absolute_import
from __future__ import print_function
# pylint: disable=import-error,3rd-party-module-not-gated,redefined-builtin
import salt.client
import salt.utils.error
class UpgradeValidation(object):
"""
    Due to the current situation you have to upgrade all monitors
    before Ceph allows you to start any OSD.
    Our current implementation of maintenance upgrades triggers this
    behavior if you happen to have Monitor and Storage roles assigned
    to the same node (and more than one monitor).
    To avoid this, until a proper solution is in place, we stop users
    from executing the upgrade in the first place.
"""
def __init__(self, cluster='ceph'):
"""
Initialize Salt client, cluster
"""
self.local = salt.client.LocalClient()
self.cluster = cluster
def colocated_services(self):
"""
Check for shared monitor and storage roles
"""
search = "I@cluster:{}".format(self.cluster)
pillar_data = self.local.cmd(
search, 'pillar.items', [], tgt_type="compound")
for host in pillar_data:
if 'roles' in pillar_data[host]:
if ('storage' in pillar_data[host]['roles']
and 'mon' in pillar_data[host]['roles']):
msg = """
************** PLEASE READ ***************
We currently do not support upgrading when
you have a monitor and a storage role
assigned on the same node.
******************************************"""
return False, msg
return True, ""
def is_master_standalone(self):
"""
Check for shared master and storage role
"""
search = "I@roles:master"
pillar_data = self.local.cmd(
search, 'pillar.items', [], tgt_type="compound")
# in case of multimaster
for host in pillar_data:
if 'roles' in pillar_data[host]:
                if 'storage' in pillar_data[host]['roles']:
msg = """
************** PLEASE READ ***************
Detected a storage role on your master.
This is not supported. Please migrate all
OSDs off the master in order to continue.
******************************************"""
return False, msg
return True, ""
@staticmethod
def is_supported():
"""
Check if the automated upgrade is supported
"""
msg = """
************** PLEASE READ ***************
The automated upgrade is currently not supported.
Please refer to the official documentation.
******************************************"""
return False, msg
def help_():
"""
Usage
"""
usage = (
'salt-run upgrade.check:\n\n'
' Performs a series of checks to verify that upgrades are possible\n'
'\n\n')
print(usage)
return ""
def check():
"""
Run upgrade checks
"""
uvo = UpgradeValidation()
checks = [uvo.is_master_standalone,
uvo.is_supported] # , uvo.colocated_services]
for chk in checks:
ret, msg = chk()
if not ret:
print(msg)
return ret
return ret
__func_alias__ = {
'help_': 'help',
}
| gpl-3.0 | -6,941,238,278,801,299,000 | 31.263158 | 80 | 0.507069 | false |
clones/wtforms | wtforms/ext/appengine/fields.py | 1 | 3025 | import decimal
from wtforms import fields, widgets
class ReferencePropertyField(fields.SelectFieldBase):
"""
A field for ``db.ReferenceProperty``. The list items are rendered in a
select.
"""
widget = widgets.Select()
def __init__(self, label=u'', validators=None, reference_class=None,
label_attr=None, allow_blank=False, blank_text=u'', **kwargs):
super(ReferencePropertyField, self).__init__(label, validators,
**kwargs)
self.label_attr = label_attr
self.allow_blank = allow_blank
self.blank_text = blank_text
self._set_data(None)
if reference_class is None:
raise TypeError('Missing reference_class attribute in '
'ReferencePropertyField')
self.query = reference_class.all()
def _get_data(self):
if self._formdata is not None:
for obj in self.query:
key = str(obj.key())
if key == self._formdata:
self._set_data(key)
break
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def iter_choices(self):
if self.allow_blank:
yield (u'__None', self.blank_text, self.data is None)
for obj in self.query:
key = str(obj.key())
label = self.label_attr and getattr(obj, self.label_attr) or key
yield (key, label, key == self.data)
def process_formdata(self, valuelist):
if valuelist:
if valuelist[0] == '__None':
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
if not self.allow_blank or self.data is not None:
for obj in self.query:
if self.data == str(obj.key()):
break
else:
raise ValueError(self.gettext(u'Not a valid choice'))
class StringListPropertyField(fields.TextAreaField):
"""
A field for ``db.StringListProperty``. The list items are rendered in a
textarea.
"""
def process_data(self, value):
if isinstance(value, list):
value = '\n'.join(value)
self.data = value
def populate_obj(self, obj, name):
if isinstance(self.data, basestring):
value = self.data.splitlines()
else:
value = []
setattr(obj, name, value)
class GeoPtPropertyField(fields.TextField):
def process_formdata(self, valuelist):
if valuelist:
try:
lat, lon = valuelist[0].split(',')
self.data = u'%s,%s' % (decimal.Decimal(lat.strip()), decimal.Decimal(lon.strip()),)
except (decimal.InvalidOperation, ValueError):
raise ValueError(u'Not a valid coordinate location')
| bsd-3-clause | -4,359,445,590,405,354,500 | 30.842105 | 100 | 0.549752 | false |
pyrocko/pyrocko | src/apps/colosseo.py | 1 | 7525 | from __future__ import print_function
# http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
import sys
import logging
import os.path as op
from optparse import OptionParser
from pyrocko import util, scenario, guts, gf
from pyrocko import __version__
logger = logging.getLogger('pyrocko.apps.colosseo')
km = 1000.
def d2u(d):
return dict((k.replace('-', '_'), v) for (k, v) in d.items())
description = '''This is Colosseo, an earthquake scenario generator.
Create seismic waveforms, InSAR and GNSS offsets for a simulated earthquake
scenario.
Colosseo is part of Pyrocko. Version %s.
''' % __version__
subcommand_descriptions = {
'init': 'initialize a new, blank scenario',
'fill': 'fill the scenario with modelled data',
'snuffle': 'open Snuffler to inspect the waveform data',
'map': 'map the scenario arena'
}
subcommand_usages = {
'init': 'init <scenario_dir>',
'fill': 'fill <scenario_dir>',
'snuffle': 'snuffle <scenario_dir>',
'map': '<scenario_dir>',
}
subcommands = subcommand_descriptions.keys()
program_name = 'colosseo'
usage_tdata = d2u(subcommand_descriptions)
usage_tdata['program_name'] = program_name
usage_tdata['description'] = description
usage = '''%(program_name)s <subcommand> [options] [--] <arguments> ...
%(description)s
Subcommands:
init %(init)s
fill %(fill)s
snuffle %(snuffle)s
map %(map)s
To get further help and a list of available options for any subcommand run:
%(program_name)s <subcommand> --help
''' % usage_tdata
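# Editorial note -- illustrative workflow, paths and coordinates are made up:
#
#   colosseo init scenario/ --location=52.0,11.0 --radius=50
#   colosseo fill scenario/
#   colosseo snuffle scenario/
#
# init writes scenario.yml and a gf_stores directory; fill models the data and
# renders map.pdf inside the scenario directory.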
def die(message, err='', prelude=''):
if prelude:
prelude = prelude + '\n'
if err:
err = '\n' + err
sys.exit('%s%s failed: %s%s' % (prelude, program_name, message, err))
def none_or_float(x):
if x == 'none':
return None
else:
return float(x)
def add_common_options(parser):
parser.add_option(
'--loglevel',
action='store',
dest='loglevel',
type='choice',
choices=('critical', 'error', 'warning', 'info', 'debug'),
default='info',
help='set logger level to '
'"critical", "error", "warning", "info", or "debug". '
'Default is "%default".')
def process_common_options(options):
util.setup_logging(program_name, options.loglevel)
def cl_parse(command, args, setup=None, details=None):
usage = subcommand_usages[command]
descr = subcommand_descriptions[command]
if isinstance(usage, str):
usage = [usage]
susage = '%s %s' % (program_name, usage[0])
for s in usage[1:]:
susage += '\n%s%s %s' % (' '*7, program_name, s)
description = descr[0].upper() + descr[1:] + '.'
if details:
description = description + ' %s' % details
parser = OptionParser(usage=susage, description=description)
if setup:
setup(parser)
add_common_options(parser)
(options, args) = parser.parse_args(args)
process_common_options(options)
return parser, options, args
def get_scenario_yml(path):
fn = op.join(path, 'scenario.yml')
if op.exists(fn):
return fn
return False
def command_init(args):
def setup(parser):
parser.add_option(
'--force', dest='force', action='store_true',
help='overwrite existing files')
parser.add_option(
'--location', dest='location', metavar='LAT,LON',
help='set scenario center location [deg]')
parser.add_option(
'--radius', dest='radius', metavar='RADIUS', type=float,
help='set scenario center location [km]')
parser, options, args = cl_parse('init', args, setup=setup)
if len(args) != 1:
parser.print_help()
sys.exit(1)
if options.location:
try:
lat, lon = map(float, options.location.split(','))
except Exception:
die('expected --location=LAT,LON')
else:
lat = lon = None
if options.radius is not None:
radius = options.radius * km
else:
radius = None
project_dir = args[0]
try:
scenario.ScenarioGenerator.initialize(
project_dir, lat, lon, radius, force=options.force)
gf_stores_path = op.join(project_dir, 'gf_stores')
util.ensuredir(gf_stores_path)
except scenario.CannotCreatePath as e:
die(str(e) + ' Use --force to override.')
except scenario.ScenarioError as e:
die(str(e))
def command_fill(args):
def setup(parser):
parser.add_option(
'--force', dest='force', action='store_true',
help='overwrite existing files')
parser, options, args = cl_parse('fill', args, setup=setup)
if len(args) == 0:
args.append('.')
fn = get_scenario_yml(args[0])
if not fn:
parser.print_help()
sys.exit(1)
project_dir = args[0]
gf_stores_path = op.join(project_dir, 'gf_stores')
try:
engine = get_engine([gf_stores_path])
sc = guts.load(filename=fn)
sc.init_modelling(engine)
sc.ensure_gfstores(interactive=True)
sc.prepare_data(path=project_dir, overwrite=options.force)
sc.ensure_data(path=project_dir)
sc.make_map(op.join(project_dir, 'map.pdf'))
except scenario.CannotCreatePath as e:
die(str(e) + ' Use --force to override.')
except scenario.ScenarioError as e:
die(str(e))
def command_map(args):
parser, options, args = cl_parse('map', args)
if len(args) == 0:
args.append('.')
fn = get_scenario_yml(args[0])
if not fn:
parser.print_help()
sys.exit(1)
project_dir = args[0]
gf_stores_path = op.join(project_dir, 'gf_stores')
engine = get_engine([gf_stores_path])
try:
sc = guts.load(filename=fn)
sc.init_modelling(engine)
sc.make_map(op.join(project_dir, 'map.pdf'))
except scenario.ScenarioError as e:
die(str(e))
def command_snuffle(args):
from pyrocko.gui import snuffler
    parser, options, args = cl_parse('snuffle', args)
if len(args) == 0:
args.append('.')
fn = get_scenario_yml(args[0])
if not fn:
parser.print_help()
sys.exit(1)
project_dir = args[0]
gf_stores_path = op.join(project_dir, 'gf_stores')
engine = get_engine([gf_stores_path])
sc = guts.load(filename=fn)
sc.init_modelling(engine)
return snuffler.snuffle(
sc.get_pile(),
stations=sc.get_stations(),
events=sc.get_events())
def main(args=None):
if args is None:
args = sys.argv[1:]
if len(args) < 1:
sys.exit('Usage: %s' % usage)
command = args.pop(0)
if command in subcommands:
globals()['command_' + command](args)
elif command in ('--help', '-h', 'help'):
if command == 'help' and args:
acommand = args[0]
if acommand in subcommands:
globals()['command_' + acommand](['--help'])
sys.exit('Usage: %s' % usage)
else:
sys.exit('%s: error: no such subcommand: %s' % (program_name, command))
def get_engine(gf_store_superdirs):
engine = gf.LocalEngine(
store_superdirs=gf_store_superdirs, use_config=True)
logger.info(
'Directories to be searched for GF stores:\n%s'
% '\n'.join(' ' + s for s in engine.store_superdirs))
return engine
if __name__ == '__main__':
main()
| gpl-3.0 | 5,485,994,843,530,995,000 | 22.964968 | 79 | 0.592292 | false |
Darktel/Homework | Translit.py | 1 | 1089 | __author__ = 'Darktel'
def translit(Mystring):
"""
String (Rus) -> String (Eng)
"""
RusString = 'а,б,в,г,д,е,ё,ж,з,и,й,к,л,м,н,о,п,р,с,т,у,ф,х,ц,ч,ш,щ,ы,ь,ъ,э,ю,я,*, ,'
EngString = "a,b,v,g,d,e,yo,zh,z,i,j,k,l,m,n,o,p,r,s,t,u,f,h,c,ch,sh,xh,y,',`,q,ju,ya,*,-,"
RusChar = RusString.split(',')
EngChar = EngString.split(',')
translitString = ''
for char in Mystring:
try:
if char.isupper():
charlow = char.lower()
index = RusChar.index(charlow)
translitString += EngChar[index].upper()
else:
index = RusChar.index(char)
translitString += EngChar[index]
        except ValueError:  # character has no mapping in the table
translitString += char
return translitString
testString = translit('''доска обрезная 50х150х6000
доска обрезная краснодар цена
цена куба доски обрезной
куб обрезной доски
купить доску обрезную
''')
print(testString)
| gpl-2.0 | 5,363,980,151,163,219,000 | 22.390244 | 95 | 0.555787 | false |
yephper/django | tests/transactions/tests.py | 1 | 20076 | from __future__ import unicode_literals
import sys
import threading
import time
from unittest import skipIf, skipUnless
from django.db import (
DatabaseError, Error, IntegrityError, OperationalError, connection,
transaction,
)
from django.test import (
TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from .models import Reporter
@skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicTests(TransactionTestCase):
"""
Tests for the atomic decorator and context manager.
The tests make assertions on internal attributes because there isn't a
robust way to ask the database for its current transaction state.
Since the decorator syntax is converted into a context manager (see the
implementation), there are only a few basic tests with the decorator
syntax and the bulk of the tests use the context manager syntax.
"""
available_apps = ['transactions']
def test_decorator_syntax_commit(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_decorator_syntax_rollback(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with self.assertRaisesMessage(Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_alternate_decorator_syntax_commit(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_alternate_decorator_syntax_rollback(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with self.assertRaisesMessage(Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_rollback(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
def test_nested_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_nested_rollback_commit(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic():
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_rollback_rollback(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
def test_merged_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
# Writes in the outer block are rolled back too.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_commit(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_rollback(self):
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_commit_commit(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with atomic:
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
def test_reuse_commit_rollback(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_reuse_rollback_commit(self):
atomic = transaction.atomic()
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with atomic:
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_rollback_rollback(self):
atomic = transaction.atomic()
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with self.assertRaisesMessage(Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_force_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
# atomic block shouldn't rollback, but force it.
self.assertFalse(transaction.get_rollback())
transaction.set_rollback(True)
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_prevent_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
sid = transaction.savepoint()
# trigger a database error inside an inner atomic without savepoint
with self.assertRaises(DatabaseError):
with transaction.atomic(savepoint=False):
with connection.cursor() as cursor:
cursor.execute(
"SELECT no_such_col FROM transactions_reporter")
# prevent atomic from rolling back since we're recovering manually
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
transaction.savepoint_rollback(sid)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
class AtomicInsideTransactionTests(AtomicTests):
"""All basic tests for atomic should also pass within an existing transaction."""
def setUp(self):
self.atomic = transaction.atomic()
self.atomic.__enter__()
def tearDown(self):
self.atomic.__exit__(*sys.exc_info())
@skipIf(connection.features.autocommits_when_autocommit_is_off,
"This test requires a non-autocommit mode that doesn't autocommit.")
class AtomicWithoutAutocommitTests(AtomicTests):
"""All basic tests for atomic should also pass when autocommit is turned off."""
def setUp(self):
transaction.set_autocommit(False)
def tearDown(self):
# The tests access the database after exercising 'atomic', initiating
        # a transaction; a rollback is required before restoring autocommit.
transaction.rollback()
transaction.set_autocommit(True)
@skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicMergeTests(TransactionTestCase):
"""Test merging transactions with savepoint=False."""
available_apps = ['transactions']
def test_merged_outer_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Calculus")
raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
                # connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
            # The second insert couldn't be rolled back. Temporarily mark the
            # connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The first block has a savepoint and must roll back.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_inner_savepoint_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with self.assertRaisesMessage(Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Calculus")
raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
                # connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The second block has a savepoint and must roll back.
self.assertEqual(Reporter.objects.count(), 1)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
@skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicErrorsTests(TransactionTestCase):
available_apps = ['transactions']
def test_atomic_prevents_setting_autocommit(self):
autocommit = transaction.get_autocommit()
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.set_autocommit(not autocommit)
# Make sure autocommit wasn't changed.
self.assertEqual(connection.autocommit, autocommit)
def test_atomic_prevents_calling_transaction_methods(self):
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.commit()
with self.assertRaises(transaction.TransactionManagementError):
transaction.rollback()
def test_atomic_prevents_queries_in_broken_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# The transaction is marked as needing rollback.
with self.assertRaises(transaction.TransactionManagementError):
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Haddock")
@skipIfDBFeature('atomic_transactions')
def test_atomic_allows_queries_after_fixing_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# Mark the transaction as no longer needing rollback.
transaction.set_rollback(False)
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Calculus")
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_atomic_prevents_queries_in_broken_transaction_after_client_close(self):
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
connection.close()
# The connection is closed and the transaction is marked as
# needing rollback. This will raise an InterfaceError on databases
# that refuse to create cursors on closed connections (PostgreSQL)
# and a TransactionManagementError on other databases.
with self.assertRaises(Error):
Reporter.objects.create(first_name="Cuthbert", last_name="Calculus")
        # The connection is usable again.
self.assertEqual(Reporter.objects.count(), 0)
@skipUnless(connection.vendor == 'mysql', "MySQL-specific behaviors")
class AtomicMySQLTests(TransactionTestCase):
available_apps = ['transactions']
@skipIf(threading is None, "Test requires threading")
def test_implicit_savepoint_rollback(self):
"""MySQL implicitly rolls back savepoints when it deadlocks (#22291)."""
other_thread_ready = threading.Event()
def other_thread():
try:
with transaction.atomic():
Reporter.objects.create(id=1, first_name="Tintin")
other_thread_ready.set()
# We cannot synchronize the two threads with an event here
# because the main thread locks. Sleep for a little while.
time.sleep(1)
# 2) ... and this line deadlocks. (see below for 1)
Reporter.objects.exclude(id=1).update(id=2)
finally:
# This is the thread-local connection, not the main connection.
connection.close()
other_thread = threading.Thread(target=other_thread)
other_thread.start()
other_thread_ready.wait()
with self.assertRaisesMessage(OperationalError, 'Deadlock found'):
# Double atomic to enter a transaction and create a savepoint.
with transaction.atomic():
with transaction.atomic():
# 1) This line locks... (see above for 2)
Reporter.objects.create(id=1, first_name="Tintin")
other_thread.join()
class AtomicMiscTests(TransactionTestCase):
available_apps = []
def test_wrap_callable_instance(self):
"""#20028 -- Atomic must support wrapping callable instances."""
class Callable(object):
def __call__(self):
pass
# Must not raise an exception
transaction.atomic(Callable())
@skipUnlessDBFeature('can_release_savepoints')
def test_atomic_does_not_leak_savepoints_on_failure(self):
"""#23074 -- Savepoints must be released after rollback."""
# Expect an error when rolling back a savepoint that doesn't exist.
# Done outside of the transaction block to ensure proper recovery.
with self.assertRaises(Error):
# Start a plain transaction.
with transaction.atomic():
# Swallow the intentional error raised in the sub-transaction.
with self.assertRaisesMessage(Exception, "Oops"):
# Start a sub-transaction with a savepoint.
with transaction.atomic():
sid = connection.savepoint_ids[-1]
raise Exception("Oops")
# This is expected to fail because the savepoint no longer exists.
connection.savepoint_rollback(sid)
@skipIf(connection.features.autocommits_when_autocommit_is_off,
"This test requires a non-autocommit mode that doesn't autocommit.")
def test_orm_query_without_autocommit(self):
"""#24921 -- ORM queries must be possible after set_autocommit(False)."""
transaction.set_autocommit(False)
try:
Reporter.objects.create(first_name="Tintin")
finally:
transaction.rollback()
transaction.set_autocommit(True)
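# Note (editor's addition): these tests belong to Django's own test suite and
# are normally run through its runner from the tests/ directory, e.g.
# ``./runtests.py transactions``; the exact invocation depends on the checkout.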
| bsd-3-clause | 9,000,106,414,789,370,000 | 43.627273 | 85 | 0.616258 | false |
hornedbull/gmailPy | gmailPy.py | 1 | 13792 | # Vatsal Shah
# ECE-C433 Mini-Project 2
# gmailPy - A terminal gmail client
# Tested on Python 2.7.3
# imapclient is not a part of the standard python library
# install using sudo pip install imapclient
import getpass
from imapclient import IMAPClient
import operator
import email
import optparse
import sys
class gmailPy(object):
def __init__(self):
self.IMAP_SERVER = 'imap.gmail.com'
self.ssl = True
self.myIMAPc = None
self.response = None
self.folders = []
def login(self, username, password):
self.myIMAPc = IMAPClient(self.IMAP_SERVER, ssl=self.ssl)
self.myIMAPc.login(username, password)
# Returns a list of all the folders for a particular account
def get_folders(self):
self.response = self.myIMAPc.list_folders()
for item in self.response:
self.folders.append(item[2].strip('u'))
return self.folders
# Returns the total number of messages in a folder
def get_mail_count(self, folder='Inbox'):
self.response = self.myIMAPc.select_folder(folder, True)
return self.response['EXISTS']
# Method to delete messages based on their size
def delete_bigmail(self, folder='Inbox'):
self.myIMAPc.select_folder(folder, False)
# Gets all the message ids of the messages which are not deleted in the folder
messages = self.myIMAPc.search(['NOT DELETED'])
print "%d messages that aren't deleted" % len(messages)
if len(messages) > 0:
print "You can exit by entering 0 or pressing CTRL+C \n"
        else:
            print "There are no messages in the folder"
# Gets the message sizes for all the message ids returned in previous step
# Note: Just sends one request for all message ids with a return time < 10 ms
self.response = self.myIMAPc.fetch(messages, ['RFC822.SIZE'])
# Sorts the dictionary returned by fetch by size in descending order
sorted_response = sorted(self.response.iteritems(), key=operator.itemgetter(1), reverse=True)
count = 1
try:
for item in sorted_response:
# Gets the biggest message including headers, body, etc.
big_message = self.myIMAPc.fetch(item[0], ['RFC822'])
for msgid, data in big_message.iteritems():
msg_string = data['RFC822']
# Parses the message string using email library
msg = email.message_from_string(msg_string)
val = dict(self.response[msgid])['RFC822.SIZE']
print 'ID %d: From: %s Date: %s' % (msgid, msg['From'], msg['date'])
print 'To: %s' % (msg['To'])
print 'Subject: %s' % (msg['Subject'])
print 'Size: %d bytes \n' % (val)
user_del = raw_input("Do you want to delete this message?(Y/N): ")
if user_del == 'Y':
self.delete_message(msgid)
if count == len(sorted_response):
print "There are no more messages"
else:
print "\nMoving on to the next biggest message >>> \n"
elif user_del == '0':
print "Program exiting"
sys.exit()
else:
if count == len(sorted_response):
print "There are no more messages"
else:
print "\nMoving on to the next biggest message >>> \n"
count += 1
except KeyboardInterrupt:
print "Program exiting"
sys.exit()
# Method to delete messages based on their size with a search criteria
def delete_bigmail_search(self, folder='Inbox', command='', criteria=''):
self.myIMAPc.select_folder(folder, False)
# Gets all the message ids from the server based on the search criteria
messages = self.myIMAPc.search('%s "%s"' % (command, criteria))
print "%d messages that match --> %s: %s" % (len(messages), command, criteria)
if len(messages) > 0:
print "You can exit by entering 0 or pressing CTRL+C \n"
        else:
            print "There are no messages that matched your search criteria"
# Gets the message sizes for all the message ids returned in previous step
# Note: Just sends one request for all message ids with a return time < 10 ms
self.response = self.myIMAPc.fetch(messages, ['RFC822.SIZE'])
# Sorts the messages in decending order of their sizes
sorted_response = sorted(self.response.iteritems(), key=operator.itemgetter(1), reverse=True)
count = 1
try:
for item in sorted_response:
# Gets the entire content for the biggest message identified
big_message = self.myIMAPc.fetch(item[0], ['RFC822'])
for msgid, data in big_message.iteritems():
msg_string = data['RFC822']
msg = email.message_from_string(msg_string)
val = dict(self.response[msgid])['RFC822.SIZE']
print 'ID %d: From: %s Date: %s' % (msgid, msg['From'], msg['date'])
print 'To: %s' % (msg['To'])
print 'Subject: %s' % (msg['Subject'])
print 'Size: %d bytes \n' % (val)
user_del = raw_input("Do you want to delete this message?(Y/N): ")
if user_del == 'Y':
self.delete_message(msgid)
if count == len(sorted_response):
print "There are no more messages"
else:
print "\nMoving on to the next biggest message >>> \n"
elif user_del == '0':
print "Program exiting"
sys.exit()
else:
if count == len(sorted_response):
print "There are no more messages"
else:
print "\nMoving on to the next biggest message >>> \n"
count += 1
except KeyboardInterrupt:
print "Program exiting"
sys.exit()
# Deletes a message in the current folder based on msg id
def delete_message(self, id):
try:
self.myIMAPc.delete_messages([id])
self.myIMAPc.expunge()
print "Message deleted"
except IMAPClient.Error as err:
print "Message deletion failed"
print err
# Renames a folder
def rename_folder(self, oldfolder, newfolder):
try:
self.myIMAPc.rename_folder(oldfolder, newfolder)
print "Folder %s renamed to %s" % (oldfolder, newfolder)
except IMAPClient.Error as err:
print "Folder renaming failed"
print err
# Creates a new folder
def create_folder(self, folder):
try:
self.myIMAPc.create_folder(folder)
print "New folder %s created" % folder
except IMAPClient.Error as err:
print "Folder creation failed"
print err
# Deletes a folder
def delete_folder(self, folder):
try:
self.myIMAPc.delete_folder(folder)
print "Folder %s deleted" % folder
except IMAPClient.Error as err:
print "Folder deletion failed"
print err
# Creates a new folder and copies the content from the two folders that need to be merged
# Then deletes the old folders
def merge_folders(self, merged_folder, folder_1, folder_2):
try:
self.create_folder(merged_folder)
# Selects the folder with read/write permission
self.myIMAPc.select_folder(folder_1, True)
messages = self.myIMAPc.search(['NOT DELETED'])
print "Moving %d messages from %s to %s" % (len(messages), folder_1, merged_folder)
self.myIMAPc.copy(messages, merged_folder)
self.myIMAPc.select_folder(folder_2, True)
messages = self.myIMAPc.search(['NOT DELETED'])
print "Moving %d messages from %s to %s" % (len(messages), folder_2, merged_folder)
self.myIMAPc.copy(messages, merged_folder)
print "Deleting %s and %s..." % (folder_1, folder_2)
self.delete_folder(folder_1)
self.delete_folder(folder_2)
print "Merge folder operation succeeded"
except IMAPClient.Error as err:
print "Merge operation failed"
print err
def logout(self):
self.myIMAPc.logout()
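# Programmatic usage sketch (editor's addition, illustrative only; credentials
# and folder names are placeholders - main() below drives the same flow from
# the command line):
#
#   session = gmailPy()
#   session.login('user@example.com', 'password')
#   print session.get_folders()
#   print session.get_mail_count('INBOX')
#   session.logout()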
def main():
# Using parser library for handling command line arguments
usage = "usage: python gmailPy.py [options]"
prog_desc = """gmailPy is a scalable command line gmail client capable of adding, deleting, renaming and merging folders. It also provides interface for the user to delete big messages based on size and search criteria."""
parser = optparse.OptionParser(usage=usage, description=prog_desc)
parser.add_option(
'-l', '--list', help="List folder statistics. This doesn't need any arguments. Usage: python gmailPy.py -l", dest='lf',
default=False, action='store_true')
parser.add_option(
'-b', '--big', help='Delete big messages. Please enter folder name as an argument. For example: python gmailPy.py -b INBOX',
dest='big_folder_name', action='store')
parser.add_option(
'-s', '--bigsearch', help='Delete big messages based on search criteria. This takes 3 arguments folder_name, command and criteria. For example: python gmailPy.py -s INBOX FROM [email protected]',
dest='bigsearch_folder_name', action='store', nargs=3)
parser.add_option(
'-n', '--new', help='Create new folder. Please enter folder name as an argument. For example: python gmailPy.py -n Test_folder',
dest='new_folder_name', action='store')
parser.add_option(
'-d', '--del', help='Delete a folder. Please enter folder name as an argument. For example: python gmailPy.py -d Test_folder',
dest='del_folder_name', action='store')
parser.add_option(
'-r', '--rename', help='Rename a folder. Please enter old_folder_name and new_folder_name as two arguments. For example: python gmailPy.py -r OLDFOLDERNAME NEWFOLDERNAME',
dest='rename_folder_name', action='store', nargs=2)
parser.add_option(
'-m', '--merge', help='Merge two folders. This takes 3 arguments merged_folder_name , folder_1_name , folder_2_name. For example: python gmailPy.py -m Test_folder_2 Test_folder_0 Test_folder_1',
dest='merge_folder_name', action='store', nargs=3)
(opts, args) = parser.parse_args()
try:
print "***** Welcome to gmailPy!!! A command line GMAIL Client *****"
print "Please enter your username and password >>>>>>"
username = raw_input("Username: ")
password = getpass.getpass()
## Can be set for testing and debugging
# username = 'username'
# password = 'password'
client_session = gmailPy()
client_session.login(username, password)
if opts.lf:
client_folders = client_session.get_folders()
print "########## Your folder Statistics ##########"
for item in client_folders:
try:
print item, ':', client_session.get_mail_count(item), 'messages'
except:
pass
print "############################################"
        if opts.big_folder_name is not None:
print "Let's enter your %s folder and delete big mail" % opts.big_folder_name
client_session.delete_bigmail(opts.big_folder_name)
available_commands = ['TO', 'FROM', 'SUBJECT']
        if opts.bigsearch_folder_name is not None:
if opts.bigsearch_folder_name[1] in available_commands:
print "Let's enter your %s folder and delete big mail with %s: %s" % (opts.bigsearch_folder_name[0], opts.bigsearch_folder_name[1], opts.bigsearch_folder_name[2])
client_session.delete_bigmail_search(
opts.bigsearch_folder_name[0], opts.bigsearch_folder_name[1], opts.bigsearch_folder_name[2])
else:
print "Invalid Command Entry. Please enter one of the follwing commands: ", available_commands
        if opts.new_folder_name is not None:
print "Creating a new folder with name %s ..." % opts.new_folder_name
client_session.create_folder(opts.new_folder_name)
        if opts.del_folder_name is not None:
print "Deleting %s folder..." % opts.del_folder_name
client_session.delete_folder(opts.del_folder_name)
        if opts.rename_folder_name is not None:
print "Renaming folder %s to %s..." % (opts.rename_folder_name[0], opts.rename_folder_name[1])
client_session.rename_folder(opts.rename_folder_name[0], opts.rename_folder_name[1])
        if opts.merge_folder_name is not None:
print "Merging folders %s and %s to %s..." % (opts.merge_folder_name[1], opts.merge_folder_name[2], opts.merge_folder_name[0])
client_session.merge_folders(opts.merge_folder_name[0], opts.merge_folder_name[1], opts.merge_folder_name[2])
client_session.logout()
except IMAPClient.Error as err:
print "Something awful happened"
print err
except KeyboardInterrupt:
print "gmailPy force shutdown"
client_session.logout()
if __name__ == '__main__':
main()
| gpl-2.0 | -6,506,492,856,560,792,000 | 47.055749 | 226 | 0.583019 | false |
xlorepdarkhelm/colors | colors/html.py | 1 | 1085 | """Contains the implementation of the HTML color group."""
__all__ = (
'HTML',
)
import enum
from colors import base
class HTML(base.ColorGroup):
"""
The color group for HTML 4.01 approved colors.
These are the colors as defined in the HTML 4.01 specification from 1999.
See Also:
`Wikipedia <https://en.wikipedia.org/wiki/Web_colors#HTML_color_names>`
"""
White = base.RGBColor(255, 255, 255)
Silver = base.RGBColor(192, 192, 192)
Gray = base.RGBColor(128, 128, 128)
Black = base.RGBColor( 0, 0, 0)
Red = base.RGBColor(255, 0, 0)
Maroon = base.RGBColor(128, 0, 0)
Yellow = base.RGBColor(255, 255, 0)
Olive = base.RGBColor(128, 128, 0)
Lime = base.RGBColor( 0, 255, 0)
Green = base.RGBColor( 0, 128, 0)
Aqua = base.RGBColor( 0, 255, 255)
Teal = base.RGBColor( 0, 128, 128)
Blue = base.RGBColor( 0, 0, 255)
Navy = base.RGBColor( 0, 0, 128)
Fuchsia = base.RGBColor(255, 0, 255)
Purple = base.RGBColor(128, 0, 128)
| mit | 1,404,955,823,179,742,200 | 28.324324 | 79 | 0.584332 | false |
wuzhy/autotest | scheduler/drones_unittest.py | 1 | 1912 | #!/usr/bin/python2.4
"""Tests for autotest_lib.scheduler.drones."""
import cPickle
import common
from autotest_lib.client.common_lib import utils
from autotest_lib.client.common_lib.test_utils import mock, unittest
from autotest_lib.scheduler import drones
from autotest_lib.server.hosts import ssh_host
class RemoteDroneTest(unittest.TestCase):
def setUp(self):
self.god = mock.mock_god()
self._mock_host = self.god.create_mock_class(ssh_host.SSHHost,
'mock SSHHost')
self.god.stub_function(drones.drone_utility, 'create_host')
def tearDown(self):
self.god.unstub_all()
def test_unreachable(self):
drones.drone_utility.create_host.expect_call('fakehost').and_return(
self._mock_host)
self._mock_host.is_up.expect_call().and_return(False)
self.assertRaises(drones.DroneUnreachable,
drones._RemoteDrone, 'fakehost')
def test_execute_calls_impl(self):
self.god.stub_with(drones._RemoteDrone, '_drone_utility_path',
'mock-drone-utility-path')
drones.drone_utility.create_host.expect_call('fakehost').and_return(
self._mock_host)
self._mock_host.is_up.expect_call().and_return(True)
mock_calls = ('foo',)
mock_result = utils.CmdResult(stdout=cPickle.dumps('mock return'))
self._mock_host.run.expect_call(
'python mock-drone-utility-path',
stdin=cPickle.dumps(mock_calls), stdout_tee=None,
connect_timeout=mock.is_instance_comparator(int)).and_return(
mock_result)
drone = drones._RemoteDrone('fakehost')
self.assertEqual('mock return', drone._execute_calls_impl(mock_calls))
self.god.check_playback()
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -8,609,757,872,621,830,000 | 34.407407 | 78 | 0.618724 | false |
Sumith1896/sympy | sympy/matrices/expressions/matadd.py | 1 | 3161 | from __future__ import print_function, division
from sympy.core.compatibility import reduce
from operator import add
from sympy.core import Add, Basic, sympify
from sympy.functions import adjoint
from sympy.matrices.matrices import MatrixBase
from sympy.matrices.expressions.transpose import transpose
from sympy.strategies import (rm_id, unpack, flatten, sort, condition,
exhaust, do_one, glom)
from sympy.matrices.expressions.matexpr import MatrixExpr, ShapeError, ZeroMatrix
from sympy.utilities import default_sort_key, sift
class MatAdd(MatrixExpr):
"""A Sum of Matrix Expressions
MatAdd inherits from and operates like SymPy Add
>>> from sympy import MatAdd, MatrixSymbol
>>> A = MatrixSymbol('A', 5, 5)
>>> B = MatrixSymbol('B', 5, 5)
>>> C = MatrixSymbol('C', 5, 5)
>>> MatAdd(A, B, C)
A + B + C
"""
is_MatAdd = True
def __new__(cls, *args, **kwargs):
args = list(map(sympify, args))
check = kwargs.get('check', True)
obj = Basic.__new__(cls, *args)
if check:
validate(*args)
return obj
@property
def shape(self):
return self.args[0].shape
def _entry(self, i, j):
return Add(*[arg._entry(i, j) for arg in self.args])
def _eval_transpose(self):
return MatAdd(*[transpose(arg) for arg in self.args]).doit()
def _eval_adjoint(self):
return MatAdd(*[adjoint(arg) for arg in self.args]).doit()
def _eval_trace(self):
        from sympy.matrices.expressions.trace import Trace
return MatAdd(*[Trace(arg) for arg in self.args]).doit()
def doit(self, **ignored):
return canonicalize(self)
def validate(*args):
if not all(arg.is_Matrix for arg in args):
raise TypeError("Mix of Matrix and Scalar symbols")
A = args[0]
for B in args[1:]:
if A.shape != B.shape:
            raise ShapeError("Matrices %s and %s are not aligned" % (A, B))
factor_of = lambda arg: arg.as_coeff_mmul()[0]
matrix_of = lambda arg: unpack(arg.as_coeff_mmul()[1])
def combine(cnt, mat):
if cnt == 1:
return mat
else:
return cnt * mat
def merge_explicit(matadd):
""" Merge explicit MatrixBase arguments
>>> from sympy import MatrixSymbol, eye, Matrix, MatAdd, pprint
>>> from sympy.matrices.expressions.matadd import merge_explicit
>>> A = MatrixSymbol('A', 2, 2)
>>> B = eye(2)
>>> C = Matrix([[1, 2], [3, 4]])
>>> X = MatAdd(A, B, C)
>>> pprint(X)
A + [1 0] + [1 2]
[ ] [ ]
[0 1] [3 4]
>>> pprint(merge_explicit(X))
A + [2 2]
[ ]
[3 5]
"""
groups = sift(matadd.args, lambda arg: isinstance(arg, MatrixBase))
if len(groups[True]) > 1:
return MatAdd(*(groups[False] + [reduce(add, groups[True])]))
else:
return matadd
rules = (rm_id(lambda x: x == 0 or isinstance(x, ZeroMatrix)),
unpack,
flatten,
glom(matrix_of, factor_of, combine),
merge_explicit,
sort(default_sort_key))
canonicalize = exhaust(condition(lambda x: isinstance(x, MatAdd),
do_one(*rules)))
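# Illustrative sketch (editor's addition): the intended effect of the rule
# pipeline above once ``doit()`` routes through ``canonicalize`` - the zero
# matrix is dropped by ``rm_id`` and the repeated term is collected by ``glom``.
#
#   >>> from sympy import MatrixSymbol, ZeroMatrix, MatAdd
#   >>> A = MatrixSymbol('A', 2, 2)
#   >>> MatAdd(A, ZeroMatrix(2, 2), A).doit()
#   2*A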
| bsd-3-clause | -7,329,370,434,409,728,000 | 27.736364 | 81 | 0.595698 | false |
robwebset/script.pinsentry | resources/lib/database.py | 1 | 18767 | # -*- coding: utf-8 -*-
import xbmc
import xbmcaddon
import xbmcvfs
import sqlite3
import xbmcgui
# Import the common settings
from settings import log
from settings import os_path_join
ADDON = xbmcaddon.Addon(id='script.pinsentry')
#################################
# Class to handle database access
#################################
class PinSentryDB():
def __init__(self):
# Start by getting the database location
self.configPath = xbmc.translatePath(ADDON.getAddonInfo('profile'))
self.databasefile = os_path_join(self.configPath, "pinsentry_database.db")
log("PinSentryDB: Database file location = %s" % self.databasefile)
# Check to make sure the DB has been created
self._createDatabase()
# Removes the database if it exists
def cleanDatabase(self):
msg = "%s%s" % (ADDON.getLocalizedString(32113), "?")
isYes = xbmcgui.Dialog().yesno(ADDON.getLocalizedString(32001), msg)
if isYes:
# If the database file exists, delete it
if xbmcvfs.exists(self.databasefile):
xbmcvfs.delete(self.databasefile)
log("PinSentryDB: Removed database: %s" % self.databasefile)
else:
log("PinSentryDB: No database exists: %s" % self.databasefile)
# Creates the database if the file does not already exist
def _createDatabase(self):
# Make sure the database does not already exist
if not xbmcvfs.exists(self.databasefile):
# Get a connection to the database, this will create the file
conn = sqlite3.connect(self.databasefile)
conn.text_factory = str
c = conn.cursor()
# Create the version number table, this is a simple table
# that just holds the version details of what created it
# It should make upgrade later easier
c.execute('''CREATE TABLE version (version text primary key)''')
# Insert a row for the version
versionNum = "6"
# Run the statement passing in an array with one value
c.execute("INSERT INTO version VALUES (?)", (versionNum,))
# Create a table that will be used to store each Video and its access level
# The "id" will be auto-generated as the primary key
# Note: Index will automatically be created for "unique" values, so no
# need to manually create them
c.execute('''CREATE TABLE TvShows (id integer primary key, name text unique, dbid integer unique, level integer)''')
c.execute('''CREATE TABLE Movies (id integer primary key, name text unique, dbid integer unique, level integer)''')
c.execute('''CREATE TABLE MovieSets (id integer primary key, name text unique, dbid integer unique, level integer)''')
c.execute('''CREATE TABLE Plugins (id integer primary key, name text unique, dbid text unique, level integer)''')
c.execute('''CREATE TABLE Repositories (id integer primary key, name text unique, dbid text unique, level integer)''')
# This is in version 2
c.execute('''CREATE TABLE MusicVideos (id integer primary key, name text unique, dbid integer unique, level integer)''')
# This is in version 3
c.execute('''CREATE TABLE FileSources (id integer primary key, name text unique, dbid text unique, level integer)''')
# This is in version 4
c.execute('''CREATE TABLE ClassificationsMovies (id integer primary key, name text unique, dbid text, level integer)''')
c.execute('''CREATE TABLE ClassificationsTV (id integer primary key, name text unique, dbid text, level integer)''')
# This is in version 6
c.execute('''CREATE TABLE TvChannels (id integer primary key, name text unique, dbid integer unique, level integer)''')
# Save (commit) the changes
conn.commit()
# We can also close the connection if we are done with it.
# Just be sure any changes have been committed or they will be lost.
conn.close()
# Creates or DB if it does not exist, or updates it if it does already exist
def createOrUpdateDB(self):
if not xbmcvfs.exists(self.databasefile):
            # No database exists yet - create it from scratch; nothing to upgrade
self._createDatabase()
return
# The database was already created, check to see if they need to be updated
# Check if this is an upgrade
conn = sqlite3.connect(self.databasefile)
conn.text_factory = str
c = conn.cursor()
c.execute('SELECT * FROM version')
currentVersion = int(c.fetchone()[0])
log("PinSentryDB: Current version number in DB is: %d" % currentVersion)
# If the database is at version one, add the version 2 tables
if currentVersion < 2:
log("PinSentryDB: Updating to version 2")
# Add the tables that were added in version 2
c.execute('''CREATE TABLE MusicVideos (id integer primary key, name text unique, dbid integer unique, level integer)''')
# Update the new version of the database
currentVersion = 2
c.execute('DELETE FROM version')
c.execute("INSERT INTO version VALUES (?)", (currentVersion,))
# Save (commit) the changes
conn.commit()
# If the database is at version two, add the version 3 tables
if currentVersion < 3:
log("PinSentryDB: Updating to version 3")
# Add the tables that were added in version 3
c.execute('''CREATE TABLE FileSources (id integer primary key, name text unique, dbid text unique, level integer)''')
# Update the new version of the database
currentVersion = 3
c.execute('DELETE FROM version')
c.execute("INSERT INTO version VALUES (?)", (currentVersion,))
# Save (commit) the changes
conn.commit()
# If the database is at version three, add the version 4 tables
if currentVersion < 4:
log("PinSentryDB: Updating to version 4")
# Add the tables that were added in version 4
c.execute('''CREATE TABLE ClassificationsMovies (id integer primary key, name text unique, dbid text, level integer)''')
c.execute('''CREATE TABLE ClassificationsTV (id integer primary key, name text unique, dbid text, level integer)''')
# Update the new version of the database
currentVersion = 4
c.execute('DELETE FROM version')
c.execute("INSERT INTO version VALUES (?)", (currentVersion,))
# Save (commit) the changes
conn.commit()
# If the database is at version four, add the version 5 tables
if currentVersion < 5:
log("PinSentryDB: Updating to version 5")
# Add the tables that were added in version 5
c.execute('''CREATE TABLE Repositories (id integer primary key, name text unique, dbid text unique, level integer)''')
# Update the new version of the database
currentVersion = 5
c.execute('DELETE FROM version')
c.execute("INSERT INTO version VALUES (?)", (currentVersion,))
# Save (commit) the changes
conn.commit()
# If the database is at version five, add the version 6 tables
if currentVersion < 6:
log("PinSentryDB: Updating to version 6")
# Add the tables that were added in version 6
c.execute('''CREATE TABLE TvChannels (id integer primary key, name text unique, dbid integer unique, level integer)''')
# Update the new version of the database
currentVersion = 6
c.execute('DELETE FROM version')
c.execute("INSERT INTO version VALUES (?)", (currentVersion,))
# Save (commit) the changes
conn.commit()
conn.close()
# Get a connection to the current database
def getConnection(self):
conn = sqlite3.connect(self.databasefile)
conn.text_factory = str
return conn
# Set the security value for a given TvShow
def setTvShowSecurityLevel(self, showName, dbid, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("TvShows", showName, dbid, level)
else:
self._deleteSecurityDetails("TvShows", showName)
return ret
# Set the security value for a given Movie
def setMovieSecurityLevel(self, movieName, dbid, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("Movies", movieName, dbid, level)
else:
self._deleteSecurityDetails("Movies", movieName)
return ret
# Set the security value for a given Movie Set
def setMovieSetSecurityLevel(self, movieSetName, dbid, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("MovieSets", movieSetName, dbid, level)
else:
self._deleteSecurityDetails("MovieSets", movieSetName)
return ret
# Set the security value for a given Plugin
def setPluginSecurityLevel(self, pluginName, dbid, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("Plugins", pluginName, dbid, level)
else:
self._deleteSecurityDetails("Plugins", pluginName)
return ret
# Set the security value for a given Repository
def setRepositorySecurityLevel(self, repoName, dbid, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("Repositories", repoName, dbid, level)
else:
self._deleteSecurityDetails("Repositories", repoName)
return ret
# Set the security value for a given Music Video
def setMusicVideoSecurityLevel(self, musicVideoName, dbid, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("MusicVideos", musicVideoName, dbid, level)
else:
self._deleteSecurityDetails("MusicVideos", musicVideoName)
return ret
# Set the security value for a given File Source
def setFileSourceSecurityLevel(self, sourceName, sourcePath, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("FileSources", sourceName, sourcePath, level)
else:
self._deleteSecurityDetails("FileSources", sourceName)
return ret
# Set the security value for a given Movie Classification
def setMovieClassificationSecurityLevel(self, id, match, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("ClassificationsMovies", id, match, level)
else:
self._deleteSecurityDetails("ClassificationsMovies", id)
return ret
# Set the security value for a given TV Classification
def setTvClassificationSecurityLevel(self, id, match, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("ClassificationsTV", id, match, level)
else:
self._deleteSecurityDetails("ClassificationsTV", id)
return ret
# Set the security value for a given TV Channel
def setTvChannelSecurityLevel(self, channelName, id, level=1):
ret = -1
if level != 0:
ret = self._insertOrUpdate("TvChannels", channelName, id, level)
else:
self._deleteSecurityDetails("TvChannels", channelName)
return ret
# Insert or replace an entry in the database
def _insertOrUpdate(self, tableName, name, dbid, level=1):
log("PinSentryDB: Adding %s %s (id:%s) at level %d" % (tableName, name, str(dbid), level))
# Get a connection to the DB
conn = self.getConnection()
c = conn.cursor()
insertData = (name, dbid, level)
cmd = 'INSERT OR REPLACE INTO %s (name, dbid, level) VALUES (?,?,?)' % tableName
c.execute(cmd, insertData)
rowId = c.lastrowid
conn.commit()
conn.close()
return rowId
# Delete an entry from the database
def _deleteSecurityDetails(self, tableName, name):
log("PinSentryDB: delete %s for %s" % (tableName, name))
# Get a connection to the DB
conn = self.getConnection()
c = conn.cursor()
# Delete any existing data from the database
cmd = 'DELETE FROM %s where name = ?' % tableName
c.execute(cmd, (name,))
conn.commit()
log("PinSentryDB: delete for %s removed %d rows" % (name, conn.total_changes))
conn.close()
# Get the security value for a given TvShow
def getTvShowSecurityLevel(self, showName):
return self._getSecurityLevel("TvShows", showName)
# Get the security value for a given Movie
def getMovieSecurityLevel(self, movieName):
return self._getSecurityLevel("Movies", movieName)
# Get the security value for a given Movie Set
def getMovieSetSecurityLevel(self, movieSetName):
return self._getSecurityLevel("MovieSets", movieSetName)
# Get the security value for a given Plugin
def getPluginSecurityLevel(self, pluginName):
return self._getSecurityLevel("Plugins", pluginName)
# Get the security value for a given Repository
def getRepositorySecurityLevel(self, pluginName):
return self._getSecurityLevel("Repositories", pluginName)
# Get the security value for a given Music Video
def getMusicVideoSecurityLevel(self, musicVideoName):
return self._getSecurityLevel("MusicVideos", musicVideoName)
# Get the security value for a given File Source
def getFileSourceSecurityLevel(self, sourceName):
return self._getSecurityLevel("FileSources", sourceName)
# Select the security entry from the database for a given File Source Path
def getFileSourceSecurityLevelForPath(self, path):
return self._getSecurityLevel("FileSources", path, 'dbid')
# Get the security value for a given Movie Classification
def getMovieClassificationSecurityLevel(self, className):
return self._getSecurityLevel("ClassificationsMovies", className, 'dbid')
# Get the security value for a given TV Classification
def getTvClassificationSecurityLevel(self, className):
return self._getSecurityLevel("ClassificationsTV", className, 'dbid')
# Get the security value for a given TV Channel
def getTvChannelsSecurityLevel(self, channelName):
return self._getSecurityLevel("TvChannels", channelName)
# Select the security entry from the database
def _getSecurityLevel(self, tableName, name, dbField='name'):
log("PinSentryDB: select %s for %s (dbField=%s)" % (tableName, name, dbField))
# Get a connection to the DB
conn = self.getConnection()
c = conn.cursor()
# Select any existing data from the database
cmd = 'SELECT * FROM %s where %s = ?' % (tableName, dbField)
c.execute(cmd, (name,))
row = c.fetchone()
securityLevel = 0
if row is None:
log("PinSentryDB: No entry found in the database for %s" % name)
# Not stored in the database so return 0 for no pin required
else:
log("PinSentryDB: Database info: %s" % str(row))
# Return will contain
# row[0] - Unique Index in the DB
# row[1] - Name of the TvShow/Movie/MovieSet
# row[2] - dbid
# row[3] - Security Level
securityLevel = row[3]
conn.close()
return securityLevel
# Select all TvShow entries from the database
def getAllTvShowsSecurity(self):
return self._getAllSecurityDetails("TvShows")
# Select all Movie entries from the database
def getAllMoviesSecurity(self):
return self._getAllSecurityDetails("Movies")
# Select all Movie Set entries from the database
def getAllMovieSetsSecurity(self):
return self._getAllSecurityDetails("MovieSets")
# Select all Plugin entries from the database
def getAllPluginsSecurity(self):
return self._getAllSecurityDetails("Plugins")
# Select all Plugin entries from the database
def getAllRepositoriesSecurity(self):
return self._getAllSecurityDetails("Repositories")
# Select all Music Video entries from the database
def getAllMusicVideosSecurity(self):
return self._getAllSecurityDetails("MusicVideos")
# Select all File Sources entries from the database
def getAllFileSourcesSecurity(self):
return self._getAllSecurityDetails("FileSources")
# Get All File Source Paths entries from the database
def getAllFileSourcesPathsSecurity(self):
# The path is stored in the ID column, so use that as the key
return self._getAllSecurityDetails("FileSources", keyCol=2)
# Get All Movie Classification entries from the database
def getAllMovieClassificationSecurity(self, useCertKey=False):
keyCol = 1
if useCertKey:
keyCol = 2
return self._getAllSecurityDetails("ClassificationsMovies", keyCol)
# Get All TV Classification entries from the database
def getAllTvClassificationSecurity(self, useCertKey=False):
keyCol = 1
if useCertKey:
keyCol = 2
return self._getAllSecurityDetails("ClassificationsTV", keyCol)
# Get All File Source Paths entries from the database
def getAllTvChannelsSecurity(self):
# The path is stored in the ID column, so use that as the key
return self._getAllSecurityDetails("TvChannels")
# Select all security details from a given table in the database
def _getAllSecurityDetails(self, tableName, keyCol=1):
log("PinSentryDB: select all %s" % tableName)
# Get a connection to the DB
conn = self.getConnection()
c = conn.cursor()
# Select any existing data from the database
cmd = 'SELECT * FROM %s' % tableName
c.execute(cmd)
rows = c.fetchall()
resultDict = {}
if rows is None:
# No data
log("PinSentryDB: No entry found in TvShow database")
else:
log("PinSentryDB: Database info: %s" % str(rows))
# Return will contain
# row[0] - Unique Index in the DB
# row[1] - Name of the TvShow/Movie/MovieSet
# row[2] - dbid
# row[3] - Security Level
for row in rows:
name = row[keyCol]
resultDict[name] = row[3]
conn.close()
return resultDict
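# Usage sketch (editor's addition, illustrative only): the add-on's service and
# plugin entry points drive this class; it must run inside Kodi so the
# xbmc/xbmcvfs/xbmcaddon imports resolve. Names below are placeholders.
#
#   db = PinSentryDB()
#   db.createOrUpdateDB()
#   db.setMovieSecurityLevel("Example Movie", 123, level=1)
#   level = db.getMovieSecurityLevel("Example Movie")  # 0 means no pin required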
| gpl-2.0 | 6,275,682,289,483,418,000 | 40.519912 | 132 | 0.6326 | false |
josephsuh/extra-specs | nova/tests/api/openstack/compute/contrib/test_keypairs.py | 1 | 11623 | # Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from lxml import etree
import webob
from nova.api.openstack.compute.contrib import keypairs
from nova.api.openstack import wsgi
from nova import db
from nova import exception
from nova import quota
from nova import test
from nova.tests.api.openstack import fakes
QUOTAS = quota.QUOTAS
def fake_keypair(name):
return {'public_key': 'FAKE_KEY',
'fingerprint': 'FAKE_FINGERPRINT',
'name': name}
def db_key_pair_get_all_by_user(self, user_id):
return [fake_keypair('FAKE')]
def db_key_pair_create(self, keypair):
pass
def db_key_pair_destroy(context, user_id, name):
if not (user_id and name):
raise Exception()
def db_key_pair_get(context, user_id, name):
pass
class KeypairsTest(test.TestCase):
def setUp(self):
super(KeypairsTest, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
self.stubs.Set(db, "key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
self.stubs.Set(db, "key_pair_create",
db_key_pair_create)
self.stubs.Set(db, "key_pair_destroy",
db_key_pair_destroy)
def test_keypair_list(self):
req = webob.Request.blank('/v2/fake/os-keypairs')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
res_dict = json.loads(res.body)
response = {'keypairs': [{'keypair': fake_keypair('FAKE')}]}
self.assertEqual(res_dict, response)
def test_keypair_create(self):
body = {'keypair': {'name': 'create_test'}}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
res_dict = json.loads(res.body)
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertTrue(len(res_dict['keypair']['private_key']) > 0)
def test_keypair_create_with_empty_name(self):
body = {'keypair': {'name': ''}}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_keypair_create_with_invalid_name(self):
body = {
'keypair': {
'name': 'a' * 256
}
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_keypair_create_with_non_alphanumeric_name(self):
body = {
'keypair': {
'name': 'test/keypair'
}
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
def test_keypair_create_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
body = {'keypair': {'name': 'foo'}}
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 413)
def test_keypair_import(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
        # FIXME(ja): should we check that public_key was sent to create?
res_dict = json.loads(res.body)
self.assertTrue(len(res_dict['keypair']['fingerprint']) > 0)
self.assertFalse('private_key' in res_dict['keypair'])
def test_keypair_import_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
'bHkXa6OciiJDvkRzJXzf',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 413)
def test_keypair_create_quota_limit(self):
def fake_quotas_count(self, context, resource, *args, **kwargs):
return 100
self.stubs.Set(QUOTAS, "count", fake_quotas_count)
body = {
'keypair': {
'name': 'create_test',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 413)
def test_keypair_create_duplicate(self):
self.stubs.Set(db, "key_pair_get", db_key_pair_get)
body = {'keypair': {'name': 'create_duplicate'}}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 409)
def test_keypair_import_bad_key(self):
body = {
'keypair': {
'name': 'create_test',
'public_key': 'ssh-what negative',
},
}
req = webob.Request.blank('/v2/fake/os-keypairs')
req.method = 'POST'
req.body = json.dumps(body)
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
def test_keypair_delete(self):
req = webob.Request.blank('/v2/fake/os-keypairs/FAKE')
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
def test_keypair_delete_not_found(self):
def db_key_pair_get_not_found(context, user_id, name):
raise exception.KeyPairNotFound()
self.stubs.Set(db, "key_pair_get",
db_key_pair_get_not_found)
req = webob.Request.blank('/v2/fake/os-keypairs/WHAT')
res = req.get_response(fakes.wsgi_app())
print res
self.assertEqual(res.status_int, 404)
class KeypairsXMLSerializerTest(test.TestCase):
def setUp(self):
super(KeypairsXMLSerializerTest, self).setUp()
self.deserializer = wsgi.XMLDeserializer()
def test_default_serializer(self):
exemplar = dict(keypair=dict(
public_key='fake_public_key',
private_key='fake_private_key',
fingerprint='fake_fingerprint',
user_id='fake_user_id',
name='fake_key_name'))
serializer = keypairs.KeypairTemplate()
text = serializer.serialize(exemplar)
print text
tree = etree.fromstring(text)
self.assertEqual('keypair', tree.tag)
for child in tree:
self.assertTrue(child.tag in exemplar['keypair'])
self.assertEqual(child.text, exemplar['keypair'][child.tag])
def test_index_serializer(self):
exemplar = dict(keypairs=[
dict(keypair=dict(
name='key1_name',
public_key='key1_key',
fingerprint='key1_fingerprint')),
dict(keypair=dict(
name='key2_name',
public_key='key2_key',
fingerprint='key2_fingerprint'))])
serializer = keypairs.KeypairsTemplate()
text = serializer.serialize(exemplar)
print text
tree = etree.fromstring(text)
self.assertEqual('keypairs', tree.tag)
self.assertEqual(len(exemplar['keypairs']), len(tree))
for idx, keypair in enumerate(tree):
self.assertEqual('keypair', keypair.tag)
kp_data = exemplar['keypairs'][idx]['keypair']
for child in keypair:
self.assertTrue(child.tag in kp_data)
self.assertEqual(child.text, kp_data[child.tag])
def test_deserializer(self):
exemplar = dict(keypair=dict(
name='key_name',
public_key='public_key'))
intext = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<keypair><name>key_name</name>'
'<public_key>public_key</public_key></keypair>')
result = self.deserializer.deserialize(intext)['body']
self.assertEqual(result, exemplar)
| apache-2.0 | 210,862,576,793,496,740 | 35.550314 | 78 | 0.580573 | false |
eevee/flax | flax/ui/console/util.py | 1 | 3170 | """Utility widgets, not really specific to the game."""
import sys
import urwid
class LogWidget(urwid.ListBox):
# Can't receive focus on its own; assumed that some parent widget will
# worry about scrolling us
_selectable = False
def __init__(self):
super().__init__(urwid.SimpleListWalker([]))
def add_log_line(self, line):
text = urwid.Text(('log-game', line))
self.body.append(text)
self.focus_position = len(self.body) - 1
class ToggleableOverlay(urwid.Overlay):
"""An Overlay where the top widget can be swapped out or hidden entirely.
If the top widget is removed, focus passes to the bottom widget.
"""
def __init__(self, bottom_w):
super().__init__(
None, bottom_w,
# These get replaced every time; just need some sane defaults
align='center', valign='middle', height='pack', width='pack',
)
def selectable(self):
return self.focus.selectable()
def keypress(self, size, key):
if self.top_w:
return super().keypress(size, key)
else:
return self.bottom_w.keypress(size, key)
@property
def focus(self):
if self.top_w:
return self.top_w
else:
return self.bottom_w
@property
def focus_position(self):
if self.top_w:
return 1
else:
return 0
@focus_position.setter
def focus_position(self, position):
if position == 0:
self.top_w = None
else:
super().focus_position = position
# TODO override `contents` to return a 1-element thing
def render(self, size, focus=False):
if self.top_w:
return super().render(size, focus)
else:
return self.bottom_w.render(size, focus)
### New APIs
def _close_handler(self, widget, *args):
urwid.disconnect_signal(widget, 'close-overlay', self._close_handler)
if self._onclose:
self._onclose(*args)
self.change_overlay(None)
_onclose = None
def change_overlay(self, widget, onclose=None, **kwargs):
self._onclose = onclose
if widget:
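            # Disconnect first so re-showing the same widget doesn't register duplicate handlers.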
urwid.disconnect_signal(widget, 'close-overlay', self._close_handler)
urwid.connect_signal(widget, 'close-overlay', self._close_handler)
if 'box' in widget.sizing():
# A box is probably a popup, so center it
defaults = dict(
align='center',
valign='middle',
width=('relative', 90),
height=('relative', 90),
)
else:
# Otherwise it's probably a prompt or something, so stick it at
# the bottom
defaults = dict(
align='left',
valign='bottom',
width=('relative', 100),
height='pack',
)
defaults.update(kwargs)
self.set_overlay_parameters(**defaults)
self.top_w = widget
self._invalidate()
| mit | -6,422,756,433,398,865,000 | 27.558559 | 81 | 0.543218 | false |
Kismuz/btgym | btgym/research/strategy_gen_5/base.py | 1 | 37905 | ###############################################################################
#
# Copyright (C) 2017-2018 Andrew Muzikin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import backtrader as bt
import backtrader.indicators as btind
from gym import spaces
from btgym import DictSpace
import numpy as np
from collections import deque
from btgym.strategy.utils import norm_value, decayed_result, exp_scale
############################## Base BTgymStrategy Class ###################
class BaseStrategy5(bt.Strategy):
"""
    'New and improved' base strategy class.
    Incorporates state declaration and preprocessing improvements.
    Current candidate to replace the existing BTgymBaseStrategy.
Controls Environment inner dynamics and backtesting logic. Provides gym'my (State, Action, Reward, Done, Info) data.
Any State, Reward and Info computation logic can be implemented by subclassing BTgymStrategy and overriding
get_[mode]_state(), get_reward(), get_info(), is_done() and set_datalines() methods.
    One can always go deeper and override __init__() and next() methods for desired
server cerebro engine behaviour, including order execution logic etc.
Note:
- base class supports single asset iteration via default data_line named 'base_asset', see derived classes
multi-asset support
- bt.observers.DrawDown observer will be automatically added to BTgymStrategy instance at runtime.
- Since it is bt.Strategy subclass, refer to https://www.backtrader.com/docu/strategy.html for more information.
"""
# Time embedding period:
time_dim = 4 # NOTE: changed this --> change Policy UNREAL for aux. pix control task upsampling params
# Number of timesteps reward estimation statistics are averaged over, should be:
# skip_frame_period <= avg_period <= time_embedding_period:
avg_period = time_dim
# Possible agent actions; Note: place 'hold' first! :
portfolio_actions = ('hold', 'buy', 'sell', 'close')
features_parameters = ()
num_features = len(features_parameters)
params = dict(
# Observation state shape is dictionary of Gym spaces,
# at least should contain `raw_state` field.
# By convention first dimension of every Gym Box space is time embedding one;
# one can define any shape; should match env.observation_space.shape.
# observation space state min/max values,
# For `raw_state' (default) - absolute min/max values from BTgymDataset will be used.
state_shape={
'raw': spaces.Box(
shape=(time_dim, 4),
low=0, # will get overridden.
high=0,
dtype=np.float32,
),
'metadata': DictSpace(
{
'type': spaces.Box(
shape=(),
low=0,
high=1,
dtype=np.uint32
),
'trial_num': spaces.Box(
shape=(),
low=0,
high=10 ** 10,
dtype=np.uint32
),
'trial_type': spaces.Box(
shape=(),
low=0,
high=1,
dtype=np.uint32
),
'sample_num': spaces.Box(
shape=(),
low=0,
high=10 ** 10,
dtype=np.uint32
),
'first_row': spaces.Box(
shape=(),
low=0,
high=10 ** 10,
dtype=np.uint32
),
'timestamp': spaces.Box(
shape=(),
low=0,
high=np.finfo(np.float64).max,
dtype=np.float64
),
}
)
},
cash_name='default_cash',
asset_names=['default_asset'],
start_cash=None,
commission=None,
slippage=None,
leverage=1.0,
gamma=0.99, # fi_gamma, should match MDP gamma decay
reward_scale=1, # reward multiplicator
        drawdown_call=10,  # finish episode when hitting drawdown threshold, in percent.
target_call=10, # finish episode when reaching profit target, in percent.
dataset_stat=None, # Summary descriptive statistics for entire dataset and
episode_stat=None, # current episode. Got updated by server.
time_dim=time_dim, # time embedding period
avg_period=avg_period, # number of time steps reward estimation statistics are averaged over
features_parameters=features_parameters,
num_features=num_features,
metadata={},
broadcast_message={},
trial_stat=None,
trial_metadata=None,
portfolio_actions=portfolio_actions,
skip_frame=1, # number of environment steps to skip before returning next environment response
order_size=None,
initial_action=None,
initial_portfolio_action=None,
state_int_scale=1,
state_ext_scale=1,
)
def __init__(self, **kwargs):
"""
Keyword Args:
params (dict): parameters dictionary, see Note below.
Notes:
Due to backtrader convention, any strategy arguments should be defined inside `params` dictionary
or passed as kwargs to bt.Cerebro() class via .addstrategy() method. Parameter dictionary
should contain at least these keys::
state_shape: Observation state shape is dictionary of Gym spaces, by convention
first dimension of every Gym Box space is time embedding one;
cash_name: str, name for cash asset
asset_names: iterable of str, names for assets
start_cash: float, broker starting cash
commission: float, broker commission value, .01 stands for 1%
leverage: float, broker leverage
slippage: float, broker execution slippage
order_size: dict of fixed order stakes (floats); keys should match assets names.
                drawdown_call: finish episode when hitting this drawdown threshold, in percent.
target_call: finish episode when reaching this profit target, in percent.
portfolio_actions: possible agent actions.
skip_frame: number of environment steps to skip before returning next response,
e.g. if set to 10 -- agent will interact with environment every 10th step;
every other step agent action is assumed to be 'hold'.
Default values are::
state_shape=dict(raw_state=spaces.Box(shape=(4, 4), low=0, high=0,))
cash_name='default_cash'
asset_names=['default_asset']
start_cash=None
commission=None
slippage=None,
leverage=1.0
drawdown_call=10
target_call=10
dataset_stat=None
episode_stat=None
portfolio_actions=('hold', 'buy', 'sell', 'close')
skip_frame=1
order_size=None
"""
# Inherit logger from cerebro:
self.log = self.env._log
self.skip_frame = self.p.skip_frame
self.iteration = 0
self.env_iteration = 0
self.inner_embedding = 1
self.is_done = False
self.is_done_enabled = False
self.steps_till_is_done = 2 # extra steps to make when episode terminal conditions are met
self.action = self.p.initial_portfolio_action
self.action_to_repeat = self.p.initial_portfolio_action
self.action_repeated = 0
self.num_action_repeats = None
self.reward = 0
self.order = None
self.order_failed = 0
self.broker_message = '_'
self.final_message = '_'
self.raw_state = None
self.time_stamp = 0
# Configure state_shape:
if self.p.state_shape is None:
self.p.state_shape = self.set_state_shape()
# Prepare broker:
if self.p.start_cash is not None:
self.env.broker.setcash(self.p.start_cash)
if self.p.commission is not None:
self.env.broker.setcommission(commission=self.p.commission, leverage=self.p.leverage)
if self.p.slippage is not None:
# Bid/ask workaround: set overkill 10% slippage + slip_out=False
# ensuring we always buy at current 'high'~'ask' and sell at 'low'~'bid':
self.env.broker.set_slippage_perc(self.p.slippage, slip_open=True, slip_match=True, slip_out=False)
# Normalisation constant for statistics derived from account value:
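        # (expresses value changes as a fraction of the allowed band of
        # `drawdown_call + target_call` percent of starting cash)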
self.broker_value_normalizer = 1 / \
self.env.broker.startingcash / (self.p.drawdown_call + self.p.target_call) * 100
self.target_value = self.env.broker.startingcash * (1 + self.p.target_call / 100)
# Try to define stake, if no self.p.order_size dict has been set:
if self.p.order_size is None:
# If no order size has been set for every data_line,
# try to infer stake size from sizer set by bt.Cerebro.addsizer() method:
try:
assert len(list(self.env.sizers.values())) == 1
env_sizer_params = list(self.env.sizers.values())[0][-1] # pull dict of outer set sizer params
assert 'stake' in env_sizer_params.keys()
except (AssertionError, KeyError) as e:
msg = 'Order stake is not set neither via strategy.param.order_size nor via bt.Cerebro.addsizer method.'
self.log.error(msg)
raise ValueError(msg)
self.p.order_size = {name: env_sizer_params['stake'] for name in self.p.asset_names}
elif isinstance(self.p.order_size, int) or isinstance(self.p.order_size, float):
unimodal_stake = {name: self.p.order_size for name in self.getdatanames()}
self.p.order_size = unimodal_stake
# self.log.warning('asset names: {}'.format(self.p.asset_names))
# self.log.warning('data names: {}'.format(self.getdatanames()))
self.trade_just_closed = False
self.trade_result = 0
self.unrealized_pnl = None
self.norm_broker_value = None
self.realized_pnl = None
self.current_pos_duration = 0
self.current_pos_min_value = 0
self.current_pos_max_value = 0
self.realized_broker_value = self.env.broker.startingcash
self.episode_result = 0 # not used
        # Service sma to get correct first feature values:
self.data.dim_sma = btind.SimpleMovingAverage(
self.datas[0],
period=self.p.time_dim
)
self.data.dim_sma.plotinfo.plot = False
# self.log.warning('self.p.dir: {}'.format(dir(self.params)))
# Episode-wide metadata:
self.metadata = {
'type': np.asarray(self.p.metadata['type']),
'trial_num': np.asarray(self.p.metadata['parent_sample_num']),
'trial_type': np.asarray(self.p.metadata['parent_sample_type']),
'sample_num': np.asarray(self.p.metadata['sample_num']),
'first_row': np.asarray(self.p.metadata['first_row']),
'timestamp': np.asarray(self.time_stamp, dtype=np.float64)
}
self.state = {
'raw': None,
'metadata': None
}
# If it is train or test episode?
# default logic: true iff. it is test episode from target domain:
self.is_test = self.metadata['type'] and self.metadata['trial_type']
# This flag shows to the outer world if this episode can broadcast world-state information, e.g. move global
# time forward (see: btgym.server._BTgymAnalyzer.next() method);
self.can_broadcast = self.is_test
self.log.debug('strategy.metadata: {}'.format(self.metadata))
self.log.debug('is_test: {}'.format(self.is_test))
        # Broker data lines of interest (used for estimation of agent's inner state):
self.broker_datalines = [
'cash',
'value',
'exposure',
'drawdown',
'pos_duration',
'realized_pnl',
'unrealized_pnl',
'min_unrealized_pnl',
'max_unrealized_pnl',
'total_unrealized_pnl',
]
# Define flat collection dictionary looking up for methods for estimating broker statistics,
# one method for one mode, should be named .get_broker_[mode_name]():
self.collection_get_broker_stat_methods = {}
for line in self.broker_datalines:
try:
self.collection_get_broker_stat_methods[line] = getattr(self, 'get_broker_{}'.format(line))
except AttributeError:
raise NotImplementedError('Callable get_broker_{}.() not found'.format(line))
        # Broker and account related sliding statistics accumulators, globally normalized last `avg_period` values,
# so it's a bit more computationally efficient than use of bt.Observers:
self.broker_stat = {key: deque(maxlen=self.avg_period) for key in self.broker_datalines}
# Add custom data Lines if any (convenience wrapper):
self.set_datalines()
self.log.debug('Kwargs:\n{}\n'.format(str(kwargs)))
# Define flat collection dictionary looking for methods for estimating observation state,
# one method for one mode, should be named .get_[mode_name]_state():
self.collection_get_state_methods = {}
for key in self.p.state_shape.keys():
try:
self.collection_get_state_methods[key] = getattr(self, 'get_{}_state'.format(key))
except AttributeError:
raise NotImplementedError('Callable get_{}_state.() not found'.format(key))
for data in self.datas:
self.log.debug('data_name: {}'.format(data._name))
self.log.debug('stake size: {}'.format(self.p.order_size))
# Define how this strategy should handle actions: either as discrete or continuous:
if self.p.portfolio_actions is None or set(self.p.portfolio_actions) == {}:
# No discrete actions provided, assume continuous:
try:
assert self.p.skip_frame > 1
except AssertionError:
msg = 'For continuous actions it is essential to set `skip_frame` parameter > 1, got: {}'.format(
self.p.skip_frame
)
self.log.error(msg)
raise ValueError(msg)
# Disable broker checking margin,
# see: https://community.backtrader.com/topic/152/multi-asset-ranking-and-rebalancing/2?page=1
self.env.broker.set_checksubmit(False)
self.next_process_fn = self._next_target_percent
# Repeat action 2 times:
self.num_action_repeats = 2
else:
# Use discrete handling method otherwise:
self.env.broker.set_checksubmit(True)
self.next_process_fn = self._next_discrete
# self.log.warning('DISCRETE')
# Do not repeat action for discrete:
self.num_action_repeats = 0
def prenext(self):
self.update_broker_stat()
def nextstart(self):
self.inner_embedding = self.data.close.buflen()
self.log.debug('Inner time embedding: {}'.format(self.inner_embedding))
def next(self):
"""
Default implementation for built-in backtrader method.
Defines one step environment routine;
Handles order execution logic according to action received.
Note that orders can only be submitted for data_lines in action_space (assets).
`self.action` attr. is updated by btgym.server._BTgymAnalyzer, and `None` actions
are emitted while doing `skip_frame` loop.
"""
self.update_broker_stat()
if '_skip_this' in self.action.keys():
# print('a_skip, b_message: ', self.broker_message)
if self.action_repeated < self.num_action_repeats:
self.next_process_fn(self.action_to_repeat)
self.action_repeated += 1
else:
self.next_process_fn(self.action)
self.action_repeated = 0
self.action_to_repeat = self.action
# print('a_process, b_message: ', self.broker_message)
def notify_trade(self, trade):
if trade.isclosed:
# Set trade flags: True if trade have been closed just now and within last frame-skip period,
# and store trade result:
self.trade_just_closed = True
# Note: `trade_just_closed` flag has to be reset manually after evaluating.
self.trade_result += trade.pnlcomm
            # Store realized portfolio value:
self.realized_broker_value = self.broker.get_value()
# self.log.warning('notify_trade: trade_pnl: {}, cum_trade_result: {}, realized_value: {}'.format(
# trade.pnlcomm, self.trade_result, self.realized_broker_value)
# )
def update_broker_stat(self):
"""
Updates all sliding broker statistics deques with latest-step values such as:
- normalized broker value
- normalized broker cash
- normalized exposure (position size)
- exp. scaled episode duration in steps, normalized wrt. max possible episode steps
- normalized realized profit/loss for last closed trade (is zero if no pos. closures within last env. step)
- normalized profit/loss for current opened trade (unrealized p/l);
"""
# Current account value:
current_value = self.env.broker.get_value()
# Individual positions for each instrument traded:
positions = [self.env.broker.getposition(data) for data in self.datas]
exposure = sum([abs(pos.size) for pos in positions])
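        # Append the latest value of every tracked statistic to its sliding deque
        # via the corresponding get_broker_<name>() method: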
for key, method in self.collection_get_broker_stat_methods.items():
self.broker_stat[key].append(
method(
current_value=current_value,
positions=positions,
exposure=exposure,
)
)
# Reset one-time flags:
self.trade_just_closed = False
self.trade_result = 0
def get_broker_value(self, current_value, **kwargs):
"""
Args:
current_value: current portfolio value
Returns:
normalized broker value.
"""
return norm_value(
current_value,
self.env.broker.startingcash,
self.p.drawdown_call,
self.p.target_call,
)
def get_broker_cash(self, **kwargs):
"""
Returns:
normalized broker cash
"""
return norm_value(
self.env.broker.get_cash(),
self.env.broker.startingcash,
99.0,
self.p.target_call,
)
def get_broker_exposure(self, exposure, **kwargs):
"""
Returns:
normalized exposure (position size)
"""
return exposure / (self.env.broker.startingcash * self.env.broker.get_leverage() + 1e-2)
def get_broker_realized_pnl(self, current_value, **kwargs):
"""
Args:
current_value: current portfolio value
Returns:
normalized realized profit/loss for last closed trade (is zero if no pos. closures within last env. step)
"""
if self.trade_just_closed:
pnl = decayed_result(
self.trade_result,
current_value,
self.env.broker.startingcash,
self.p.drawdown_call,
self.p.target_call,
gamma=1
)
# self.log.warning('get_broker_realized_pnl: got result: {} --> pnl: {}'.format(self.trade_result, pnl))
# Reset flag:
# self.trade_just_closed = False
# print('broker_realized_pnl: step {}, just closed.'.format(self.iteration))
else:
pnl = 0.0
return pnl
def get_broker_unrealized_pnl(self, current_value, **kwargs):
"""
Args:
current_value: current portfolio value
Returns:
normalized profit/loss for current opened trade
"""
return (current_value - self.realized_broker_value) * self.broker_value_normalizer
def get_broker_total_unrealized_pnl(self, current_value, **kwargs):
"""
Args:
current_value: current portfolio value
Returns:
normalized profit/loss wrt. initial portfolio value
"""
return (current_value - self.env.broker.startingcash) * self.broker_value_normalizer
def get_broker_episode_step(self, **kwargs):
"""
Returns:
exp. scaled episode duration in steps, normalized wrt. max possible episode steps
"""
return exp_scale(
self.iteration / (self.data.numrecords - self.inner_embedding),
gamma=3
)
def get_broker_drawdown(self, **kwargs):
"""
Returns:
current drawdown value
"""
try:
dd = self.stats.drawdown.drawdown[-1] # / self.p.drawdown_call
except IndexError:
dd = 0.0
return dd
def get_broker_pos_duration(self, exposure, **kwargs):
if exposure == 0:
self.current_pos_duration = 0
# print('ZERO_POSITION\n')
else:
self.current_pos_duration += 1
return self.current_pos_duration
def get_broker_max_unrealized_pnl(self, current_value, exposure, **kwargs):
if exposure == 0:
self.current_pos_max_value = current_value
else:
if self.current_pos_max_value < current_value:
self.current_pos_max_value = current_value
return (self.current_pos_max_value - self.realized_broker_value) * self.broker_value_normalizer
def get_broker_min_unrealized_pnl(self, current_value, exposure, **kwargs):
if exposure == 0:
self.current_pos_min_value = current_value
else:
if self.current_pos_min_value > current_value:
self.current_pos_min_value = current_value
return (self.current_pos_min_value - self.realized_broker_value) * self.broker_value_normalizer
def set_datalines(self):
"""
Default datalines are: Open, Low, High, Close, Volume.
Any other custom data lines, indicators, etc. should be explicitly defined by overriding this method.
Invoked once by Strategy.__init__().
"""
pass
def get_raw_state(self):
"""
Default state observation composer.
Returns:
and updates time-embedded environment state observation as [n,4] numpy matrix, where:
4 - number of signal features == state_shape[1],
n - time-embedding length == state_shape[0] == <set by user>.
Note:
`self.raw_state` is used to render environment `human` mode and should not be modified.
"""
self.raw_state = np.row_stack(
(
np.frombuffer(self.data.open.get(size=self.time_dim)),
np.frombuffer(self.data.high.get(size=self.time_dim)),
np.frombuffer(self.data.low.get(size=self.time_dim)),
np.frombuffer(self.data.close.get(size=self.time_dim)),
)
).T
return self.raw_state
def get_internal_state(self):
"""
Composes internal state tensor by calling all statistics from broker_stat dictionary.
Generally, this method should not be modified, implement corresponding get_broker_[mode]() methods.
"""
x_broker = np.concatenate(
[np.asarray(stat)[..., None] for stat in self.broker_stat.values()],
axis=-1
)
return x_broker[:, None, :]
def get_metadata_state(self):
self.metadata['timestamp'] = np.asarray(self._get_timestamp())
return self.metadata
def _get_time(self):
"""
Retrieves current time point of the episode data.
Returns:
datetime object
"""
return self.data.datetime.datetime()
def _get_timestamp(self):
"""
Sets attr. and returns current data timestamp.
Returns:
POSIX timestamp
"""
self.time_stamp = self._get_time().timestamp()
return self.time_stamp
def _get_broadcast_info(self):
"""
Transmits broadcasting message.
Returns:
dictionary or None
"""
try:
return self.get_broadcast_message()
except AttributeError:
return None
def get_broadcast_message(self):
"""
Override this.
Returns:
dictionary or None
"""
return None
def get_state(self):
"""
Collects estimated values for every mode of observation space by calling methods from
`collection_get_state_methods` dictionary.
As a rule, this method should not be modified, override or implement corresponding get_[mode]_state() methods,
defining necessary calculations and return arbitrary shaped tensors for every space mode.
Note:
            - 'data' refers to bt.strategy datafeeds and should be treated as such.
Datafeed Lines that are not default to BTgymStrategy should be explicitly defined by
__init__() or define_datalines().
"""
# Update inner state statistic and compose state: <- moved to .next()
# self.update_broker_stat()
self.state = {key: method() for key, method in self.collection_get_state_methods.items()}
return self.state
def get_reward(self):
"""
Shapes reward function as normalized single trade realized profit/loss,
augmented with potential-based reward shaping functions in form of:
F(s, a, s`) = gamma * FI(s`) - FI(s);
Potential FI_1 is current normalized unrealized profit/loss.
Paper:
"Policy invariance under reward transformations:
Theory and application to reward shaping" by A. Ng et al., 1999;
http://www.robotics.stanford.edu/~ang/papers/shaping-icml99.pdf
"""
# All sliding statistics for this step are already updated by get_state().
# Potential-based shaping function 1:
# based on potential of averaged profit/loss for current opened trade (unrealized p/l):
unrealised_pnl = np.asarray(self.broker_stat['unrealized_pnl'])
current_pos_duration = self.broker_stat['pos_duration'][-1]
# We want to estimate potential `fi = gamma*fi_prime - fi` of current opened position,
        # thus need to consider different cases given the skip_frame parameter:
if current_pos_duration == 0:
            # Set potential term to zero if there are no open positions:
f1 = 0
fi_1_prime = 0
else:
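            # Position opened within the current skip-frame window: previous potential is zero;
            # within two windows: average over the partial preceding window;
            # otherwise: average over the two most recent full skip-frame windows.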
if current_pos_duration < self.p.skip_frame:
fi_1 = 0
fi_1_prime = np.average(unrealised_pnl[-current_pos_duration:])
elif current_pos_duration < 2 * self.p.skip_frame:
fi_1 = np.average(
unrealised_pnl[-(self.p.skip_frame + current_pos_duration):-self.p.skip_frame]
)
fi_1_prime = np.average(unrealised_pnl[-self.p.skip_frame:])
else:
fi_1 = np.average(
unrealised_pnl[-2 * self.p.skip_frame:-self.p.skip_frame]
)
fi_1_prime = np.average(unrealised_pnl[-self.p.skip_frame:])
# Potential term:
f1 = self.p.gamma * fi_1_prime - fi_1
# Main reward function: normalized realized profit/loss:
realized_pnl = np.asarray(self.broker_stat['realized_pnl'])[-self.p.skip_frame:].sum()
# Weights are subject to tune:
self.reward = (10.0 * f1 + 10.0 * realized_pnl) * self.p.reward_scale
self.reward = np.clip(self.reward, -self.p.reward_scale, self.p.reward_scale)
return self.reward
def get_info(self):
"""
Composes information part of environment response,
can be any object. Override to own taste.
Note:
Due to 'skip_frame' feature, INFO part of environment response transmitted by server can be a list
containing either all skipped frame's info objects, i.e. [info[-9], info[-8], ..., info[0]] or
just latest one, [info[0]]. This behaviour is set inside btgym.server._BTgymAnalyzer().next() method.
"""
return dict(
step=self.iteration,
time=self.data.datetime.datetime(),
action=self.action,
broker_message=self.broker_message,
broker_cash=self.stats.broker.cash[0],
broker_value=self.stats.broker.value[0],
drawdown=self.stats.drawdown.drawdown[0],
max_drawdown=self.stats.drawdown.maxdrawdown[0],
)
def get_done(self):
"""
Episode termination estimator,
        defines any trading logic conditions upon which episode stop is called, e.g. <OMG! Stop it, we became too rich!>.
        It is just a structural convention method. The default method is empty.
Expected to return:
tuple (<is_done, type=bool>, <message, type=str>).
"""
return False, '-'
def _get_done(self):
"""
Default episode termination method,
checks base conditions episode stop is called upon:
1. Reached maximum episode duration. Need to check it explicitly, because <self.is_done> flag
is sent as part of environment response.
2. Got '_done' signal from outside. E.g. via env.reset() method invoked by outer RL algorithm.
3. Hit drawdown threshold.
4. Hit target profit threshold.
This method shouldn't be overridden or called explicitly.
Runtime execution logic is:
terminate episode if:
get_done() returned (True, 'something')
OR
ANY _get_done() default condition is met.
"""
if not self.is_done_enabled:
# Episode is on its way,
# apply base episode termination rules:
is_done_rules = [
                # Are we approaching the end of the episode?:
(self.iteration >= \
self.data.numrecords - self.inner_embedding - self.p.skip_frame - self.steps_till_is_done,
'END OF DATA'),
# Any money left?:
(self.stats.drawdown.maxdrawdown[0] >= self.p.drawdown_call, 'DRAWDOWN CALL'),
# Party time?
(self.env.broker.get_value() > self.target_value, 'TARGET REACHED'),
]
# Append custom get_done() results, if any:
is_done_rules += [self.get_done()]
# Sweep through rules:
for (condition, message) in is_done_rules:
if condition:
# Start episode termination countdown for clean exit:
# to forcefully execute final `close` order and compute proper reward
                    # we need to make `steps_till_is_done` number of steps until `is_done` flag can be safely raised:
self.is_done_enabled = True
self.broker_message += message
self.final_message = message
self.order = self.close()
self.log.debug(
'Episode countdown started at: {}, {}, r:{}'.format(self.iteration, message, self.reward)
)
else:
# Now in episode termination phase,
# just keep hitting `Close` button:
self.steps_till_is_done -= 1
self.broker_message = 'CLOSE, {}'.format(self.final_message)
self.order = self.close()
self.log.debug(
'Episode countdown contd. at: {}, {}, r:{}'.format(self.iteration, self.broker_message, self.reward)
)
if self.steps_till_is_done <= 0:
# Now we've done, terminate:
self.is_done = True
return self.is_done
def notify_order(self, order):
"""
Shamelessly taken from backtrader tutorial.
TODO: better multi data support
"""
if order.status in [order.Submitted, order.Accepted]:
# Buy/Sell order submitted/accepted to/by broker - Nothing to do
return
# Check if an order has been completed
# Attention: broker could reject order if not enough cash
if order.status in [order.Completed]:
if order.isbuy():
self.broker_message = 'BUY executed,\nPrice: {:.5f}, Cost: {:.4f}, Comm: {:.4f}'. \
format(order.executed.price,
order.executed.value,
order.executed.comm)
self.buyprice = order.executed.price
self.buycomm = order.executed.comm
else: # Sell
self.broker_message = 'SELL executed,\nPrice: {:.5f}, Cost: {:.4f}, Comm: {:.4f}'. \
format(order.executed.price,
order.executed.value,
order.executed.comm)
self.bar_executed = len(self)
elif order.status in [order.Canceled, order.Margin, order.Rejected]:
self.broker_message = 'ORDER FAILED with status: ' + str(order.getstatusname())
            # Raise order_failed flag until get_reward() [hopefully] uses and resets it:
self.order_failed += 1
# self.log.warning('BM: {}'.format(self.broker_message))
self.order = None
def _next_discrete(self, action):
"""
Default implementation for discrete actions.
Note that orders can be submitted only for data_lines in action_space (assets).
Args:
action: dict, string encoding of btgym.spaces.ActionDictSpace
"""
for key, single_action in action.items():
# Simple action-to-order logic:
if single_action == 'hold' or self.is_done_enabled:
pass
elif single_action == 'buy':
self.order = self.buy(data=key, size=self.p.order_size[key])
self.broker_message = 'new {}_BUY created; '.format(key) + self.broker_message
elif single_action == 'sell':
self.order = self.sell(data=key, size=self.p.order_size[key])
self.broker_message = 'new {}_SELL created; '.format(key) + self.broker_message
elif single_action == 'close':
self.order = self.close(data=key)
self.broker_message = 'new {}_CLOSE created; '.format(key) + self.broker_message
# Somewhere after this point, server-side _BTgymAnalyzer() is exchanging information with environment wrapper,
# obtaining <self.action> , composing and sending <state,reward,done,info> etc... never mind.
def _next_target_percent(self, action):
"""
Uses `order_target_percent` method to rebalance assets to given ratios. Expects action for every asset to be
a float scalar in [0,1], with actions sum to 1 over all assets (including base one).
Note that action for base asset (cash) is ignored.
For details refer to: https://www.backtrader.com/docu/order_target/order_target.html
"""
        # TODO 1: filter similar actions to prevent excessive order issuing, e.g. by DKL on two consecutive ones
        # TODO 2: action discretisation at the level of execution
for asset in self.p.asset_names:
            # Reducing asset positions subject to a 10% margin reserve:
single_action = round(float(action[asset]) * 0.9, 2)
self.order = self.order_target_percent(data=asset, target=single_action)
self.broker_message += ' new {}->{:1.0f}% created; '.format(asset, single_action * 100)
| lgpl-3.0 | 1,417,447,790,621,117,000 | 39.889968 | 120 | 0.573407 | false |
rbuffat/pyidf | tests/test_airflownetworkintrazonelinkage.py | 1 | 1949 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.natural_ventilation_and_duct_leakage import AirflowNetworkIntraZoneLinkage
log = logging.getLogger(__name__)
class TestAirflowNetworkIntraZoneLinkage(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_airflownetworkintrazonelinkage(self):
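        # Build the object field by field, save it to a temporary IDF file,
        # then re-read that file and check every field round-trips intact.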
pyidf.validation_level = ValidationLevel.error
obj = AirflowNetworkIntraZoneLinkage()
# alpha
var_name = "Name"
obj.name = var_name
# alpha
var_node_1_name = "Node 1 Name"
obj.node_1_name = var_node_1_name
# alpha
var_node_2_name = "Node 2 Name"
obj.node_2_name = var_node_2_name
# object-list
var_component_name = "object-list|Component Name"
obj.component_name = var_component_name
# object-list
var_airflownetworkmultizonesurface_name = "object-list|AirflowNetwork:MultiZone:Surface Name"
obj.airflownetworkmultizonesurface_name = var_airflownetworkmultizonesurface_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.airflownetworkintrazonelinkages[0].name, var_name)
self.assertEqual(idf2.airflownetworkintrazonelinkages[0].node_1_name, var_node_1_name)
self.assertEqual(idf2.airflownetworkintrazonelinkages[0].node_2_name, var_node_2_name)
self.assertEqual(idf2.airflownetworkintrazonelinkages[0].component_name, var_component_name)
self.assertEqual(idf2.airflownetworkintrazonelinkages[0].airflownetworkmultizonesurface_name, var_airflownetworkmultizonesurface_name) | apache-2.0 | 4,970,112,966,095,272,000 | 35.111111 | 142 | 0.691637 | false |
tadamic/sokoenginepy | tests/manager/board_manager_spec.py | 1 | 12342 | from itertools import permutations
import pytest
from sokoenginepy import (
DEFAULT_PIECE_ID,
BoardManager,
CellAlreadyOccupiedError,
SokobanBoard,
SokobanPlus,
)
class DescribeBoardManager:
def it_memoizes_pushers(
self, board_manager, pushers_positions, invalid_pusher_position
):
assert board_manager.pushers_count == 2
assert sorted(board_manager.pushers_ids) == list(pushers_positions.keys())
assert board_manager.pushers_positions == pushers_positions
for pusher_id, pusher_position in pushers_positions.items():
assert board_manager.pusher_position(pusher_id) == pusher_position
assert board_manager.pusher_id_on(pusher_position) == pusher_id
assert board_manager.has_pusher(pusher_id) is True
assert board_manager.has_pusher_on(pusher_position) is True
with pytest.raises(KeyError):
board_manager.pusher_position(
DEFAULT_PIECE_ID + board_manager.pushers_count
)
with pytest.raises(KeyError):
board_manager.pusher_id_on(invalid_pusher_position)
assert (
board_manager.has_pusher(DEFAULT_PIECE_ID + board_manager.pushers_count)
is False
)
assert board_manager.has_pusher_on(invalid_pusher_position) is False
def it_memoizes_boxes(
self, board_manager, boxes_positions, boxes_ids, invalid_box_position
):
assert board_manager.boxes_count == 6
assert sorted(board_manager.boxes_ids) == boxes_ids
assert board_manager.boxes_positions == boxes_positions
for box_id, box_position in boxes_positions.items():
assert board_manager.box_position(box_id) == box_position
assert board_manager.box_id_on(box_position) == box_id
assert board_manager.has_box(box_id) is True
assert board_manager.has_box_on(box_position) is True
with pytest.raises(KeyError):
board_manager.box_position(DEFAULT_PIECE_ID + board_manager.boxes_count)
with pytest.raises(KeyError):
board_manager.box_id_on(invalid_box_position)
assert (
board_manager.has_box(DEFAULT_PIECE_ID + board_manager.boxes_count) is False
)
assert board_manager.has_box_on(invalid_box_position) is False
for box_id, box_position in boxes_positions.items():
assert board_manager.box_plus_id(box_id) == SokobanPlus.DEFAULT_PLUS_ID
# Doesn't rise KeyError if plus is disabled...
assert (
board_manager.box_plus_id(DEFAULT_PIECE_ID + board_manager.boxes_count)
== SokobanPlus.DEFAULT_PLUS_ID
)
board_manager.boxorder = "1 3 2"
board_manager.goalorder = "3 1 2"
board_manager.enable_sokoban_plus()
assert board_manager.box_plus_id(DEFAULT_PIECE_ID) == 1
assert board_manager.box_plus_id(DEFAULT_PIECE_ID + 1) == 3
assert board_manager.box_plus_id(DEFAULT_PIECE_ID + 2) == 2
assert (
board_manager.box_plus_id(DEFAULT_PIECE_ID + 3)
== SokobanPlus.DEFAULT_PLUS_ID
)
assert (
board_manager.box_plus_id(DEFAULT_PIECE_ID + 4)
== SokobanPlus.DEFAULT_PLUS_ID
)
assert (
board_manager.box_plus_id(DEFAULT_PIECE_ID + 5)
== SokobanPlus.DEFAULT_PLUS_ID
)
with pytest.raises(KeyError):
assert board_manager.box_plus_id(
DEFAULT_PIECE_ID + board_manager.boxes_count
)
def it_memoizes_goals(
self, board_manager, goals_positions, goals_ids, invalid_goal_position
):
assert board_manager.goals_count == 6
assert sorted(board_manager.goals_ids) == goals_ids
assert board_manager.goals_positions == goals_positions
for goal_id, goal_position in goals_positions.items():
assert board_manager.goal_position(goal_id) == goal_position
assert board_manager.goal_id_on(goal_position) == goal_id
assert board_manager.has_goal(goal_id) is True
assert board_manager.has_goal_on(goal_position) is True
with pytest.raises(KeyError):
board_manager.goal_position(DEFAULT_PIECE_ID + board_manager.goals_count)
with pytest.raises(KeyError):
board_manager.goal_id_on(invalid_goal_position)
assert (
board_manager.has_goal(DEFAULT_PIECE_ID + board_manager.goals_count)
is False
)
assert board_manager.has_goal_on(invalid_goal_position) is False
for goal_id, goal_position in goals_positions.items():
assert board_manager.goal_plus_id(goal_id) == SokobanPlus.DEFAULT_PLUS_ID
# Doesn't rise KeyError if plus is disabled...
assert (
board_manager.goal_plus_id(DEFAULT_PIECE_ID + board_manager.goals_count)
== SokobanPlus.DEFAULT_PLUS_ID
)
board_manager.boxorder = "1 3 2"
board_manager.goalorder = "3 1 2"
board_manager.enable_sokoban_plus()
assert board_manager.goal_plus_id(DEFAULT_PIECE_ID) == 3
assert board_manager.goal_plus_id(DEFAULT_PIECE_ID + 1) == 1
assert board_manager.goal_plus_id(DEFAULT_PIECE_ID + 2) == 2
assert (
board_manager.goal_plus_id(DEFAULT_PIECE_ID + 3)
== SokobanPlus.DEFAULT_PLUS_ID
)
assert (
board_manager.goal_plus_id(DEFAULT_PIECE_ID + 4)
== SokobanPlus.DEFAULT_PLUS_ID
)
assert (
board_manager.goal_plus_id(DEFAULT_PIECE_ID + 5)
== SokobanPlus.DEFAULT_PLUS_ID
)
with pytest.raises(KeyError):
assert board_manager.goal_plus_id(
DEFAULT_PIECE_ID + board_manager.goals_count
)
def it_calculates_all_valid_board_solutions(
self, board_manager, all_solutions, sokoban_plus_solutions
):
assert list(board_manager.solutions()) == all_solutions
board_manager.boxorder = "1 3 2"
board_manager.goalorder = "3 2 1"
board_manager.enable_sokoban_plus()
assert list(board_manager.solutions()) == sokoban_plus_solutions
def it_moves_boxes(self, board_manager):
old_box_position = board_manager.box_position(DEFAULT_PIECE_ID)
board_manager.move_box(DEFAULT_PIECE_ID, 0)
assert board_manager.box_position(DEFAULT_PIECE_ID) == 0
assert board_manager.has_box_on(0)
assert not board_manager.has_box_on(old_box_position)
assert board_manager.board[0].has_box
assert not board_manager.board[old_box_position].has_box
board_manager.move_box(DEFAULT_PIECE_ID, old_box_position)
board_manager.move_box_from(old_box_position, 0)
assert board_manager.box_position(DEFAULT_PIECE_ID) == 0
assert board_manager.has_box_on(0)
assert not board_manager.has_box_on(old_box_position)
assert board_manager.board[0].has_box
assert not board_manager.board[old_box_position].has_box
def it_moves_pushers(self, board_manager):
old_pusher_position = board_manager.pusher_position(DEFAULT_PIECE_ID)
board_manager.move_pusher(DEFAULT_PIECE_ID, 0)
assert board_manager.pusher_position(DEFAULT_PIECE_ID) == 0
assert board_manager.has_pusher_on(0)
assert not board_manager.has_pusher_on(old_pusher_position)
assert board_manager.board[0].has_pusher
assert not board_manager.board[old_pusher_position].has_pusher
board_manager.move_pusher(DEFAULT_PIECE_ID, old_pusher_position)
board_manager.move_pusher_from(old_pusher_position, 0)
assert board_manager.pusher_position(DEFAULT_PIECE_ID) == 0
assert board_manager.has_pusher_on(0)
assert not board_manager.has_pusher_on(old_pusher_position)
assert board_manager.board[0].has_pusher
assert not board_manager.board[old_pusher_position].has_pusher
def test_moving_box_onto_obstacle_raises_exception(
self, board_manager, wall_position, pushers_positions, boxes_positions
):
box_id = DEFAULT_PIECE_ID
box_position = board_manager.box_position(box_id)
with pytest.raises(CellAlreadyOccupiedError):
board_manager.move_box(box_id, boxes_positions[DEFAULT_PIECE_ID + 1])
with pytest.raises(CellAlreadyOccupiedError):
board_manager.move_box_from(
box_position, boxes_positions[DEFAULT_PIECE_ID + 1]
)
with pytest.raises(CellAlreadyOccupiedError):
board_manager.move_box(box_id, pushers_positions[DEFAULT_PIECE_ID])
with pytest.raises(CellAlreadyOccupiedError):
board_manager.move_box_from(
box_position, pushers_positions[DEFAULT_PIECE_ID]
)
with pytest.raises(CellAlreadyOccupiedError):
board_manager.move_box(box_id, wall_position)
with pytest.raises(CellAlreadyOccupiedError):
board_manager.move_box_from(box_position, wall_position)
def test_moving_pusher_onto_obstacle_raises_exception(
self, board_manager, wall_position, boxes_positions, pushers_positions
):
pusher_id = DEFAULT_PIECE_ID
pusher_position = board_manager.pusher_position(pusher_id)
with pytest.raises(CellAlreadyOccupiedError):
board_manager.move_pusher(pusher_id, boxes_positions[DEFAULT_PIECE_ID])
with pytest.raises(CellAlreadyOccupiedError):
board_manager.move_pusher_from(
pusher_position, boxes_positions[DEFAULT_PIECE_ID]
)
with pytest.raises(CellAlreadyOccupiedError):
board_manager.move_pusher(
pusher_id, pushers_positions[DEFAULT_PIECE_ID + 1]
)
with pytest.raises(CellAlreadyOccupiedError):
board_manager.move_pusher_from(
pusher_position, pushers_positions[DEFAULT_PIECE_ID + 1]
)
with pytest.raises(CellAlreadyOccupiedError):
board_manager.move_pusher(pusher_id, wall_position)
with pytest.raises(CellAlreadyOccupiedError):
board_manager.move_pusher_from(pusher_position, wall_position)
def it_implements_switching_box_and_goal_positions(
self,
board_manager,
boxes_positions,
switched_boxes,
goals_positions,
switched_goals,
board_str,
switched_board_str,
):
board_manager.switch_boxes_and_goals()
assert board_manager.boxes_positions == switched_boxes
assert board_manager.goals_positions == switched_goals
assert str(board_manager.board) == switched_board_str
board_manager.switch_boxes_and_goals()
assert board_manager.boxes_positions == boxes_positions
assert board_manager.goals_positions == goals_positions
assert str(board_manager.board) == board_str
def test_switching_respects_sokoban_plus_if_enabled(
self,
board_manager,
switched_boxes_plus,
switched_goals_plus,
boxes_positions,
goals_positions,
):
board_manager.boxorder = "1 3 2"
board_manager.goalorder = "3 2 1"
board_manager.enable_sokoban_plus()
board_manager.switch_boxes_and_goals()
assert board_manager.boxes_positions == switched_boxes_plus
assert board_manager.goals_positions == switched_goals_plus
result = board_manager.switch_boxes_and_goals()
assert board_manager.boxes_positions == boxes_positions
assert board_manager.goals_positions == goals_positions
def test_switching_moves_pusher_out_of_the_way(self):
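        # Board notation: '+' is a pusher standing on a goal, '$' is a box off-goal.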
board_str = "\n".join(
["########", "# #", "# + #", "# $ #", "########"]
)
switched_board_str = "\n".join(
["########", "# #", "# $ #", "# + #", "########"]
)
b = SokobanBoard(board_str=board_str)
board_manager = BoardManager(b)
board_manager.switch_boxes_and_goals()
assert str(board_manager.board) == switched_board_str
board_manager.switch_boxes_and_goals()
assert str(board_manager.board) == board_str
| gpl-3.0 | -4,757,934,307,954,671,000 | 39.201954 | 88 | 0.631826 | false |
adobe-research/spark-cluster-deployment | initial-deployment-puppet/modules/spark/files/spark/examples/src/main/python/wordcount.py | 1 | 1306 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from operator import add
from pyspark import SparkContext
if __name__ == "__main__":
if len(sys.argv) != 2:
print >> sys.stderr, "Usage: wordcount <file>"
exit(-1)
sc = SparkContext(appName="PythonWordCount")
lines = sc.textFile(sys.argv[1], 1)
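    # Split each line into words, map every word to a (word, 1) pair,
    # then sum the counts per word across the whole input file.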
counts = lines.flatMap(lambda x: x.split(' ')) \
.map(lambda x: (x, 1)) \
.reduceByKey(add)
output = counts.collect()
for (word, count) in output:
print "%s: %i" % (word, count)
| apache-2.0 | 8,318,567,125,479,649,000 | 36.314286 | 74 | 0.687596 | false |
vlfedotov/django-business-logic | business_logic/rest/serializers.py | 1 | 9163 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.utils import six
from rest_framework import serializers
from ..models import (ExceptionLog, Execution, ExecutionArgument, ExecutionEnvironment, FunctionDefinition,
FunctionLibrary, LogEntry, Program, ProgramArgument, ProgramArgumentField, ProgramInterface,
ProgramVersion, ReferenceDescriptor, FunctionArgument, FunctionArgumentChoice)
from ..models.types_ import TYPES_FOR_DJANGO_FIELDS, DJANGO_FIELDS_FOR_TYPES
from ..blockly.build import BlocklyXmlBuilder
from ..blockly.create import NodeTreeCreator
from ..blockly.parse import BlocklyXmlParser
def get_model_name(content_type):
return '{}.{}'.format(content_type.app_label, content_type.model_class().__name__)
def get_model_verbose_name(content_type):
return content_type.model_class()._meta.verbose_name
class ContentTypeSerializer(serializers.Serializer):
name = serializers.SerializerMethodField()
verbose_name = serializers.SerializerMethodField()
id = serializers.IntegerField()
def get_verbose_name(self, obj):
return get_model_verbose_name(obj)
def get_name(self, obj):
return get_model_name(obj)
class FunctionArgumentChoiceSerializer(serializers.ModelSerializer):
class Meta:
model = FunctionArgumentChoice
fields = (
'value',
'title',
)
class FunctionArgumentSerializer(serializers.ModelSerializer):
choices = FunctionArgumentChoiceSerializer(many=True)
class Meta:
model = FunctionArgument
fields = ('name', 'description', 'choices')
class FunctionDefinitionSerializer(serializers.ModelSerializer):
arguments = FunctionArgumentSerializer(many=True)
class Meta:
model = FunctionDefinition
exclude = ('id', 'polymorphic_ctype')
class FunctionLibrarySerializer(serializers.ModelSerializer):
functions = FunctionDefinitionSerializer(many=True)
class Meta:
model = FunctionLibrary
exclude = ('id',)
class ExecutionEnvironmentSerializer(serializers.ModelSerializer):
libraries = FunctionLibrarySerializer(many=True)
class Meta:
model = ExecutionEnvironment
exclude = ('id',)
class ProgramInterfaceListSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='business-logic:rest:program-interface')
class Meta:
model = ProgramInterface
fields = '__all__'
class BlocklyXMLSerializer(serializers.CharField):
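    # Field that exposes a program's node tree as Blockly XML: the tree is rendered
    # to XML on output and parsed back into a node tree on input (with XML validation
    # performed in run_validation).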
def to_representation(self, instance):
return BlocklyXmlBuilder().build(instance)
def to_internal_value(self, data):
return NodeTreeCreator().create(BlocklyXmlParser().parse(data)[0])
def run_validation(self, data=serializers.empty):
if data == '' or (self.trim_whitespace and six.text_type(data).strip() == ''):
if not self.allow_blank:
self.fail('blank')
return ''
(is_empty_value, data) = self.validate_empty_values(data)
if is_empty_value:
return data
try:
BlocklyXmlParser().parse(data)
except Exception as e:
raise serializers.ValidationError(
["Xml parse error - {}: {}".format(e.__class__.__name__, six.text_type(e))])
value = self.to_internal_value(data)
self.run_validators(value)
return value
class ProgramSerializer(serializers.ModelSerializer):
environment = ExecutionEnvironmentSerializer(read_only=True)
class Meta:
model = Program
fields = '__all__'
class ProgramListSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='business-logic:rest:program')
class Meta:
model = Program
fields = '__all__'
class ProgramVersionListSerializer(serializers.ModelSerializer):
url = serializers.HyperlinkedIdentityField(view_name='business-logic:rest:program-version')
class Meta:
model = ProgramVersion
read_only_fields = ('is_default',)
exclude = ('entry_point',)
class ProgramVersionCreateSerializer(serializers.ModelSerializer):
xml = BlocklyXMLSerializer(source='entry_point', required=True)
id = serializers.IntegerField(read_only=True)
class Meta:
model = ProgramVersion
fields = ('title', 'description', 'xml', 'program', 'id')
class ProgramVersionSerializer(serializers.ModelSerializer):
xml = BlocklyXMLSerializer(source='entry_point', required=True)
program = serializers.PrimaryKeyRelatedField(read_only=True)
environment = ExecutionEnvironmentSerializer(read_only=True)
class Meta:
model = ProgramVersion
exclude = ('entry_point',)
class ReferenceDescriptorListSerializer(serializers.ModelSerializer):
name = serializers.SerializerMethodField()
verbose_name = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
content_type = ContentTypeSerializer()
class Meta:
model = ReferenceDescriptor
exclude = ('title',)
def get_name(self, obj):
return get_model_name(obj.content_type)
def get_verbose_name(self, obj):
return obj.title or obj.content_type.model_class()._meta.verbose_name
def get_url(self, obj):
return reverse('business-logic:rest:reference-list', kwargs=dict(model=get_model_name(obj.content_type)))
class ReferenceSerializer(serializers.ModelSerializer):
id = serializers.IntegerField()
name = serializers.SerializerMethodField()
def get_fields(self):
declared_fields = copy.deepcopy(self._declared_fields)
return declared_fields
def get_name(self, obj):
reference_descriptor = self.context['view'].get_reference_descriptor()
return six.text_type(getattr(obj, reference_descriptor.name_field) if reference_descriptor.name_field else obj)
class ProgramArgumentFieldSerializer(serializers.ModelSerializer):
class Meta:
model = ProgramArgumentField
def to_representation(self, instance):
representation = {}
representation['name'] = instance.name
argument = instance.program_argument
model = argument.content_type.model_class()
field_names = instance.name.split('.')
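        # Walk the dotted field path through related models to reach the terminal field,
        # recording its data type and, for relations, the target content type.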
for i, field_name in enumerate(field_names):
field = model._meta.get_field(field_name)
is_last_field = i == len(field_names) - 1
is_django_model = field.__class__ in DJANGO_FIELDS_FOR_TYPES['model']
if is_django_model:
model = field.related_model
if is_last_field:
representation['data_type'] = TYPES_FOR_DJANGO_FIELDS[field.__class__]
representation['content_type'] = (ContentTypeSerializer().to_representation(
ContentType.objects.get_for_model(model)) if is_django_model else None)
representation['verbose_name'] = instance.get_title()
return representation
class ProgramArgumentSerializer(serializers.ModelSerializer):
fields = ProgramArgumentFieldSerializer(many=True)
verbose_name = serializers.SerializerMethodField()
content_type = ContentTypeSerializer()
class Meta:
model = ProgramArgument
exclude = ('id', 'program_interface', 'variable_definition')
def get_verbose_name(self, obj):
return get_model_verbose_name(obj.content_type)
class ProgramInterfaceSerializer(serializers.ModelSerializer):
arguments = ProgramArgumentSerializer(many=True)
environment = ExecutionEnvironmentSerializer()
class Meta:
model = ProgramInterface
exclude = ('id',)
class ExecutionListSerializer(serializers.ModelSerializer):
class Meta:
model = Execution
exclude = ('log',)
class ExecutionArgumentSerializer(serializers.ModelSerializer):
content_type = ContentTypeSerializer()
name = serializers.SerializerMethodField()
verbose_name = serializers.SerializerMethodField()
class Meta:
model = ExecutionArgument
exclude = ('id', 'program_argument', 'execution')
def get_name(self, obj):
return obj.program_argument.name
def get_verbose_name(self, obj):
return get_model_verbose_name(obj.content_type)
class ExecutionSerializer(serializers.ModelSerializer):
arguments = ExecutionArgumentSerializer(many=True)
class Meta:
model = Execution
exclude = ('log',)
class ExceptionLogSerializer(serializers.ModelSerializer):
class Meta:
model = ExceptionLog
exclude = ('log_entry', 'id')
class LogSerializer(serializers.ModelSerializer):
exception = ExceptionLogSerializer()
class Meta:
model = LogEntry
exclude = ('sib_order', 'parent', 'id')
def get_fields(self):
fields = super(LogSerializer, self).get_fields()
fields['children'] = LogSerializer(many=True)
return fields
| mit | -2,308,939,436,610,275,000 | 29.543333 | 119 | 0.68973 | false |
joelarmstrong/analysis-purgatory | splitting-top-down/plot.py | 1 | 2309 | #!/usr/bin/env python
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def main():
sns.set_style('ticks')
df = pd.read_csv('results.csv')
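    # Total true splits is recovered as perfect_splits / fraction_perfect_splits;
    # "good" splits are those that are neither mismatched leaf sets nor flipped.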
df['fraction_good_splits'] = (df['perfect_splits']/df['fraction_perfect_splits'] - df['mismatching_leaf_sets'] - df['flipped_splits']) / (df['perfect_splits']/df['fraction_perfect_splits'])
grid = sns.FacetGrid(df, size=5, row='evaluation_method',
row_order=['none', 'relaxed-split-decomposition', 'split-decomposition'],
hue_order=['k-means', 'k-modes', 'maximum-likelihood', 'upgma', 'neighbor-joining', 'guided-neighbor-joining', 'split-decomposition'],
aspect=16.0/9, legend_out=True)
grid.map(sns.boxplot, 'loss_rate', 'fraction_good_splits', 'cluster_method', palette='colorblind', hue_order=['k-means', 'k-modes', 'maximum-likelihood', 'upgma', 'neighbor-joining', 'guided-neighbor-joining', 'split-decomposition']).set_axis_labels('Loss rate (as fraction of substitution rate)', 'Fraction of true splits correctly split or unresolved')
legend = plt.legend(loc='center left', bbox_to_anchor=(1, 1.5))
sns.plt.savefig('varying_loss_rate.pdf', bbox_extra_artists=(legend,), bbox_inches='tight')
grid = sns.FacetGrid(df, size=5, row='evaluation_method',
row_order=['none', 'relaxed-split-decomposition', 'split-decomposition'],
hue_order=['k-means', 'k-modes', 'maximum-likelihood', 'upgma', 'neighbor-joining', 'guided-neighbor-joining', 'split-decomposition'],
aspect=16.0/9, legend_out=True)
grid.map(sns.boxplot, 'duplication_rate', 'fraction_good_splits', 'cluster_method', palette='colorblind', hue_order=['k-means', 'k-modes', 'maximum-likelihood', 'upgma', 'neighbor-joining', 'guided-neighbor-joining', 'split-decomposition']).set_axis_labels('Duplication rate (as fraction of substitution rate)', 'Fraction of true splits correctly split or unresolved')
legend = plt.legend(loc='center left', bbox_to_anchor=(1, 1.5))
sns.plt.savefig('varying_duplication_rate.pdf', bbox_extra_artists=(legend,), bbox_inches='tight')
print df.groupby(['cluster_method', 'evaluation_method']).sum().to_csv()
if __name__ == '__main__':
main()
| mit | 1,598,443,924,317,746,700 | 73.483871 | 372 | 0.658294 | false |
vinodpanicker/scancode-toolkit | src/scancode/format.py | 1 | 5795 | #
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import print_function, absolute_import
from collections import OrderedDict
from operator import itemgetter
from os.path import dirname
from os.path import exists
from os.path import join
from os.path import abspath
from os import makedirs
from commoncode import fileutils
"""
Format scans outputs.
"""
def get_html_template(format): # @ReservedAssignment
"""
Given a format string corresponding to a template directory, load and return
the template.html file found in that directory.
"""
from jinja2 import Environment, FileSystemLoader
templates_dir = get_template_dir(format)
env = Environment(loader=FileSystemLoader(templates_dir))
template = env.get_template('template.html')
return template
def get_template_dir(format): # @ReservedAssignment
"""
Given a format string return the corresponding template directory.
"""
return join(dirname(__file__), 'templates', format)
def as_html_app(scanned_path, output_file):
"""
Return an HTML string built from a list of results and the html-app template.
"""
template = get_html_template('html-app')
_, assets_dir = get_html_app_files_dirs(output_file)
return template.render(assets_dir=assets_dir, scanned_path=scanned_path)
class HtmlAppAssetCopyWarning(Exception):
pass
class HtmlAppAssetCopyError(Exception):
pass
def is_stdout(output_file):
return output_file.name == '<stdout>'
def get_html_app_files_dirs(output_file):
"""
    Return a tuple of (parent_dir, dir_name) where dir_name is built from the
    `output_file` file object's base name (stripped of its extension) plus a
    `_files` suffix. Return empty strings if output is to stdout.
"""
if is_stdout(output_file):
return '', ''
file_name = output_file.name
parent_dir = dirname(file_name)
dir_name = fileutils.file_base_name(file_name) + '_files'
return parent_dir, dir_name
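# For example (illustrative): an output file named /tmp/scan.html maps to
# ('/tmp', 'scan_files'), so the html-app assets directory sits next to the
# generated HTML file.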
def create_html_app_assets(results, output_file):
"""
Given an html-app output_file, create the corresponding `_files` directory
and copy the assets to this directory. The target directory is deleted if it
exists.
Raise HtmlAppAssetCopyWarning if the output_file is <stdout> or
HtmlAppAssetCopyError if the copy was not possible.
"""
try:
if is_stdout(output_file):
raise HtmlAppAssetCopyWarning()
assets_dir = join(get_template_dir('html-app'), 'assets')
tgt_dirs = get_html_app_files_dirs(output_file)
target_dir = join(*tgt_dirs)
if exists(target_dir):
fileutils.delete(target_dir)
fileutils.copytree(assets_dir, target_dir)
# write json data
import json
root_path, assets_dir = get_html_app_files_dirs(output_file)
with open(join(root_path, assets_dir, 'data.json'), 'w') as f:
f.write('data=' + json.dumps(results))
    except HtmlAppAssetCopyWarning as w:
        raise w
    except Exception as e:
raise HtmlAppAssetCopyError(e)
def as_html(detected_data):
"""
Return an HTML string built from a list of results and the html template.
"""
template = get_html_template('html')
converted = OrderedDict()
licenses = {}
# Create a dict keyed by location
for scan_result in detected_data:
location = scan_result['location']
results = []
if 'copyrights' in scan_result:
for entry in scan_result['copyrights']:
results.append({
'start': entry['start_line'],
'end': entry['end_line'],
'what': 'copyright',
# NOTE: we display one statement per line.
'value': '\n'.join(entry['statements']),
})
if 'licenses' in scan_result:
for entry in scan_result['licenses']:
results.append({
'start': entry['start_line'],
'end': entry['end_line'],
'what': 'license',
'value': entry['key'],
})
if entry['key'] not in licenses:
licenses[entry['key']] = entry
if results:
converted[location] = sorted(results, key=itemgetter('start'))
licenses = OrderedDict(sorted(licenses.items()))
return template.render(results=converted, licenses=licenses)
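# A sketch of the `detected_data` structure consumed by as_html(), based on the
# keys accessed above; the concrete values are illustrative only:
#
# detected_data = [
#     {
#         'location': 'src/foo.c',
#         'copyrights': [
#             {'start_line': 1, 'end_line': 2,
#              'statements': ['Copyright (c) 2015 nexB Inc.']},
#         ],
#         'licenses': [
#             {'start_line': 4, 'end_line': 12, 'key': 'apache-2.0'},
#         ],
#     },
# ]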
| apache-2.0 | 590,873,037,973,926,900 | 33.088235 | 82 | 0.663158 | false |
aagusti/e-gaji | egaji/views/user_unit.py | 1 | 8985 | from email.utils import parseaddr
from sqlalchemy import not_
from pyramid.view import (
view_config,
)
from pyramid.httpexceptions import (
HTTPFound,
)
import colander
from deform import (
Form,
widget,
ValidationFailure,
)
from ..models import (
DBSession,
User,
)
from ..models.pemda_model import (
UnitModel,
UserUnitModel,
)
from datatables import ColumnDT, DataTables
SESS_ADD_FAILED = 'Add user failed'
SESS_EDIT_FAILED = 'Edit user failed'
########
# List #
########
@view_config(route_name='user-unit', renderer='templates/userunit/list.pt',
permission='read')
def view_list(request):
#rows = DBSession.query(User).filter(User.id > 0).order_by('email')
return dict(project='e-Gaji')
##########
# Action #
##########
@view_config(route_name='user-unit-act', renderer='json',
permission='read')
def usr_unit_act(request):
ses = request.session
req = request
params = req.params
url_dict = req.matchdict
if url_dict['act']=='grid':
columns = []
columns.append(ColumnDT('id'))
columns.append(ColumnDT('email'))
columns.append(ColumnDT('user_name'))
columns.append(ColumnDT('status'))
columns.append(ColumnDT('last_login_date'))
columns.append(ColumnDT('registered_date'))
columns.append(ColumnDT('nama'))
query = DBSession.query(User.id, User.user_name, User.email, User.status,
User.last_login_date, User.registered_date,
UnitModel.nama).outerjoin(UserUnitModel).outerjoin(UnitModel)
rowTable = DataTables(req, User, query, columns)
return rowTable.output_result()
#######
# Add #
#######
def email_validator(node, value):
name, email = parseaddr(value)
if not email or email.find('@') < 0:
raise colander.Invalid(node, 'Invalid email format')
def form_validator(form, value):
def err_email():
raise colander.Invalid(form,
'Email %s sudah digunakan oleh user ID %d' % (
value['email'], found.id))
def err_name():
raise colander.Invalid(form,
'Nama user %s sudah digunakan oleh ID %d' % (
value['user_name'], found.id))
if 'id' in form.request.matchdict:
uid = form.request.matchdict['id']
q = DBSession.query(User).filter_by(id=uid)
user = q.first()
else:
user = None
q = DBSession.query(User).filter_by(email=value['email'])
found = q.first()
if user:
if found and found.id != user.id:
err_email()
elif found:
err_email()
if 'user_name' in value: # optional
found = User.get_by_name(value['user_name'])
if user:
if found and found.id != user.id:
err_name()
elif found:
err_name()
@colander.deferred
def deferred_status(node, kw):
values = kw.get('daftar_status', [])
return widget.SelectWidget(values=values)
STATUS = (
(1, 'Active'),
(0, 'Inactive'),
)
class AddSchema(colander.Schema):
unit_widget = widget.AutocompleteInputWidget(
size=60,
values = '/unit/act/headofnama',
min_length=1)
email = colander.SchemaNode(colander.String(),
validator=email_validator)
user_name = colander.SchemaNode(
colander.String(),
missing=colander.drop)
status = colander.SchemaNode(
colander.String(),
widget=deferred_status)
password = colander.SchemaNode(
colander.String(),
widget=widget.PasswordWidget(),
missing=colander.drop)
unit_nm = colander.SchemaNode(
colander.String(),
widget=unit_widget,
missing=colander.drop,
oid = "unit_nm",
)
unit_id = colander.SchemaNode(
colander.Integer(),
widget=widget.HiddenWidget(),
missing=colander.drop,
oid = "unit_id")
sub_unit = colander.SchemaNode(
colander.Boolean(),
missing=colander.drop,
)
class EditSchema(AddSchema):
id = colander.SchemaNode(colander.String(),
missing=colander.drop,
widget=widget.HiddenWidget(readonly=True))
def get_form(request, class_form):
schema = class_form(validator=form_validator)
schema = schema.bind(daftar_status=STATUS)
schema.request = request
return Form(schema, buttons=('simpan','batal'))
def save(values, user, row=None):
if not row:
row = User()
row.from_dict(values)
if values['password']:
row.password = values['password']
DBSession.add(row)
DBSession.flush()
if values['unit_id']:
if row.units:
row_unit = UserUnitModel.query_user_id(row.id).first()
else:
row_unit = UserUnitModel()
row_unit.user_id = row.id
row_unit.from_dict(values)
row_unit.sub_unit = 'sub_unit' in values and values['sub_unit'] and 1 or 0
DBSession.add(row_unit)
DBSession.flush()
return row
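# A sketch of the `values` dict expected by save(), mirroring the AddSchema
# fields defined below (all concrete values are illustrative):
#
# values = {'email': 'user@example.com', 'user_name': 'user', 'status': 1,
#           'password': 'secret', 'unit_id': 3, 'unit_nm': 'Unit A',
#           'sub_unit': True}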
def save_request(values, request, row=None):
if 'id' in request.matchdict:
values['id'] = request.matchdict['id']
row = save(values, request.user, row)
request.session.flash('User %s has been saved.' % row.email)
def route_list(request):
return HTTPFound(location=request.route_url('user-unit'))
def session_failed(request, session_name):
r = dict(form=request.session[session_name])
del request.session[session_name]
return r
@view_config(route_name='user-unit-add', renderer='templates/userunit/add.pt',
permission='add')
def view_add(request):
form = get_form(request, AddSchema)
if request.POST:
if 'simpan' in request.POST:
controls = request.POST.items()
try:
c = form.validate(controls)
            except ValidationFailure as e:
                request.session[SESS_ADD_FAILED] = e.render()
                return HTTPFound(location=request.route_url('user-unit-add'))
save_request(dict(controls), request)
return route_list(request)
elif SESS_ADD_FAILED in request.session:
return session_failed(request, SESS_ADD_FAILED)
return dict(form=form.render())
########
# Edit #
########
def query_id(request):
return DBSession.query(User).filter_by(id=request.matchdict['id'])
def id_not_found(request):
msg = 'User ID %s not found.' % request.matchdict['id']
request.session.flash(msg, 'error')
return route_list(request)
@view_config(route_name='user-unit-edit', renderer='templates/userunit/edit.pt',
permission='edit')
def view_edit(request):
row = query_id(request).first()
if not row:
return id_not_found(request)
form = get_form(request, EditSchema)
if request.POST:
if 'simpan' in request.POST:
controls = request.POST.items()
try:
c = form.validate(controls)
            except ValidationFailure as e:
request.session[SESS_EDIT_FAILED] = e.render()
return HTTPFound(location=request.route_url('user-unit-edit',
id=row.id))
save_request(dict(controls), request, row)
return route_list(request)
elif SESS_EDIT_FAILED in request.session:
return session_failed(request, SESS_EDIT_FAILED)
values = row.to_dict()
if row.units:
row_unit = UserUnitModel.query_user_id(row.id).first()
values['sub_unit'] = row_unit.sub_unit
values['unit_id'] = row_unit.unit_id
values['unit_nm'] = row_unit.units.nama
return dict(form=form.render(appstruct=values))
##########
# Delete #
##########
@view_config(route_name='user-unit-delete', renderer='templates/userunit/delete.pt',
permission='delete')
def view_delete(request):
q = query_id(request)
row = q.first()
if not row:
return id_not_found(request)
form = Form(colander.Schema(), buttons=('delete','cancel'))
if request.POST:
if 'delete' in request.POST:
msg = 'User ID %d %s has been deleted.' % (row.id, row.email)
            UserUnitModel.query_user_id(row.id).delete()
q.delete()
DBSession.flush()
request.session.flash(msg)
return route_list(request)
return dict(row=row,
form=form.render())
| gpl-2.0 | 7,830,063,188,745,901,000 | 31.089286 | 93 | 0.563606 | false |
Valitseja/Postcard | postcard/cli.py | 1 | 1812 | # -*- coding: utf-8 -*-
"""Description"""
import json
import sys
import os
import click
from postcard.postcard import Postcard
from postcard.mailer import Mailman
import postcard.templater as templater
@click.command()
@click.option('--config', type=click.Path(exists=True), help="Path to configuration file")
@click.argument('variant', nargs=1)
def cli(config, variant):
"""."""
if not config:
click.echo("Config file not provided, trying to use default")
cwd = os.path.dirname(os.path.abspath(__file__))
        abspath = os.path.join(cwd, "..", "config.json")
if os.path.exists(abspath):
config = abspath
else:
click.echo(abspath)
click.echo("Default config file does not exist!")
sys.exit(1)
with open(config) as json_config:
try:
info = (json.loads(json_config.read()))[variant]
except KeyError:
click.echo("Variant '%s' is not part of the config!" % variant)
sys.exit(1)
try:
subject = info['subject']
sender = info['sender']
recipients = info['recipients']
txt_msg = info['plain']
template = info['template']['path']
tags = info['template']['tags']
host = info['host']
port = info['port']
images = info['images']
except KeyError as err:
click.echo("Missing required field '%s'" % err.args)
sys.exit(1)
html_msg = templater.render(template, tags)
my_postcard = Postcard(subject, sender, recipients)
my_postcard.create(txt_msg, html_msg)
for image in images:
my_postcard.add_image(image)
mailman = Mailman(host, port)
mailman.connect()
mailman.deliver(sender, recipients, my_postcard.package())
click.echo('All Done!')
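# A config.json sketch satisfying the lookups in cli() above; every key mirrors
# a field read from `info`, while the concrete values are illustrative only:
#
# {
#     "birthday": {
#         "subject": "Happy Birthday!",
#         "sender": "me@example.com",
#         "recipients": ["friend@example.com"],
#         "plain": "Plain-text fallback body",
#         "template": {"path": "templates/card.html",
#                      "tags": {"name": "Alice"}},
#         "host": "localhost",
#         "port": 25,
#         "images": ["pictures/cake.png"]
#     }
# }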
| mit | 2,748,098,555,682,565,600 | 28.225806 | 90 | 0.607064 | false |
daeilkim/refinery | refinery/bnpy/bnpy-dev/bnpy/init/FromTruth.py | 1 | 2192 | '''
FromTruth.py
Initialize params of a bnpy model using "ground truth" information,
such as human annotations
These are provided within a Data object, as a "TrueLabels" field
'''
import numpy as np
import FromScratchMult
def init_global_params(hmodel, Data, initname=None, seed=0, nRepeatTrue=2, **kwargs):
''' Initialize (in-place) the global params of the given hmodel
using the true labels associated with the Data
Args
-------
hmodel : bnpy model object to initialize
Data : bnpy Data object whose dimensions must match resulting hmodel
initname : string name for the routine to use
'truelabels' or 'repeattruelabels'
'''
PRNG = np.random.RandomState(seed)
if initname.count('truelabels') > 0:
if hasattr(Data, 'TrueLabels'):
resp = calc_resp_from_true_labels(Data)
elif hasattr(Data, 'TrueParams'):
if 'resp' in Data.TrueParams:
resp = Data.TrueParams['resp']
if 'word_variational' in Data.TrueParams:
resp = Data.TrueParams['word_variational']
if initname == 'truelabels':
pass # have everything we need
elif initname == 'repeattruelabels':
Ktrue = resp.shape[1]
rowIDs = PRNG.permutation(Data.nObs)
        L = len(rowIDs) // nRepeatTrue
bigResp = np.zeros((Data.nObs, Ktrue*nRepeatTrue))
curLoc = 0
for r in range(nRepeatTrue):
targetIDs = rowIDs[curLoc:curLoc+L]
bigResp[targetIDs, r*Ktrue:(r+1)*Ktrue] = resp[targetIDs,:]
curLoc += L
resp = bigResp
elif initname == 'trueparams':
hmodel.set_global_params(**Data.TrueParams)
return
else:
raise NotImplementedError('Unknown initname: %s' % (initname))
if hmodel.obsModel.__class__.__name__.count('Gauss') > 0:
LP = dict(resp=resp)
else:
LP = FromScratchMult.getLPfromResp(resp, Data)
SS = hmodel.get_global_suff_stats(Data, LP)
hmodel.update_global_params(SS)
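# A toy walk-through of the 'truelabels' branch above, assuming a Data object
# with TrueLabels = [0, 0, 1]: calc_resp_from_true_labels (below) returns the
# one-hot responsibility matrix
#   [[1., 0.],
#    [1., 0.],
#    [0., 1.]]
# which is then converted to local params (LP), summarized into sufficient
# statistics (SS), and used to set the model's global parameters.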
def calc_resp_from_true_labels(Data):
TrueLabels = Data.TrueLabels
uniqueLabels = np.unique(TrueLabels)
Ktrue = len(uniqueLabels)
resp = np.zeros((Data.nObs, Ktrue))
for k in range(Ktrue):
mask = TrueLabels == uniqueLabels[k]
resp[mask,k] = 1.0
return resp | mit | -7,890,607,368,942,962,000 | 30.328571 | 85 | 0.671989 | false |
leppa/home-assistant | homeassistant/components/person/__init__.py | 1 | 17106 | """Support for tracking people."""
from collections import OrderedDict
from itertools import chain
import logging
from typing import Optional
import uuid
import voluptuous as vol
from homeassistant.auth import EVENT_USER_REMOVED
from homeassistant.components import websocket_api
from homeassistant.components.device_tracker import (
ATTR_SOURCE_TYPE,
DOMAIN as DEVICE_TRACKER_DOMAIN,
SOURCE_TYPE_GPS,
)
from homeassistant.const import (
ATTR_GPS_ACCURACY,
ATTR_ID,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_ID,
CONF_NAME,
EVENT_HOMEASSISTANT_START,
STATE_HOME,
STATE_NOT_HOME,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import Event, State, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.event import async_track_state_change
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.storage import Store
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.loader import bind_hass
_LOGGER = logging.getLogger(__name__)
ATTR_EDITABLE = "editable"
ATTR_SOURCE = "source"
ATTR_USER_ID = "user_id"
CONF_DEVICE_TRACKERS = "device_trackers"
CONF_USER_ID = "user_id"
DOMAIN = "person"
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
SAVE_DELAY = 10
# Device tracker states to ignore
IGNORE_STATES = (STATE_UNKNOWN, STATE_UNAVAILABLE)
PERSON_SCHEMA = vol.Schema(
{
vol.Required(CONF_ID): cv.string,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_USER_ID): cv.string,
vol.Optional(CONF_DEVICE_TRACKERS, default=[]): vol.All(
cv.ensure_list, cv.entities_domain(DEVICE_TRACKER_DOMAIN)
),
}
)
CONFIG_SCHEMA = vol.Schema(
{vol.Optional(DOMAIN): vol.All(cv.ensure_list, cv.remove_falsy, [PERSON_SCHEMA])},
extra=vol.ALLOW_EXTRA,
)
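# An illustrative configuration.yaml entry matching PERSON_SCHEMA above
# (all names and entity ids are made up):
#
# person:
#   - name: Alice
#     id: alice_1234
#     user_id: 1234567890abcdef1234567890abcdef
#     device_trackers:
#       - device_tracker.alice_phone
#       - device_tracker.alice_car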
_UNDEF = object()
@bind_hass
async def async_create_person(hass, name, *, user_id=None, device_trackers=None):
"""Create a new person."""
await hass.data[DOMAIN].async_create_person(
name=name, user_id=user_id, device_trackers=device_trackers
)
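# Other code could call the helper above roughly as follows (illustrative;
# the tracker entity id is made up):
#
#     await async_create_person(
#         hass, "Guest", device_trackers=["device_tracker.guest_phone"]
#     )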
class PersonManager:
"""Manage person data."""
def __init__(
self, hass: HomeAssistantType, component: EntityComponent, config_persons
):
"""Initialize person storage."""
self.hass = hass
self.component = component
self.store = Store(hass, STORAGE_VERSION, STORAGE_KEY)
self.storage_data = None
config_data = self.config_data = OrderedDict()
for conf in config_persons:
person_id = conf[CONF_ID]
if person_id in config_data:
_LOGGER.error("Found config user with duplicate ID: %s", person_id)
continue
config_data[person_id] = conf
@property
def storage_persons(self):
"""Iterate over persons stored in storage."""
return list(self.storage_data.values())
@property
def config_persons(self):
"""Iterate over persons stored in config."""
return list(self.config_data.values())
async def async_initialize(self):
"""Get the person data."""
raw_storage = await self.store.async_load()
if raw_storage is None:
raw_storage = {"persons": []}
storage_data = self.storage_data = OrderedDict()
for person in raw_storage["persons"]:
storage_data[person[CONF_ID]] = person
entities = []
seen_users = set()
for person_conf in self.config_data.values():
person_id = person_conf[CONF_ID]
user_id = person_conf.get(CONF_USER_ID)
if user_id is not None:
if await self.hass.auth.async_get_user(user_id) is None:
_LOGGER.error("Invalid user_id detected for person %s", person_id)
continue
if user_id in seen_users:
_LOGGER.error(
"Duplicate user_id %s detected for person %s",
user_id,
person_id,
)
continue
seen_users.add(user_id)
entities.append(Person(person_conf, False))
# To make sure IDs don't overlap between config/storage
seen_persons = set(self.config_data)
for person_conf in storage_data.values():
person_id = person_conf[CONF_ID]
user_id = person_conf[CONF_USER_ID]
if person_id in seen_persons:
_LOGGER.error(
"Skipping adding person from storage with same ID as"
" configuration.yaml entry: %s",
person_id,
)
continue
if user_id is not None and user_id in seen_users:
_LOGGER.error(
"Duplicate user_id %s detected for person %s", user_id, person_id
)
continue
# To make sure all users have just 1 person linked.
seen_users.add(user_id)
entities.append(Person(person_conf, True))
if entities:
await self.component.async_add_entities(entities)
self.hass.bus.async_listen(EVENT_USER_REMOVED, self._user_removed)
async def async_create_person(self, *, name, device_trackers=None, user_id=None):
"""Create a new person."""
if not name:
raise ValueError("Name is required")
if user_id is not None:
await self._validate_user_id(user_id)
person = {
CONF_ID: uuid.uuid4().hex,
CONF_NAME: name,
CONF_USER_ID: user_id,
CONF_DEVICE_TRACKERS: device_trackers or [],
}
self.storage_data[person[CONF_ID]] = person
self._async_schedule_save()
await self.component.async_add_entities([Person(person, True)])
return person
async def async_update_person(
self, person_id, *, name=_UNDEF, device_trackers=_UNDEF, user_id=_UNDEF
):
"""Update person."""
current = self.storage_data.get(person_id)
if current is None:
raise ValueError("Invalid person specified.")
changes = {
key: value
for key, value in (
(CONF_NAME, name),
(CONF_DEVICE_TRACKERS, device_trackers),
(CONF_USER_ID, user_id),
)
if value is not _UNDEF and current[key] != value
}
if CONF_USER_ID in changes and user_id is not None:
await self._validate_user_id(user_id)
self.storage_data[person_id].update(changes)
self._async_schedule_save()
for entity in self.component.entities:
if entity.unique_id == person_id:
entity.person_updated()
break
return self.storage_data[person_id]
async def async_delete_person(self, person_id):
"""Delete person."""
if person_id not in self.storage_data:
raise ValueError("Invalid person specified.")
self.storage_data.pop(person_id)
self._async_schedule_save()
ent_reg = await self.hass.helpers.entity_registry.async_get_registry()
for entity in self.component.entities:
if entity.unique_id == person_id:
await entity.async_remove()
ent_reg.async_remove(entity.entity_id)
break
@callback
def _async_schedule_save(self) -> None:
"""Schedule saving the area registry."""
self.store.async_delay_save(self._data_to_save, SAVE_DELAY)
@callback
def _data_to_save(self) -> dict:
"""Return data of area registry to store in a file."""
return {"persons": list(self.storage_data.values())}
async def _validate_user_id(self, user_id):
"""Validate the used user_id."""
if await self.hass.auth.async_get_user(user_id) is None:
raise ValueError("User does not exist")
if any(
person
for person in chain(self.storage_data.values(), self.config_data.values())
if person.get(CONF_USER_ID) == user_id
):
raise ValueError("User already taken")
async def _user_removed(self, event: Event):
"""Handle event that a person is removed."""
user_id = event.data["user_id"]
for person in self.storage_data.values():
if person[CONF_USER_ID] == user_id:
await self.async_update_person(person_id=person[CONF_ID], user_id=None)
async def async_setup(hass: HomeAssistantType, config: ConfigType):
"""Set up the person component."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
conf_persons = config.get(DOMAIN, [])
manager = hass.data[DOMAIN] = PersonManager(hass, component, conf_persons)
await manager.async_initialize()
websocket_api.async_register_command(hass, ws_list_person)
websocket_api.async_register_command(hass, ws_create_person)
websocket_api.async_register_command(hass, ws_update_person)
websocket_api.async_register_command(hass, ws_delete_person)
return True
class Person(RestoreEntity):
"""Represent a tracked person."""
def __init__(self, config, editable):
"""Set up person."""
self._config = config
self._editable = editable
self._latitude = None
self._longitude = None
self._gps_accuracy = None
self._source = None
self._state = None
self._unsub_track_device = None
@property
def name(self):
"""Return the name of the entity."""
return self._config[CONF_NAME]
@property
def should_poll(self):
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return False
@property
def state(self):
"""Return the state of the person."""
return self._state
@property
def state_attributes(self):
"""Return the state attributes of the person."""
data = {ATTR_EDITABLE: self._editable, ATTR_ID: self.unique_id}
if self._latitude is not None:
data[ATTR_LATITUDE] = self._latitude
if self._longitude is not None:
data[ATTR_LONGITUDE] = self._longitude
if self._gps_accuracy is not None:
data[ATTR_GPS_ACCURACY] = self._gps_accuracy
if self._source is not None:
data[ATTR_SOURCE] = self._source
user_id = self._config.get(CONF_USER_ID)
if user_id is not None:
data[ATTR_USER_ID] = user_id
return data
@property
def unique_id(self):
"""Return a unique ID for the person."""
return self._config[CONF_ID]
async def async_added_to_hass(self):
"""Register device trackers."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state:
self._parse_source_state(state)
if self.hass.is_running:
# Update person now if hass is already running.
self.person_updated()
else:
# Wait for hass start to not have race between person
# and device trackers finishing setup.
@callback
def person_start_hass(now):
self.person_updated()
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, person_start_hass
)
@callback
def person_updated(self):
"""Handle when the config is updated."""
if self._unsub_track_device is not None:
self._unsub_track_device()
self._unsub_track_device = None
trackers = self._config.get(CONF_DEVICE_TRACKERS)
if trackers:
_LOGGER.debug("Subscribe to device trackers for %s", self.entity_id)
self._unsub_track_device = async_track_state_change(
self.hass, trackers, self._async_handle_tracker_update
)
self._update_state()
@callback
def _async_handle_tracker_update(self, entity, old_state, new_state):
"""Handle the device tracker state changes."""
self._update_state()
@callback
def _update_state(self):
"""Update the state."""
latest_non_gps_home = latest_not_home = latest_gps = latest = None
for entity_id in self._config.get(CONF_DEVICE_TRACKERS, []):
state = self.hass.states.get(entity_id)
if not state or state.state in IGNORE_STATES:
continue
if state.attributes.get(ATTR_SOURCE_TYPE) == SOURCE_TYPE_GPS:
latest_gps = _get_latest(latest_gps, state)
elif state.state == STATE_HOME:
latest_non_gps_home = _get_latest(latest_non_gps_home, state)
elif state.state == STATE_NOT_HOME:
latest_not_home = _get_latest(latest_not_home, state)
if latest_non_gps_home:
latest = latest_non_gps_home
elif latest_gps:
latest = latest_gps
else:
latest = latest_not_home
if latest:
self._parse_source_state(latest)
else:
self._state = None
self._source = None
self._latitude = None
self._longitude = None
self._gps_accuracy = None
self.async_schedule_update_ha_state()
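    # Precedence, illustrated: if a router-based tracker reports 'home' while
    # a GPS tracker reports 'not_home', the person resolves to 'home', because
    # latest_non_gps_home is checked before latest_gps and latest_not_home.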
@callback
def _parse_source_state(self, state):
"""Parse source state and set person attributes.
This is a device tracker state or the restored person state.
"""
self._state = state.state
self._source = state.entity_id
self._latitude = state.attributes.get(ATTR_LATITUDE)
self._longitude = state.attributes.get(ATTR_LONGITUDE)
self._gps_accuracy = state.attributes.get(ATTR_GPS_ACCURACY)
@websocket_api.websocket_command({vol.Required("type"): "person/list"})
def ws_list_person(
hass: HomeAssistantType, connection: websocket_api.ActiveConnection, msg
):
"""List persons."""
manager: PersonManager = hass.data[DOMAIN]
connection.send_result(
msg["id"],
{"storage": manager.storage_persons, "config": manager.config_persons},
)
@websocket_api.websocket_command(
{
vol.Required("type"): "person/create",
vol.Required("name"): vol.All(str, vol.Length(min=1)),
vol.Optional("user_id"): vol.Any(str, None),
vol.Optional("device_trackers", default=[]): vol.All(
cv.ensure_list, cv.entities_domain(DEVICE_TRACKER_DOMAIN)
),
}
)
@websocket_api.require_admin
@websocket_api.async_response
async def ws_create_person(
hass: HomeAssistantType, connection: websocket_api.ActiveConnection, msg
):
"""Create a person."""
manager: PersonManager = hass.data[DOMAIN]
try:
person = await manager.async_create_person(
name=msg["name"],
user_id=msg.get("user_id"),
device_trackers=msg["device_trackers"],
)
connection.send_result(msg["id"], person)
except ValueError as err:
connection.send_error(
msg["id"], websocket_api.const.ERR_INVALID_FORMAT, str(err)
)
@websocket_api.websocket_command(
{
vol.Required("type"): "person/update",
vol.Required("person_id"): str,
vol.Required("name"): vol.All(str, vol.Length(min=1)),
vol.Optional("user_id"): vol.Any(str, None),
vol.Optional(CONF_DEVICE_TRACKERS, default=[]): vol.All(
cv.ensure_list, cv.entities_domain(DEVICE_TRACKER_DOMAIN)
),
}
)
@websocket_api.require_admin
@websocket_api.async_response
async def ws_update_person(
hass: HomeAssistantType, connection: websocket_api.ActiveConnection, msg
):
"""Update a person."""
manager: PersonManager = hass.data[DOMAIN]
changes = {}
for key in ("name", "user_id", "device_trackers"):
if key in msg:
changes[key] = msg[key]
try:
person = await manager.async_update_person(msg["person_id"], **changes)
connection.send_result(msg["id"], person)
except ValueError as err:
connection.send_error(
msg["id"], websocket_api.const.ERR_INVALID_FORMAT, str(err)
)
@websocket_api.websocket_command(
{vol.Required("type"): "person/delete", vol.Required("person_id"): str}
)
@websocket_api.require_admin
@websocket_api.async_response
async def ws_delete_person(
hass: HomeAssistantType, connection: websocket_api.ActiveConnection, msg
):
"""Delete a person."""
manager: PersonManager = hass.data[DOMAIN]
await manager.async_delete_person(msg["person_id"])
connection.send_result(msg["id"])
def _get_latest(prev: Optional[State], curr: State):
"""Get latest state."""
if prev is None or curr.last_updated > prev.last_updated:
return curr
return prev
| apache-2.0 | -8,455,843,230,876,657,000 | 31.214689 | 87 | 0.603882 | false |
pszemus/grpc | examples/python/debug/get_stats.py | 1 | 1475 | # Copyright 2019 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Poll statistics from the server."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import argparse
import grpc
from grpc_channelz.v1 import channelz_pb2
from grpc_channelz.v1 import channelz_pb2_grpc
def run(addr):
with grpc.insecure_channel(addr) as channel:
channelz_stub = channelz_pb2_grpc.ChannelzStub(channel)
response = channelz_stub.GetServers(
channelz_pb2.GetServersRequest(start_server_id=0))
print('Info for all servers: %s' % response)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--addr',
type=str,
default='[::]:50051',
help='the address to request')
args = parser.parse_args()
run(addr=args.addr)
if __name__ == '__main__':
logging.basicConfig()
main()
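# Example invocation (illustrative; assumes a gRPC server with channelz enabled
# is listening on the given address):
#
#   python get_stats.py --addr localhost:50051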
| apache-2.0 | -7,507,955,330,443,291,000 | 28.5 | 74 | 0.697627 | false |
amjames/psi4 | psi4/driver/driver.py | 1 | 119607 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2018 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with a *procedures* dictionary specifying available quantum
chemical methods and functions driving the main quantum chemical
functionality, namely single-point energies, geometry optimizations,
properties, and vibrational frequency calculations.
"""
from __future__ import print_function
from __future__ import absolute_import
import os
import re
import sys
import json
import shutil
import numpy as np
from psi4.driver import driver_util
from psi4.driver import driver_cbs
from psi4.driver import driver_nbody
from psi4.driver import p4util
from psi4.driver import qcdb
from psi4.driver.procrouting import *
from psi4.driver.p4util.exceptions import *
# never import wrappers or aliases into this file
def _find_derivative_type(ptype, method_name, user_dertype):
r"""
Figures out the derivative type (0, 1, 2) for a given method_name. Will
first use user default and then the highest available derivative type for
a given method.
"""
if ptype not in ['gradient', 'hessian']:
raise ValidationError("_find_derivative_type: ptype must either be gradient or hessian.")
dertype = "(auto)"
# If user type is None, try to find the highest derivative
if user_dertype is None:
if (ptype == 'hessian') and (method_name in procedures['hessian']):
dertype = 2
# Will need special logic if we ever have managed Hessians
elif method_name in procedures['gradient']:
dertype = 1
if procedures['gradient'][method_name].__name__.startswith('select_'):
try:
procedures['gradient'][method_name](method_name, probe=True)
except ManagedMethodError:
dertype = 0
elif method_name in procedures['energy']:
dertype = 0
else:
# Quick sanity check. Only *should* be able to be None or int, but hey, kids today...
if not isinstance(user_dertype, int):
raise ValidationError("_find_derivative_type: user_dertype should only be None or int!")
dertype = user_dertype
if (core.get_global_option('INTEGRAL_PACKAGE') == 'ERD') and (dertype != 0):
raise ValidationError('INTEGRAL_PACKAGE ERD does not play nicely with derivatives, so stopping.')
if (core.get_global_option('PCM')) and (dertype != 0):
core.print_out('\nPCM analytic gradients are not implemented yet, re-routing to finite differences.\n')
dertype = 0
# Summary validation
if (dertype == 2) and (method_name in procedures['hessian']):
pass
elif (dertype == 1) and (method_name in procedures['gradient']):
pass
elif (dertype == 0) and (method_name in procedures['energy']):
pass
else:
alternatives = ''
alt_method_name = p4util.text.find_approximate_string_matches(method_name, procedures['energy'].keys(), 2)
if len(alt_method_name) > 0:
alternatives = """ Did you mean? %s""" % (' '.join(alt_method_name))
raise ValidationError("""Derivative method 'name' %s and derivative level 'dertype' %s are not available.%s"""
% (method_name, str(dertype), alternatives))
return dertype
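# Illustrative outcomes of the selection above (method availability is
# hypothetical): with ptype='gradient', a method present in
# procedures['gradient'] resolves to dertype=1, while a method found only in
# procedures['energy'] falls back to dertype=0, i.e. a finite-difference
# gradient.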
def _energy_is_invariant(gradient, stationary_criterion=1.e-2):
"""Polls options and probes `gradient` to return whether current method
and system expected to be invariant to translations and rotations of
the coordinate system.
"""
stationary_point = gradient.rms() < stationary_criterion # 1.e-2 pulled out of a hat
efp = core.get_active_efp()
efp_present = efp.nfragments() > 0
translations_projection_sound = (not core.get_option('SCF', 'EXTERN') and
not core.get_option('SCF', 'PERTURB_H') and
not efp_present)
rotations_projection_sound = (translations_projection_sound and
stationary_point)
return translations_projection_sound, rotations_projection_sound
def energy(name, **kwargs):
r"""Function to compute the single-point electronic energy.
:returns: *float* |w--w| Total electronic energy in Hartrees. SAPT & EFP return interaction energy.
:returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| energy and wavefunction when **return_wfn** specified.
:PSI variables:
.. hlist::
:columns: 1
* :psivar:`CURRENT ENERGY <CURRENTENERGY>`
* :psivar:`CURRENT REFERENCE ENERGY <CURRENTREFERENCEENERGY>`
* :psivar:`CURRENT CORRELATION ENERGY <CURRENTCORRELATIONENERGY>`
:type name: string
:param name: ``'scf'`` || ``'mp2'`` || ``'ci5'`` || etc.
First argument, usually unlabeled. Indicates the computational method
to be applied to the system.
:type molecule: :ref:`molecule <op_py_molecule>`
:param molecule: ``h2o`` || etc.
The target molecule, if not the last molecule defined.
:type return_wfn: :ref:`boolean <op_py_boolean>`
:param return_wfn: ``'on'`` || |dl| ``'off'`` |dr|
Indicate to additionally return the :py:class:`~psi4.core.Wavefunction`
calculation result as the second element (after *float* energy) of a tuple.
:type restart_file: string
:param restart_file: ``['file.1, file.32]`` || ``./file`` || etc.
Binary data files to be renamed for calculation restart.
.. _`table:energy_gen`:
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| name | calls method |
+=========================+===============================================================================================================+
| efp | effective fragment potential (EFP) :ref:`[manual] <sec:libefp>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scf | Hartree--Fock (HF) or density functional theory (DFT) :ref:`[manual] <sec:scf>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| hf | HF self consistent field (SCF) :ref:`[manual] <sec:scf>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| hf3c | HF with dispersion, BSSE, and basis set corrections :ref:`[manual] <sec:gcp>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| pbeh3c | PBEh with dispersion, BSSE, and basis set corrections :ref:`[manual] <sec:gcp>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| dcft | density cumulant functional theory :ref:`[manual] <sec:dcft>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp2 | 2nd-order |MollerPlesset| perturbation theory (MP2) :ref:`[manual] <sec:dfmp2>` :ref:`[details] <tlmp2>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp3 | 3rd-order |MollerPlesset| perturbation theory (MP3) :ref:`[manual] <sec:occ_nonoo>` :ref:`[details] <tlmp3>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-mp3 | MP3 with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp2.5 | average of MP2 and MP3 :ref:`[manual] <sec:occ_nonoo>` :ref:`[details] <tlmp25>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp4(sdq) | 4th-order MP perturbation theory (MP4) less triples :ref:`[manual] <sec:fnompn>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-mp4(sdq) | MP4 (less triples) with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp4 | full MP4 :ref:`[manual] <sec:fnompn>` :ref:`[details] <tlmp4>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-mp4 | full MP4 with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp\ *n* | *n*\ th-order |MollerPlesset| (MP) perturbation theory :ref:`[manual] <sec:arbpt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| zapt\ *n* | *n*\ th-order z-averaged perturbation theory (ZAPT) :ref:`[manual] <sec:arbpt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| omp2 | orbital-optimized second-order MP perturbation theory :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scs-omp2 | spin-component scaled OMP2 :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scs(n)-omp2 | a special version of SCS-OMP2 for nucleobase interactions :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scs-omp2-vdw | a special version of SCS-OMP2 (from ethene dimers) :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sos-omp2 | spin-opposite scaled OMP2 :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sos-pi-omp2 | A special version of SOS-OMP2 for pi systems :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| omp3 | orbital-optimized third-order MP perturbation theory :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scs-omp3 | spin-component scaled OMP3 :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scs(n)-omp3 | a special version of SCS-OMP3 for nucleobase interactions :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scs-omp3-vdw | a special version of SCS-OMP3 (from ethene dimers) :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sos-omp3 | spin-opposite scaled OMP3 :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sos-pi-omp3 | A special version of SOS-OMP3 for pi systems :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| omp2.5 | orbital-optimized MP2.5 :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| lccsd, cepa(0) | coupled electron pair approximation variant 0 :ref:`[manual] <sec:fnocepa>` :ref:`[details] <tllccsd>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-lccsd, fno-cepa(0) | CEPA(0) with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cepa(1) | coupled electron pair approximation variant 1 :ref:`[manual] <sec:fnocepa>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-cepa(1) | CEPA(1) with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cepa(3) | coupled electron pair approximation variant 3 :ref:`[manual] <sec:fnocepa>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-cepa(3) | CEPA(3) with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| acpf | averaged coupled-pair functional :ref:`[manual] <sec:fnocepa>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-acpf | ACPF with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| aqcc | averaged quadratic coupled cluster :ref:`[manual] <sec:fnocepa>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-aqcc | AQCC with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| qcisd | quadratic CI singles doubles (QCISD) :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-qcisd | QCISD with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| lccd | Linear CCD :ref:`[manual] <sec:occ_nonoo>` :ref:`[details] <tllccd>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-lccd | LCCD with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| olccd | orbital optimized LCCD :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cc2 | approximate coupled cluster singles and doubles (CC2) :ref:`[manual] <sec:cc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccd | coupled cluster doubles (CCD) :ref:`[manual] <sec:occ_nonoo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccsd | coupled cluster singles and doubles (CCSD) :ref:`[manual] <sec:cc>` :ref:`[details] <tlccsd>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| bccd | Brueckner coupled cluster doubles (BCCD) :ref:`[manual] <sec:cc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-ccsd | CCSD with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| qcisd(t) | QCISD with perturbative triples :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-qcisd(t) | QCISD(T) with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccsd(t) | CCSD with perturbative triples (CCSD(T)) :ref:`[manual] <sec:cc>` :ref:`[details] <tlccsdt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccsd(at) | CCSD with asymmetric perturbative triples (CCSD(AT)) :ref:`[manual] <sec:cc>` :ref:`[details] <tlccsdat>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| bccd(t) | BCCD with perturbative triples :ref:`[manual] <sec:cc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-ccsd(t) | CCSD(T) with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cc3 | approximate CC singles, doubles, and triples (CC3) :ref:`[manual] <sec:cc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccenergy | **expert** full control over ccenergy module |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| dfocc | **expert** full control over dfocc module |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cisd | configuration interaction (CI) singles and doubles (CISD) :ref:`[manual] <sec:ci>` :ref:`[details] <tlcisd>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fno-cisd | CISD with frozen natural orbitals :ref:`[manual] <sec:fnocc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cisdt | CI singles, doubles, and triples (CISDT) :ref:`[manual] <sec:ci>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| cisdtq | CI singles, doubles, triples, and quadruples (CISDTQ) :ref:`[manual] <sec:ci>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ci\ *n* | *n*\ th-order CI :ref:`[manual] <sec:ci>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fci | full configuration interaction (FCI) :ref:`[manual] <sec:ci>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| detci | **expert** full control over detci module |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| casscf | complete active space self consistent field (CASSCF) :ref:`[manual] <sec:ci>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| rasscf | restricted active space self consistent field (RASSCF) :ref:`[manual] <sec:ci>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mcscf | multiconfigurational self consistent field (SCF) :ref:`[manual] <sec:psimrcc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| psimrcc | Mukherjee multireference coupled cluster (Mk-MRCC) :ref:`[manual] <sec:psimrcc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| dmrg-scf | density matrix renormalization group SCF :ref:`[manual] <sec:chemps2>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| dmrg-caspt2 | density matrix renormalization group CASPT2 :ref:`[manual] <sec:chemps2>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| dmrg-ci | density matrix renormalization group CI :ref:`[manual] <sec:chemps2>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt0 | 0th-order symmetry adapted perturbation theory (SAPT) :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ssapt0 | 0th-order SAPT with special exchange scaling :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| fisapt0 | 0th-order functional and/or intramolecular SAPT :ref:`[manual] <sec:fisapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2 | 2nd-order SAPT, traditional definition :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+ | SAPT including all 2nd-order terms :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(3) | SAPT including perturbative triples :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+3 | SAPT including all 3rd-order terms :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(ccd) | SAPT2+ with CC-based dispersion :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(3)(ccd) | SAPT2+(3) with CC-based dispersion :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+3(ccd) | SAPT2+3 with CC-based dispersion :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+dmp2 | SAPT including all 2nd-order terms and MP2 correction :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(3)dmp2 | SAPT including perturbative triples and MP2 correction :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+3dmp2 | SAPT including all 3rd-order terms and MP2 correction :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(ccd)dmp2 | SAPT2+ with CC-based dispersion and MP2 correction :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(3)(ccd)dmp2 | SAPT2+(3) with CC-based dispersion and MP2 correction :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+3(ccd)dmp2 | SAPT2+3 with CC-based dispersion and MP2 correction :ref:`[manual] <sec:sapt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt0-ct | 0th-order SAPT plus charge transfer (CT) calculation :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2-ct | SAPT2 plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+-ct | SAPT2+ plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(3)-ct | SAPT2+(3) plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+3-ct | SAPT2+3 plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(ccd)-ct | SAPT2+(CCD) plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+(3)(ccd)-ct | SAPT2+(3)(CCD) plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| sapt2+3(ccd)-ct | SAPT2+3(CCD) plus CT :ref:`[manual] <sec:saptct>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| adc | 2nd-order algebraic diagrammatic construction (ADC) :ref:`[manual] <sec:adc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| eom-cc2 | EOM-CC2 :ref:`[manual] <sec:eomcc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| eom-ccsd | equation of motion (EOM) CCSD :ref:`[manual] <sec:eomcc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| eom-cc3 | EOM-CC3 :ref:`[manual] <sec:eomcc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
.. comment missing and why
.. comment a certain isapt --- marginally released
.. comment mrcc --- this is handled in its own table
.. comment psimrcc_scf --- convenience fn
.. include:: ../autodoc_dft_energy.rst
.. include:: ../mrcc_table_energy.rst
.. include:: ../cfour_table_energy.rst
:examples:
>>> # [1] Coupled-cluster singles and doubles calculation with psi code
>>> energy('ccsd')
>>> # [2] Charge-transfer SAPT calculation with scf projection from small into
>>> # requested basis, with specified projection fitting basis
>>> set basis_guess true
>>> set df_basis_guess jun-cc-pVDZ-JKFIT
>>> energy('sapt0-ct')
>>> # [3] Arbitrary-order MPn calculation
>>> energy('mp7')
>>> # [4] Converge scf as singlet, then run detci as triplet upon singlet reference
>>> # Note that the integral transformation is not done automatically when detci is run in a separate step.
>>> molecule H2 {\n0 1\nH\nH 1 0.74\n}
>>> set basis cc-pVDZ
>>> set reference rohf
>>> scf_e, scf_wfn = energy('scf', return_wfn=True)
>>> H2.set_multiplicity(3)
>>> core.MintsHelper(scf_wfn.basisset()).integrals()
>>> energy('detci', ref_wfn=scf_wfn)
>>> # [5] Run two CI calculations, keeping the integrals generated in the first one.
>>> molecule ne {\nNe\n}
>>> set basis cc-pVDZ
>>> cisd_e, cisd_wfn = energy('cisd', return_wfn=True)
>>> energy('fci', ref_wfn=cisd_wfn)
>>> # [6] Can automatically perform complete basis set extrapolations
>>> energy("CCSD/cc-pV[DT]Z")
>>> # [7] Can automatically perform delta corrections that include extrapolations
>>> # even with a user-defined extrapolation formula. See sample inputs named
>>> # cbs-xtpl* for more examples of this input style
>>> energy("MP2/aug-cc-pv([d,t]+d)z + d:ccsd(t)/cc-pvdz", corl_scheme=myxtplfn_2)
"""
kwargs = p4util.kwargs_lower(kwargs)
# Bounce if name is function
if hasattr(name, '__call__'):
return name(energy, kwargs.pop('label', 'custom function'), ptype='energy', **kwargs)
# Allow specification of methods to arbitrary order
lowername = name.lower()
lowername, level = driver_util.parse_arbitrary_order(lowername)
if level:
kwargs['level'] = level
# Bounce to CP if bsse kwarg
if kwargs.get('bsse_type', None) is not None:
return driver_nbody.nbody_gufunc(energy, name, ptype='energy', **kwargs)
# Bounce to CBS if "method/basis" name
if "/" in lowername:
return driver_cbs._cbs_gufunc(energy, name, ptype='energy', **kwargs)
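# Informal summary of the bounces above: a callable `name` is invoked directly,
# an arbitrary-order token (e.g. 'mp7') only sets kwargs['level'], a bsse_type
# kwarg reroutes through the many-body wrapper, and a "method/basis" string
# reroutes through the CBS wrapper. Only a plain method name continues on to the
# procedures['energy'] dispatch below.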
# Commit to procedures['energy'] call hereafter
return_wfn = kwargs.pop('return_wfn', False)
core.clean_variables()
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
#for precallback in hooks['energy']['pre']:
# precallback(lowername, **kwargs)
optstash = driver_util._set_convergence_criterion('energy', lowername, 6, 8, 6, 8, 6)
# Before invoking the procedure, we rename any file that should be read.
# This is a workaround to do restarts with the current PSI4 capabilities
# before actual, clean restarts are put in there
# Restartfile is always converted to a single-element list if
# it contains a single string
# DGAS Note: This is hacked together at this point and should be revamped.
if 'restart_file' in kwargs:
restartfile = kwargs['restart_file'] # Option still available for procedure-specific action
if not isinstance(restartfile, (list, tuple)):
restartfile = (restartfile, )
# Rename the files to be read to be consistent with psi4's file system
for item in restartfile:
name_split = re.split(r'\.', item)
if "npz" in item:
fname = os.path.split(os.path.abspath(core.get_writer_file_prefix(molecule.name())))[1]
psi_scratch = core.IOManager.shared_object().get_default_path()
file_num = item.split('.')[-2]
targetfile = os.path.join(psi_scratch, fname + "." + file_num + ".npz")
else:
filenum = name_split[-1]
try:
filenum = int(filenum)
except ValueError:
filenum = 32 # Default file number is the checkpoint one
psioh = core.IOManager.shared_object()
psio = core.IO.shared_object()
filepath = psioh.get_file_path(filenum)
namespace = psio.get_default_namespace()
pid = str(os.getpid())
prefix = 'psi'
targetfile = filepath + prefix + '.' + pid + '.' + namespace + '.' + str(filenum)
shutil.copy(item, targetfile)
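# Illustrative only (hypothetical filenames): restart_file='./prior.32' would be
# copied to <scratch>/psi.<pid>.<namespace>.32, while an .npz request such as
# restart_file='./prior.180.npz' is copied under the writer-file prefix in scratch.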
wfn = procedures['energy'][lowername](lowername, molecule=molecule, **kwargs)
for postcallback in hooks['energy']['post']:
postcallback(lowername, wfn=wfn, **kwargs)
optstash.restore()
if return_wfn: # TODO current energy safer than wfn.energy() for now, but should be revisited
# TODO place this with the associated call, very awkward to call this in other areas at the moment
if lowername in ['efp', 'mrcc', 'dmrg', 'psimrcc']:
core.print_out("\n\nWarning! %s does not have an associated derived wavefunction." % name)
core.print_out("The returned wavefunction is the incoming reference wavefunction.\n\n")
elif 'sapt' in lowername:
core.print_out("\n\nWarning! %s does not have an associated derived wavefunction." % name)
core.print_out("The returned wavefunction is the dimer SCF wavefunction.\n\n")
return (core.get_variable('CURRENT ENERGY'), wfn)
else:
return core.get_variable('CURRENT ENERGY')
def gradient(name, **kwargs):
r"""Function complementary to :py:func:~driver.optimize(). Carries out one gradient pass,
deciding analytic or finite difference.
:returns: :py:class:`~psi4.core.Matrix` |w--w| Total electronic gradient in Hartrees/Bohr.
:returns: (:py:class:`~psi4.core.Matrix`, :py:class:`~psi4.core.Wavefunction`) |w--w| gradient and wavefunction when **return_wfn** specified.
:examples:
>>> # [1] Single-point dft gradient getting the gradient
>>> # in file, core.Matrix, and np.array forms
>>> set gradient_write on
>>> G, wfn = gradient('b3lyp-d', return_wfn=True)
>>> wfn.gradient().print_out()
>>> np.array(G)
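>>> # [2] Illustrative sketch: "method/basis" names are routed through the CBS
>>> #     machinery, here for an analytic MP2 gradient extrapolation
>>> G = gradient('mp2/cc-pv[dt]z')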
"""
kwargs = p4util.kwargs_lower(kwargs)
# Bounce to CP if bsse kwarg (someday)
if kwargs.get('bsse_type', None) is not None:
raise ValidationError("Gradient: Cannot specify bsse_type for gradient yet.")
# Figure out what kind of gradient this is
if hasattr(name, '__call__'):
if name.__name__ in ['cbs', 'complete_basis_set']:
gradient_type = 'cbs_wrapper'
else:
# Bounce to name if name is non-CBS function
gradient_type = 'custom_function'
elif '/' in name:
gradient_type = 'cbs_gufunc'
else:
gradient_type = 'conventional'
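# gradient_type now labels the request: 'custom_function' (user-supplied callable),
# 'cbs_wrapper' (cbs/complete_basis_set passed directly), 'cbs_gufunc'
# ("method/basis" string), or 'conventional' (plain method name).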
# Figure out lowername, dertype, and func
# If we have analytical gradients we want to pass to our wrappers, otherwise we want to run
# finite-difference energy or cbs energies
# TODO MP5/cc-pv[DT]Z behavior unknown due to "levels"
user_dertype = kwargs.pop('dertype', None)
if gradient_type == 'custom_function':
if user_dertype is None:
dertype = 0
core.print_out("\nGradient: Custom function passed in without a defined dertype, assuming fd-energy based gradient.\n")
else:
core.print_out("\nGradient: Custom function passed in with a dertype of %d\n" % user_dertype)
dertype = user_dertype
if dertype == 1:
return name(gradient, kwargs.pop('label', 'custom function'), ptype='gradient', **kwargs)
else:
optstash = driver_util._set_convergence_criterion('energy', 'scf', 8, 10, 8, 10, 8)
lowername = name
elif gradient_type == 'cbs_wrapper':
cbs_methods = driver_cbs._cbs_wrapper_methods(**kwargs)
dertype = min([_find_derivative_type('gradient', method, user_dertype) for method in cbs_methods])
if dertype == 1:
# Bounce to CBS (directly) in pure-gradient mode if name is CBS and all parts have analytic grad. avail.
return name(gradient, kwargs.pop('label', 'custom function'), ptype='gradient', **kwargs)
else:
optstash = driver_util._set_convergence_criterion('energy', cbs_methods[0], 8, 10, 8, 10, 8)
lowername = name
# Pass through to G by E
elif gradient_type == 'cbs_gufunc':
cbs_methods = driver_cbs._parse_cbs_gufunc_string(name.lower())[0]
dertype = min([_find_derivative_type('gradient', method, user_dertype) for method in cbs_methods])
lowername = name.lower()
if dertype == 1:
# Bounce to CBS in pure-gradient mode if "method/basis" name and all parts have analytic grad. avail.
return driver_cbs._cbs_gufunc(gradient, name, ptype='gradient', **kwargs)
else:
# Set method-dependent scf convergence criteria (test on procedures['energy'] since that's guaranteed)
optstash = driver_util._set_convergence_criterion('energy', cbs_methods[0], 8, 10, 8, 10, 8)
else:
# Allow specification of methods to arbitrary order
lowername = name.lower()
lowername, level = driver_util.parse_arbitrary_order(lowername)
if level:
kwargs['level'] = level
# Prevent methods that do not have associated gradients
if lowername in energy_only_methods:
raise ValidationError("gradient('%s') does not have an associated gradient" % name)
dertype = _find_derivative_type('gradient', lowername, user_dertype)
# Set method-dependent scf convergence criteria (test on procedures['energy'] since that's guaranteed)
optstash = driver_util._set_convergence_criterion('energy', lowername, 8, 10, 8, 10, 8)
# Commit to procedures[] call hereafter
return_wfn = kwargs.pop('return_wfn', False)
core.clean_variables()
# no analytic derivatives for scf_type cd
if core.get_global_option('SCF_TYPE') == 'CD':
if (dertype == 1):
raise ValidationError("""No analytic derivatives for SCF_TYPE CD.""")
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
# S/R: Mode of operation- whether finite difference opt run in one job or files farmed out
opt_mode = kwargs.get('mode', 'continuous').lower()
if opt_mode == 'continuous':
pass
elif opt_mode == 'sow':
if dertype == 1:
raise ValidationError("""Optimize execution mode 'sow' not valid for analytic gradient calculation.""")
elif opt_mode == 'reap':
opt_linkage = kwargs.get('linkage', None)
if opt_linkage is None:
raise ValidationError("""Optimize execution mode 'reap' requires a linkage option.""")
else:
raise ValidationError("""Optimize execution mode '%s' not valid.""" % (opt_mode))
# Does dertype indicate an analytic procedure both exists and is wanted?
if dertype == 1:
core.print_out("""gradient() will perform analytic gradient computation.\n""")
# Perform the gradient calculation
wfn = procedures['gradient'][lowername](lowername, molecule=molecule, **kwargs)
optstash.restore()
if return_wfn:
return (wfn.gradient(), wfn)
else:
return wfn.gradient()
else:
core.print_out("""gradient() will perform gradient computation by finite difference of analytic energies.\n""")
opt_iter = kwargs.get('opt_iter', 1)
if opt_iter is True:
opt_iter = 1
if opt_iter == 1:
print('Performing finite difference calculations')
# Shifting the geometry so need to copy the active molecule
moleculeclone = molecule.clone()
# Obtain list of displacements
# print("about to generate displacements")
displacements = core.fd_geoms_1_0(moleculeclone)
# print(displacements)
ndisp = len(displacements)
# print("generated displacments")
# This version is pretty dependent on the reference geometry being last (as it is now)
print(""" %d displacements needed ...""" % (ndisp), end='')
energies = []
# S/R: Write instructions for sow/reap procedure to output file and reap input file
if opt_mode == 'sow':
instructionsO = """\n The optimization sow/reap procedure has been selected through mode='sow'. In addition\n"""
instructionsO += """ to this output file (which contains no quantum chemical calculations), this job\n"""
instructionsO += """ has produced a number of input files (OPT-%s-*.in) for individual components\n""" % (str(opt_iter))
instructionsO += """ and a single input file (OPT-master.in) with an optimize(mode='reap') command.\n"""
instructionsO += """ These files may look very peculiar since they contain processed and pickled python\n"""
instructionsO += """ rather than normal input. Follow the instructions in OPT-master.in to continue.\n\n"""
instructionsO += """ Alternatively, a single-job execution of the gradient may be accessed through\n"""
instructionsO += """ the optimization wrapper option mode='continuous'.\n\n"""
core.print_out(instructionsO)
instructionsM = """\n# Follow the instructions below to carry out this optimization cycle.\n#\n"""
instructionsM += """# (1) Run all of the OPT-%s-*.in input files on any variety of computer architecture.\n""" % (str(opt_iter))
instructionsM += """# The output file names must be as given below.\n#\n"""
for rgt in range(ndisp):
pre = 'OPT-' + str(opt_iter) + '-' + str(rgt + 1)
instructionsM += """# psi4 -i %-27s -o %-27s\n""" % (pre + '.in', pre + '.out')
instructionsM += """#\n# (2) Gather all the resulting output files in a directory. Place input file\n"""
instructionsM += """# OPT-master.in into that directory and run it. The job will be minimal in\n"""
instructionsM += """# length and give summary results for the gradient step in its output file.\n#\n"""
if opt_iter == 1:
instructionsM += """# psi4 -i %-27s -o %-27s\n#\n""" % ('OPT-master.in', 'OPT-master.out')
else:
instructionsM += """# psi4 -a -i %-27s -o %-27s\n#\n""" % ('OPT-master.in', 'OPT-master.out')
instructionsM += """# After each optimization iteration, the OPT-master.in file is overwritten so return here\n"""
instructionsM += """# for new instructions. With the use of the psi4 -a flag, OPT-master.out is not\n"""
instructionsM += """# overwritten and so maintains a history of the job. To use the (binary) optimizer\n"""
instructionsM += """# data file to accelerate convergence, the OPT-master jobs must run on the same computer.\n\n"""
with open('OPT-master.in', 'wb') as fmaster:
fmaster.write('# This is a psi4 input file auto-generated from the gradient() wrapper.\n\n'.encode('utf-8'))
fmaster.write(p4util.format_molecule_for_input(moleculeclone).encode('utf-8'))
fmaster.write(p4util.format_options_for_input().encode('utf-8'))
p4util.format_kwargs_for_input(fmaster, lmode=2, return_wfn=True, dertype=dertype, **kwargs)
fmaster.write(("""retE, retwfn = optimize('%s', **kwargs)\n\n""" % (lowername)).encode('utf-8'))
fmaster.write(instructionsM.encode('utf-8'))
for n, displacement in enumerate(displacements):
rfile = 'OPT-%s-%s' % (opt_iter, n + 1)
# Build string of title banner
banners = ''
banners += """core.print_out('\\n')\n"""
banners += """p4util.banner(' Gradient %d Computation: Displacement %d ')\n""" % (opt_iter, n + 1)
banners += """core.print_out('\\n')\n\n"""
if opt_mode == 'continuous':
# print progress to file and screen
core.print_out('\n')
p4util.banner('Loading displacement %d of %d' % (n + 1, ndisp))
print(""" %d""" % (n + 1), end=('\n' if (n + 1 == ndisp) else ''))
sys.stdout.flush()
# Load in displacement into the active molecule
moleculeclone.set_geometry(displacement)
# Perform the energy calculation
E, wfn = energy(lowername, return_wfn=True, molecule=moleculeclone, **kwargs)
energies.append(core.get_variable('CURRENT ENERGY'))
# S/R: Write each displaced geometry to an input file
elif opt_mode == 'sow':
moleculeclone.set_geometry(displacement)
# S/R: Prepare molecule, options, and kwargs
with open('%s.in' % (rfile), 'wb') as freagent:
freagent.write('# This is a psi4 input file auto-generated from the gradient() wrapper.\n\n'.encode('utf-8'))
freagent.write(p4util.format_molecule_for_input(moleculeclone).encode('utf-8'))
freagent.write(p4util.format_options_for_input().encode('utf-8'))
p4util.format_kwargs_for_input(freagent, **kwargs)
# S/R: Prepare function call and energy save
freagent.write(("""electronic_energy = energy('%s', **kwargs)\n\n""" % (lowername)).encode('utf-8'))
freagent.write(("""core.print_out('\\nGRADIENT RESULT: computation %d for item %d """ % (os.getpid(), n + 1)).encode('utf-8'))
freagent.write("""yields electronic energy %20.12f\\n' % (electronic_energy))\n\n""".encode('utf-8'))
# S/R: Read energy from each displaced geometry output file and save in energies array
elif opt_mode == 'reap':
exec(banners)
core.set_variable('NUCLEAR REPULSION ENERGY', moleculeclone.nuclear_repulsion_energy())
energies.append(p4util.extract_sowreap_from_output(rfile, 'GRADIENT', n, opt_linkage, True))
# S/R: Quit sow after writing files. Initialize skeleton wfn to receive grad for reap
if opt_mode == 'sow':
optstash.restore()
if return_wfn:
return (None, None) # any point to building a dummy wfn here?
else:
return None
elif opt_mode == 'reap':
core.set_variable('CURRENT ENERGY', energies[-1])
wfn = core.Wavefunction.build(molecule, core.get_global_option('BASIS'))
# Compute the gradient; last item in 'energies' is undisplaced
core.set_local_option('FINDIF', 'GRADIENT_WRITE', True)
G = core.fd_1_0(molecule, energies)
G.print_out()
wfn.set_gradient(G)
optstash.restore()
if return_wfn:
return (wfn.gradient(), wfn)
else:
return wfn.gradient()
def properties(*args, **kwargs):
r"""Function to compute various properties.
:aliases: prop()
:returns: none.
.. caution:: Some features are not yet implemented. Buy a developer a coffee.
- This function at present has limited functionality.
Consult the keywords sections of other modules for further property capabilities.
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| Name | Calls Method | Reference | Supported Properties |
+====================+===============================================+================+===============================================================+
| scf | Self-consistent field method(s) | RHF/ROHF/UHF | Listed :ref:`here <sec:oeprop>` |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| hf | HF Self-consistent field method(s) | RHF/ROHF/UHF | Listed :ref:`here <sec:oeprop>` |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| mp2 | MP2 with density fitting only (mp2_type df) | RHF | Listed :ref:`here <sec:oeprop>` |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| cc2 | 2nd-order approximate CCSD | RHF | dipole, quadrupole, polarizability, rotation, roa_tensor |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| ccsd | Coupled cluster singles and doubles (CCSD) | RHF | dipole, quadrupole, polarizability, rotation, roa_tensor |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| eom-cc2 | 2nd-order approximate EOM-CCSD | RHF | oscillator_strength, rotational_strength |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| eom-ccsd | Equation-of-motion CCSD (EOM-CCSD) | RHF | oscillator_strength, rotational_strength |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| cisd, cisdt, | Configuration interaction | RHF/ROHF | Listed :ref:`here <sec:oeprop>`, transition_dipole, |
| cisdt, cisdtq, | | | transition_quadrupole |
| ci5, ..., fci | | | |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
| casscf, rasscf | Multi-configurational SCF | RHF/ROHF | Listed :ref:`here <sec:oeprop>`, transition_dipole, |
| | | | transition_quadrupole |
+--------------------+-----------------------------------------------+----------------+---------------------------------------------------------------+
:type name: string
:param name: ``'ccsd'`` || etc.
First argument, usually unlabeled. Indicates the computational method
to be applied to the system.
:type properties: array of strings
:param properties: |dl| ``[]`` |dr| || ``['rotation', 'polarizability', 'oscillator_strength', 'roa']`` || etc.
Indicates which properties should be computed. Defaults to dipole and quadrupole.
:type molecule: :ref:`molecule <op_py_molecule>`
:param molecule: ``h2o`` || etc.
The target molecule, if not the last molecule defined.
:examples:
>>> # [1] Optical rotation calculation
>>> properties('cc2', properties=['rotation'])
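>>> # [2] Illustrative sketch: density-fitted MP2 one-electron properties,
>>> #     also returning the wavefunction
>>> e, wfn = properties('mp2', properties=['dipole', 'quadrupole'], return_wfn=True)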
"""
kwargs = p4util.kwargs_lower(kwargs)
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
kwargs['molecule'] = molecule
# Allow specification of methods to arbitrary order
lowername = args[0].lower()
lowername, level = driver_util.parse_arbitrary_order(lowername)
if level:
kwargs['level'] = level
if "/" in lowername:
return driver_cbs._cbs_gufunc(properties, lowername, ptype='properties', **kwargs)
return_wfn = kwargs.pop('return_wfn', False)
props = kwargs.get('properties', ['dipole', 'quadrupole'])
if len(args) > 1:
props += args[1:]
kwargs['properties'] = p4util.drop_duplicates(props)
optstash = driver_util._set_convergence_criterion('properties', lowername, 6, 10, 6, 10, 8)
wfn = procedures['properties'][lowername](lowername, **kwargs)
optstash.restore()
if return_wfn:
return (core.get_variable('CURRENT ENERGY'), wfn)
else:
return core.get_variable('CURRENT ENERGY')
def optimize(name, **kwargs):
r"""Function to perform a geometry optimization.
:aliases: opt()
:returns: *float* |w--w| Total electronic energy of optimized structure in Hartrees.
:returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| energy and wavefunction when **return_wfn** specified.
:raises: psi4.OptimizationConvergenceError if |optking__geom_maxiter| exceeded without reaching geometry convergence.
:PSI variables:
.. hlist::
:columns: 1
* :psivar:`CURRENT ENERGY <CURRENTENERGY>`
:type name: string
:param name: ``'scf'`` || ``'mp2'`` || ``'ci5'`` || etc.
First argument, usually unlabeled. Indicates the computational method
to be applied to the system. May be any valid argument to
:py:func:`~driver.energy`.
:type molecule: :ref:`molecule <op_py_molecule>`
:param molecule: ``h2o`` || etc.
The target molecule, if not the last molecule defined.
:type return_wfn: :ref:`boolean <op_py_boolean>`
:param return_wfn: ``'on'`` || |dl| ``'off'`` |dr|
Indicate to additionally return the :py:class:`~psi4.core.Wavefunction`
calculation result as the second element (after *float* energy) of a tuple.
:type return_history: :ref:`boolean <op_py_boolean>`
:param return_history: ``'on'`` || |dl| ``'off'`` |dr|
Indicate to additionally return dictionary of lists of geometries,
energies, and gradients at each step in the optimization.
:type func: :ref:`function <op_py_function>`
:param func: |dl| ``gradient`` |dr| || ``energy`` || ``cbs``
Indicates the type of calculation to be performed on the molecule.
The default dertype accesses ``'gradient'`` or ``'energy'``, while
``'cbs'`` performs a multistage finite difference calculation.
If a nested series of python functions is intended (see :ref:`sec:intercalls`),
use keyword ``opt_func`` instead of ``func``.
:type mode: string
:param mode: |dl| ``'continuous'`` |dr| || ``'sow'`` || ``'reap'``
For a finite difference of energies optimization, indicates whether
the calculations required to complete the
optimization are to be run in one file (``'continuous'``) or are to be
farmed out in an embarrassingly parallel fashion
(``'sow'``/``'reap'``). For the latter, run an initial job with
``'sow'`` and follow instructions in its output file. For maximum
flexibility, ``return_wfn`` is always on in ``'reap'`` mode.
:type dertype: :ref:`dertype <op_py_dertype>`
:param dertype: ``'gradient'`` || ``'energy'``
Indicates whether analytic (if available) or finite difference
optimization is to be performed.
:type hessian_with: string
:param hessian_with: ``'scf'`` || ``'mp2'`` || etc.
Indicates the computational method with which to perform a hessian
analysis to guide the geometry optimization.
.. warning:: Optimizations where the molecule is specified in Z-matrix format
with dummy atoms will result in the geometry being converted to a Cartesian representation.
.. note:: Analytic gradients are available for all methods in the table
below. Optimizations with other methods in the energy table proceed
by finite differences.
.. _`table:grad_gen`:
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| name | calls method |
+=========================+===============================================================================================================+
| efp | efp-only optimizations |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| scf | Hartree--Fock (HF) or density functional theory (DFT) :ref:`[manual] <sec:scf>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| hf | HF self consistent field (SCF) :ref:`[manual] <sec:scf>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| dcft | density cumulant functional theory :ref:`[manual] <sec:dcft>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp2 | 2nd-order |MollerPlesset| perturbation theory (MP2) :ref:`[manual] <sec:dfmp2>` :ref:`[details] <tlmp2>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp3 | 3rd-order |MollerPlesset| perturbation theory (MP3) :ref:`[manual] <sec:occ_nonoo>` :ref:`[details] <tlmp3>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| mp2.5 | average of MP2 and MP3 :ref:`[manual] <sec:occ_nonoo>` :ref:`[details] <tlmp25>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| omp2 | orbital-optimized second-order MP perturbation theory :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| omp3 | orbital-optimized third-order MP perturbation theory :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| omp2.5 | orbital-optimized MP2.5 :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| lccd | Linear CCD :ref:`[manual] <sec:occ_nonoo>` :ref:`[details] <tllccd>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| olccd | orbital optimized LCCD :ref:`[manual] <sec:occ_oo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccd | coupled cluster doubles (CCD) :ref:`[manual] <sec:occ_nonoo>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccsd | coupled cluster singles and doubles (CCSD) :ref:`[manual] <sec:cc>` :ref:`[details] <tlccsd>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| ccsd(t) | CCSD with perturbative triples (CCSD(T)) :ref:`[manual] <sec:cc>` :ref:`[details] <tlccsdt>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| eom-ccsd | equation of motion (EOM) CCSD :ref:`[manual] <sec:eomcc>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
.. _`table:grad_scf`:
.. include:: ../autodoc_dft_opt.rst
.. include:: ../cfour_table_grad.rst
:examples:
>>> # [1] Analytic hf optimization
>>> optimize('hf')
>>> # [2] Finite difference mp5 optimization with gradient
>>> # printed to output file
>>> e, wfn = opt('mp5', return_wfn='yes')
>>> wfn.gradient().print_out()
>>> # [3] Forced finite difference hf optimization run in
>>> # embarrassingly parallel fashion
>>> optimize('hf', dertype='energy', mode='sow')
>>> # [4] Can automatically perform complete basis set extrapolations
>>> optimize('MP2/cc-pV([D,T]+d)Z')
>>> # [5] Can automatically perform delta corrections that include extrapolations
>>> # even with a user-defined extrapolation formula. See sample inputs named
>>> # cbs-xtpl* for more examples of this input style
>>> optimize("MP2/aug-cc-pv([d,t]+d)z + d:ccsd(t)/cc-pvdz", corl_scheme=myxtplfn_2)
>>> # [6] Get info like geometry, gradient, energy back after an
>>> # optimization fails. Note that the energy and gradient
>>> # correspond to the last optimization cycle, whereas the
>>> # geometry (by default) is the anticipated *next* optimization step.
>>> try:
>>> optimize('hf/cc-pvtz')
>>> except psi4.OptimizationConvergenceError as ex:
>>> next_geom_coords_as_numpy_array = np.asarray(ex.wfn.molecule().geometry())
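>>> # [7] Illustrative sketch: keep the per-step energies, gradients, and
>>> #     geometries gathered during the optimization
>>> e, wfn, history = optimize('scf', return_wfn=True, return_history=True)
>>> history['energy']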
"""
kwargs = p4util.kwargs_lower(kwargs)
if hasattr(name, '__call__'):
lowername = name
custom_gradient = True
else:
lowername = name.lower()
custom_gradient = False
return_wfn = kwargs.pop('return_wfn', False)
return_history = kwargs.pop('return_history', False)
if return_history:
# Add wfn once the deep copy issues are worked out
step_energies = []
step_gradients = []
step_coordinates = []
# For CBS wrapper, need to set retention on INTCO file
if custom_gradient or ('/' in lowername):
core.IOManager.shared_object().set_specific_retention(1, True)
if kwargs.get('bsse_type', None) is not None:
raise ValidationError("Optimize: Does not currently support 'bsse_type' arguements")
full_hess_every = core.get_option('OPTKING', 'FULL_HESS_EVERY')
steps_since_last_hessian = 0
if custom_gradient and core.has_option_changed('OPTKING', 'FULL_HESS_EVERY'):
raise ValidationError("Optimize: Does not support custom Hessian's yet.")
else:
hessian_with_method = kwargs.get('hessian_with', lowername)
# are we in sow/reap mode?
opt_mode = kwargs.get('mode', 'continuous').lower()
if opt_mode not in ['continuous', 'sow', 'reap']:
raise ValidationError("""Optimize execution mode '%s' not valid.""" % (opt_mode))
optstash = p4util.OptionsState(
['OPTKING', 'INTRAFRAG_STEP_LIMIT'],
['FINDIF', 'HESSIAN_WRITE'],
['OPTKING', 'CART_HESS_READ'],
['SCF', 'GUESS_PERSIST'], # handle on behalf of cbs()
['SCF', 'GUESS'])
n = kwargs.get('opt_iter', 1)
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
# If we are freezing Cartesian coordinates, do not reorient or recenter on the COM
if core.get_local_option("OPTKING", "FROZEN_CARTESIAN"):
molecule.fix_orientation(True)
molecule.fix_com(True)
molecule.update_geometry()
# Shifting the geometry so need to copy the active molecule
moleculeclone = molecule.clone()
initial_sym = moleculeclone.schoenflies_symbol()
while n <= core.get_option('OPTKING', 'GEOM_MAXITER'):
current_sym = moleculeclone.schoenflies_symbol()
if initial_sym != current_sym:
raise ValidationError("""Point group changed! (%s <-- %s) You should restart """
"""using the last geometry in the output, after """
"""carefully making sure all symmetry-dependent """
"""input, such as DOCC, is correct.""" %
(current_sym, initial_sym))
kwargs['opt_iter'] = n
# Use orbitals from previous iteration as a guess
# set within loop so that can be influenced by fns to optimize (e.g., cbs)
if (n > 1) and (opt_mode == 'continuous') and (not core.get_option('SCF', 'GUESS_PERSIST')):
core.set_local_option('SCF', 'GUESS', 'READ')
# Before computing gradient, save previous molecule and wavefunction if this is an IRC optimization
if (n > 1) and (core.get_option('OPTKING', 'OPT_TYPE') == 'IRC'):
old_thisenergy = core.get_variable('CURRENT ENERGY')
# Compute the gradient
G, wfn = gradient(lowername, return_wfn=True, molecule=moleculeclone, **kwargs)
thisenergy = core.get_variable('CURRENT ENERGY')
# above, used to be getting energy as last of energy list from gradient()
# thisenergy below should ultimately be testing on wfn.energy()
# Record optimization steps
# Add wavefunctions later
if return_history:
step_energies.append(thisenergy)
step_coordinates.append(moleculeclone.geometry())
step_gradients.append(G.clone())
# S/R: Quit after getting new displacements or if forming gradient fails
if opt_mode == 'sow':
return (0.0, None)
elif opt_mode == 'reap' and thisenergy == 0.0:
return (0.0, None)
core.set_gradient(G)
# S/R: Move opt data file from last pass into namespace for this pass
if opt_mode == 'reap' and n != 0:
core.IOManager.shared_object().set_specific_retention(1, True)
core.IOManager.shared_object().set_specific_path(1, './')
if 'opt_datafile' in kwargs:
restartfile = kwargs.pop('opt_datafile')
shutil.copy(restartfile, p4util.get_psifile(1))
# opt_func = kwargs.get('opt_func', kwargs.get('func', energy))
# if opt_func.__name__ == 'complete_basis_set':
# core.IOManager.shared_object().set_specific_retention(1, True)
if full_hess_every > -1:
core.set_global_option('HESSIAN_WRITE', True)
# compute Hessian as requested; frequency wipes out gradient so stash it
if ((full_hess_every > -1) and (n == 1)) or (steps_since_last_hessian + 1 == full_hess_every):
G = core.get_gradient() # TODO
core.IOManager.shared_object().set_specific_retention(1, True)
core.IOManager.shared_object().set_specific_path(1, './')
frequencies(hessian_with_method, **kwargs)
steps_since_last_hessian = 0
core.set_gradient(G)
core.set_global_option('CART_HESS_READ', True)
elif (full_hess_every == -1) and core.get_global_option('CART_HESS_READ') and (n == 1):
pass
# Do nothing; user said to read existing hessian once
else:
core.set_global_option('CART_HESS_READ', False)
steps_since_last_hessian += 1
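# Informal summary of the FULL_HESS_EVERY logic above: -1 leaves CART_HESS_READ
# untouched except to honor a user-provided Hessian on the first step; a value
# N >= 0 computes a full Hessian on step 1 and again every N steps thereafter
# (N = 0 means the first step only).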
# Take step. communicate to/from/within optking through legacy_molecule
core.set_legacy_molecule(moleculeclone)
optking_rval = core.optking()
moleculeclone = core.get_legacy_molecule()
moleculeclone.update_geometry()
if optking_rval == core.PsiReturnType.EndLoop:
# if this is the end of an IRC run, set wfn, energy, and molecule to that
# of the last optimized IRC point
if core.get_option('OPTKING', 'OPT_TYPE') == 'IRC':
thisenergy = old_thisenergy
print('Optimizer: Optimization complete!')
core.print_out('\n Final optimized geometry and variables:\n')
moleculeclone.print_in_input_format()
# Check if user wants to see the intcos; if so, don't delete them.
if core.get_option('OPTKING', 'INTCOS_GENERATE_EXIT') == False:
if core.get_option('OPTKING', 'KEEP_INTCOS') == False:
core.opt_clean()
# Changing environment to optimized geometry as expected by user
molecule.set_geometry(moleculeclone.geometry())
for postcallback in hooks['optimize']['post']:
postcallback(lowername, wfn=wfn, **kwargs)
core.clean()
# S/R: Clean up opt input file
if opt_mode == 'reap':
with open('OPT-master.in', 'wb') as fmaster:
fmaster.write('# This is a psi4 input file auto-generated from the gradient() wrapper.\n\n'.encode('utf-8'))
fmaster.write('# Optimization complete!\n\n'.encode('utf-8'))
# Cleanup binary file 1
if custom_gradient or ('/' in lowername):
core.IOManager.shared_object().set_specific_retention(1, False)
optstash.restore()
if return_history:
history = { 'energy' : step_energies ,
'gradient' : step_gradients ,
'coordinates' : step_coordinates,
}
if return_wfn and return_history:
return (thisenergy, wfn, history)
elif return_wfn and not return_history:
return (thisenergy, wfn)
elif return_history and not return_wfn:
return (thisenergy, history)
else:
return thisenergy
elif optking_rval == core.PsiReturnType.Failure:
print('Optimizer: Optimization failed!')
if (core.get_option('OPTKING', 'KEEP_INTCOS') == False):
core.opt_clean()
molecule.set_geometry(moleculeclone.geometry())
core.clean()
optstash.restore()
raise OptimizationConvergenceError("""geometry optimization""", n - 1, wfn)
return thisenergy
core.print_out('\n Structure for next step:\n')
moleculeclone.print_in_input_format()
# S/R: Preserve opt data file for next pass and switch modes to get new displacements
if opt_mode == 'reap':
kwargs['opt_datafile'] = p4util.get_psifile(1)
kwargs['mode'] = 'sow'
n += 1
if core.get_option('OPTKING', 'INTCOS_GENERATE_EXIT') == False:
if core.get_option('OPTKING', 'KEEP_INTCOS') == False:
core.opt_clean()
optstash.restore()
raise OptimizationConvergenceError("""geometry optimization""", n - 1, wfn)
def hessian(name, **kwargs):
r"""Function complementary to :py:func:`~frequency`. Computes force
constants, deciding analytic, finite difference of gradients, or
finite difference of energies.
:returns: :py:class:`~psi4.core.Matrix` |w--w| Total non-mass-weighted electronic Hessian in Hartrees/Bohr/Bohr.
:returns: (:py:class:`~psi4.core.Matrix`, :py:class:`~psi4.core.Wavefunction`) |w--w| Hessian and wavefunction when **return_wfn** specified.
:examples:
>>> # [1] Frequency calculation without thermochemical analysis
>>> hessian('mp3')
>>> # [2] Frequency calc w/o thermo analysis getting the Hessian
>>> # in file, core.Matrix, and np.array forms
>>> set hessian_write on
>>> H, wfn = hessian('ccsd', return_wfn=True)
>>> wfn.hessian().print_out()
>>> np.array(H)
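>>> # [3] Illustrative sketch: finite difference of analytic gradients,
>>> #     restricted to the b2 irrep (Cotton ordering)
>>> H = hessian('scf', dertype=1, irrep=4)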
"""
kwargs = p4util.kwargs_lower(kwargs)
# Bounce to CP if bsse kwarg (someday)
if kwargs.get('bsse_type', None) is not None:
raise ValidationError("Hessian: Cannot specify bsse_type for hessian yet.")
# Figure out what kind of gradient this is
if hasattr(name, '__call__'):
if name.__name__ in ['cbs', 'complete_basis_set']:
gradient_type = 'cbs_wrapper'
else:
# Bounce to name if name is non-CBS function
gradient_type = 'custom_function'
elif '/' in name:
gradient_type = 'cbs_gufunc'
else:
gradient_type = 'conventional'
if gradient_type != 'conventional':
raise ValidationError("Hessian: Does not yet support more advanced input or custom functions.")
lowername = name.lower()
# Check if this is a CBS extrapolation
if "/" in lowername:
return driver_cbs._cbs_gufunc('hessian', lowername, **kwargs)
return_wfn = kwargs.pop('return_wfn', False)
core.clean_variables()
dertype = 2
# Prevent methods that do not have associated Hessians
if lowername in energy_only_methods:
raise ValidationError("hessian('%s') does not have an associated hessian" % name)
optstash = p4util.OptionsState(
['FINDIF', 'HESSIAN_WRITE'],
['FINDIF', 'FD_PROJECT'],
)
# Allow specification of methods to arbitrary order
lowername, level = driver_util.parse_arbitrary_order(lowername)
if level:
kwargs['level'] = level
dertype = _find_derivative_type('hessian', lowername, kwargs.pop('freq_dertype', kwargs.pop('dertype', None)))
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
# S/R: Mode of operation- whether finite difference freq run in one job or files farmed out
freq_mode = kwargs.pop('mode', 'continuous').lower()
if freq_mode == 'continuous':
pass
elif freq_mode == 'sow':
if dertype == 2:
raise ValidationError("""Frequency execution mode 'sow' not valid for analytic Hessian calculation.""")
elif freq_mode == 'reap':
freq_linkage = kwargs.get('linkage', None)
if freq_linkage is None:
raise ValidationError("""Frequency execution mode 'reap' requires a linkage option.""")
else:
raise ValidationError("""Frequency execution mode '%s' not valid.""" % (freq_mode))
# Set method-dependent scf convergence criteria (test on procedures['energy'] since that's guaranteed)
optstash_conv = driver_util._set_convergence_criterion('energy', lowername, 8, 10, 8, 10, 8)
# Select certain irreps
irrep = kwargs.get('irrep', -1)
if irrep == -1:
pass # do all irreps
else:
irrep = driver_util.parse_cotton_irreps(irrep, molecule.schoenflies_symbol())
irrep -= 1 # A1 irrep is externally 1, internally 0
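# e.g. irrep=4 or irrep='b2' (Cotton ordering, cf. the frequency() docstring)
# both select the same symmetry block; it is stored 0-indexed from here on.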
if dertype == 2:
core.print_out("""hessian() switching to finite difference by gradients for partial Hessian calculation.\n""")
dertype = 1
# At stationary point?
if 'ref_gradient' in kwargs:
core.print_out("""hessian() using ref_gradient to assess stationary point.\n""")
G0 = kwargs['ref_gradient']
else:
G0 = gradient(lowername, molecule=molecule, **kwargs)
translations_projection_sound, rotations_projection_sound = _energy_is_invariant(G0)
core.print_out('\n Based on options and gradient (rms={:.2E}), recommend {}projecting translations and {}projecting rotations.\n'.
format(G0.rms(), '' if translations_projection_sound else 'not ',
'' if rotations_projection_sound else 'not '))
if not core.has_option_changed('FINDIF', 'FD_PROJECT'):
core.set_local_option('FINDIF', 'FD_PROJECT', rotations_projection_sound)
# Does an analytic procedure exist for the requested method?
if dertype == 2:
core.print_out("""hessian() will perform analytic frequency computation.\n""")
# We have the desired method. Do it.
wfn = procedures['hessian'][lowername](lowername, molecule=molecule, **kwargs)
wfn.set_gradient(G0)
optstash.restore()
optstash_conv.restore()
# TODO: check that current energy's being set to the right figure when this code is actually used
core.set_variable('CURRENT ENERGY', wfn.energy())
_hessian_write(wfn)
if return_wfn:
return (wfn.hessian(), wfn)
else:
return wfn.hessian()
elif dertype == 1:
core.print_out("""hessian() will perform frequency computation by finite difference of analytic gradients.\n""")
# Shifting the geometry so need to copy the active molecule
moleculeclone = molecule.clone()
# Obtain list of displacements
displacements = core.fd_geoms_freq_1(moleculeclone, irrep)
moleculeclone.reinterpret_coordentry(False)
moleculeclone.fix_orientation(True)
# Record undisplaced symmetry for projection of displaced point groups
core.set_parent_symmetry(molecule.schoenflies_symbol())
ndisp = len(displacements)
print(""" %d displacements needed.""" % ndisp)
gradients = []
energies = []
# S/R: Write instructions for sow/reap procedure to output file and reap input file
if freq_mode == 'sow':
instructionsO = """\n# The frequency sow/reap procedure has been selected through mode='sow'. In addition\n"""
instructionsO += """# to this output file (which contains no quantum chemical calculations), this job\n"""
instructionsO += """# has produced a number of input files (FREQ-*.in) for individual components\n"""
instructionsO += """# and a single input file (FREQ-master.in) with a frequency(mode='reap') command.\n"""
instructionsO += """# These files may look very peculiar since they contain processed and pickled python\n"""
instructionsO += """# rather than normal input. Follow the instructions below (repeated in FREQ-master.in)\n"""
instructionsO += """# to continue.\n#\n"""
instructionsO += """# Alternatively, a single-job execution of the hessian may be accessed through\n"""
instructionsO += """# the frequency wrapper option mode='continuous'.\n#\n"""
core.print_out(instructionsO)
instructionsM = """\n# Follow the instructions below to carry out this frequency computation.\n#\n"""
instructionsM += """# (1) Run all of the FREQ-*.in input files on any variety of computer architecture.\n"""
instructionsM += """# The output file names must be as given below (these are the defaults when executed\n"""
instructionsM += """# as `psi4 FREQ-1.in`, etc.).\n#\n"""
for rgt in range(ndisp):
pre = 'FREQ-' + str(rgt + 1)
instructionsM += """# psi4 -i %-27s -o %-27s\n""" % (pre + '.in', pre + '.out')
instructionsM += """#\n# (2) Gather all the resulting output files in a directory. Place input file\n"""
instructionsM += """# FREQ-master.in into that directory and run it. The job will be minimal in\n"""
instructionsM += """# length and give summary results for the frequency computation in its output file.\n#\n"""
instructionsM += """# psi4 -i %-27s -o %-27s\n#\n\n""" % ('FREQ-master.in', 'FREQ-master.out')
with open('FREQ-master.in', 'wb') as fmaster:
fmaster.write('# This is a psi4 input file auto-generated from the hessian() wrapper.\n\n'.encode('utf-8'))
fmaster.write(p4util.format_molecule_for_input(moleculeclone).encode('utf-8'))
fmaster.write(p4util.format_options_for_input(moleculeclone, **kwargs).encode('utf-8'))
p4util.format_kwargs_for_input(fmaster, lmode=2, return_wfn=True, freq_dertype=1, **kwargs)
fmaster.write(("""retE, retwfn = %s('%s', **kwargs)\n\n""" % (frequency.__name__, lowername)).encode('utf-8'))
fmaster.write(instructionsM.encode('utf-8'))
core.print_out(instructionsM)
for n, displacement in enumerate(displacements):
rfile = 'FREQ-%s' % (n + 1)
# Build string of title banner
banners = ''
banners += """core.print_out('\\n')\n"""
banners += """p4util.banner(' Hessian Computation: Gradient Displacement %d ')\n""" % (n + 1)
banners += """core.print_out('\\n')\n\n"""
if freq_mode == 'continuous':
# print progress to file and screen
core.print_out('\n')
p4util.banner('Loading displacement %d of %d' % (n + 1, ndisp))
print(""" %d""" % (n + 1), end=('\n' if (n + 1 == ndisp) else ''))
sys.stdout.flush()
# Load in displacement into the active molecule (xyz coordinates only)
moleculeclone.set_geometry(displacement)
# Perform the gradient calculation
G, wfn = gradient(lowername, molecule=moleculeclone, return_wfn=True, **kwargs)
gradients.append(wfn.gradient())
energies.append(core.get_variable('CURRENT ENERGY'))
# clean may be necessary when changing irreps of displacements
core.clean()
# S/R: Write each displaced geometry to an input file
elif freq_mode == 'sow':
moleculeclone.set_geometry(displacement)
# S/R: Prepare molecule, options, kwargs, function call and energy save
# forcexyz in molecule writer S/R enforcement of !reinterpret_coordentry above
with open('%s.in' % (rfile), 'wb') as freagent:
freagent.write('# This is a psi4 input file auto-generated from the hessian() wrapper.\n\n'.encode('utf-8'))
freagent.write(p4util.format_molecule_for_input(moleculeclone, forcexyz=True).encode('utf-8'))
freagent.write(p4util.format_options_for_input(moleculeclone, **kwargs).encode('utf-8'))
kwargs['return_wfn'] = True
p4util.format_kwargs_for_input(freagent, **kwargs)
freagent.write("""G, wfn = %s('%s', **kwargs)\n\n""" % (gradient.__name__, lowername))
freagent.write("""core.print_out('\\nHESSIAN RESULT: computation %d for item %d """ % (os.getpid(), n + 1))
freagent.write("""yields electronic gradient %r\\n' % (p4util.mat2arr(wfn.gradient())))\n\n""")
freagent.write("""core.print_out('\\nHESSIAN RESULT: computation %d for item %d """ % (os.getpid(), n + 1))
freagent.write("""yields electronic energy %20.12f\\n' % (get_variable('CURRENT ENERGY')))\n\n""")
# S/R: Read energy from each displaced geometry output file and save in energies array
elif freq_mode == 'reap':
exec(banners)
core.set_variable('NUCLEAR REPULSION ENERGY', moleculeclone.nuclear_repulsion_energy())
pygrad = p4util.extract_sowreap_from_output(rfile, 'HESSIAN', n, freq_linkage, True, label='electronic gradient')
p4mat = core.Matrix.from_list(pygrad)
p4mat.print_out()
gradients.append(p4mat)
energies.append(p4util.extract_sowreap_from_output(rfile, 'HESSIAN', n, freq_linkage, True))
# S/R: Quit sow after writing files. Initialize skeleton wfn to receive grad for reap
if freq_mode == 'sow':
optstash.restore()
optstash_conv.restore()
if return_wfn:
return (None, None)
else:
return None
elif freq_mode == 'reap':
wfn = core.Wavefunction.build(molecule, core.get_global_option('BASIS'))
# Assemble Hessian from gradients
# Final disp is undisp, so wfn has mol, G, H general to freq calc
H = core.fd_freq_1(molecule, gradients, irrep) # TODO or moleculeclone?
wfn.set_hessian(H)
wfn.set_gradient(G0)
wfn.set_frequencies(core.get_frequencies())
# The last item in the list is the reference energy, return it
core.set_variable('CURRENT ENERGY', energies[-1])
core.set_parent_symmetry('')
optstash.restore()
optstash_conv.restore()
_hessian_write(wfn)
if return_wfn:
return (wfn.hessian(), wfn)
else:
return wfn.hessian()
else:
core.print_out("""hessian() will perform frequency computation by finite difference of analytic energies.\n""")
# Set method-dependent scf convergence criteria (test on procedures['energy'] since that's guaranteed)
optstash.restore()
optstash_conv.restore()
optstash_conv = driver_util._set_convergence_criterion('energy', lowername, 10, 11, 10, 11, 10)
# Shifting the geometry so need to copy the active molecule
moleculeclone = molecule.clone()
# Obtain list of displacements
displacements = core.fd_geoms_freq_0(moleculeclone, irrep)
moleculeclone.fix_orientation(True)
moleculeclone.reinterpret_coordentry(False)
# Record undisplaced symmetry for projection of diplaced point groups
core.set_parent_symmetry(molecule.schoenflies_symbol())
ndisp = len(displacements)
# This version is pretty dependent on the reference geometry being last (as it is now)
print(' %d displacements needed.' % ndisp)
energies = []
# S/R: Write instructions for sow/reap procedure to output file and reap input file
if freq_mode == 'sow':
instructionsO = """\n# The frequency sow/reap procedure has been selected through mode='sow'. In addition\n"""
instructionsO += """# to this output file (which contains no quantum chemical calculations), this job\n"""
instructionsO += """# has produced a number of input files (FREQ-*.in) for individual components\n"""
instructionsO += """# and a single input file (FREQ-master.in) with a frequency(mode='reap') command.\n"""
instructionsO += """# These files may look very peculiar since they contain processed and pickled python\n"""
instructionsO += """# rather than normal input. Follow the instructions below (repeated in FREQ-master.in)\n"""
instructionsO += """# to continue.\n#\n"""
instructionsO += """# Alternatively, a single-job execution of the hessian may be accessed through\n"""
instructionsO += """# the frequency wrapper option mode='continuous'.\n#\n"""
core.print_out(instructionsO)
instructionsM = """\n# Follow the instructions below to carry out this frequency computation.\n#\n"""
instructionsM += """# (1) Run all of the FREQ-*.in input files on any variety of computer architecture.\n"""
instructionsM += """# The output file names must be as given below (these are the defaults when executed\n"""
instructionsM += """# as `psi4 FREQ-1.in`, etc.).\n#\n"""
for rgt in range(ndisp):
pre = 'FREQ-' + str(rgt + 1)
instructionsM += """# psi4 -i %-27s -o %-27s\n""" % (pre + '.in', pre + '.out')
instructionsM += """#\n# (2) Gather all the resulting output files in a directory. Place input file\n"""
instructionsM += """# FREQ-master.in into that directory and run it. The job will be minimal in\n"""
instructionsM += """# length and give summary results for the frequency computation in its output file.\n#\n"""
instructionsM += """# psi4 -i %-27s -o %-27s\n#\n\n""" % ('FREQ-master.in', 'FREQ-master.out')
with open('FREQ-master.in', 'wb') as fmaster:
fmaster.write('# This is a psi4 input file auto-generated from the hessian() wrapper.\n\n'.encode('utf-8'))
fmaster.write(p4util.format_molecule_for_input(moleculeclone).encode('utf-8'))
fmaster.write(p4util.format_options_for_input(moleculeclone, **kwargs).encode('utf-8'))
p4util.format_kwargs_for_input(fmaster, lmode=2, return_wfn=True, freq_dertype=0, **kwargs)
fmaster.write(("""retE, retwfn = %s('%s', **kwargs)\n\n""" % (frequency.__name__, lowername)).encode('utf-8'))
fmaster.write(instructionsM.encode('utf-8'))
core.print_out(instructionsM)
for n, displacement in enumerate(displacements):
rfile = 'FREQ-%s' % (n + 1)
# Build string of title banner
banners = ''
banners += """core.print_out('\\n')\n"""
banners += """p4util.banner(' Hessian Computation: Energy Displacement %d ')\n""" % (n + 1)
banners += """core.print_out('\\n')\n\n"""
if freq_mode == 'continuous':
# print progress to file and screen
core.print_out('\n')
p4util.banner('Loading displacement %d of %d' % (n + 1, ndisp))
print(""" %d""" % (n + 1), end=('\n' if (n + 1 == ndisp) else ''))
sys.stdout.flush()
# Load in displacement into the active molecule
moleculeclone.set_geometry(displacement)
# Perform the energy calculation
E, wfn = energy(lowername, return_wfn=True, molecule=moleculeclone, **kwargs)
energies.append(core.get_variable('CURRENT ENERGY'))
# clean may be necessary when changing irreps of displacements
core.clean()
# S/R: Write each displaced geometry to an input file
elif freq_mode == 'sow':
moleculeclone.set_geometry(displacement)
# S/R: Prepare molecule, options, kwargs, function call and energy save
with open('%s.in' % (rfile), 'wb') as freagent:
freagent.write('# This is a psi4 input file auto-generated from the hessian() wrapper.\n\n'.encode('utf-8'))
freagent.write(p4util.format_molecule_for_input(moleculeclone, forcexyz=True).encode('utf-8'))
freagent.write(p4util.format_options_for_input(moleculeclone, **kwargs).encode('utf-8'))
p4util.format_kwargs_for_input(freagent, **kwargs)
freagent.write("""electronic_energy = %s('%s', **kwargs)\n\n""" % (energy.__name__, lowername))
freagent.write("""core.print_out('\\nHESSIAN RESULT: computation %d for item %d """ % (os.getpid(), n + 1))
freagent.write("""yields electronic energy %20.12f\\n' % (electronic_energy))\n\n""")
# S/R: Read energy from each displaced geometry output file and save in energies array
elif freq_mode == 'reap':
exec(banners)
core.set_variable('NUCLEAR REPULSION ENERGY', moleculeclone.nuclear_repulsion_energy())
energies.append(p4util.extract_sowreap_from_output(rfile, 'HESSIAN', n, freq_linkage, True))
# S/R: Quit sow after writing files. Initialize skeleton wfn to receive grad for reap
if freq_mode == 'sow':
optstash.restore()
optstash_conv.restore()
if return_wfn:
return (None, None)
else:
return None
elif freq_mode == 'reap':
# core.set_variable('CURRENT ENERGY', energies[-1])
wfn = core.Wavefunction.build(molecule, core.get_global_option('BASIS'))
# Assemble Hessian from energies
H = core.fd_freq_0(molecule, energies, irrep)
wfn.set_hessian(H)
wfn.set_gradient(G0)
wfn.set_frequencies(core.get_frequencies())
# The last item in the list is the reference energy, return it
core.set_variable('CURRENT ENERGY', energies[-1])
core.set_parent_symmetry('')
optstash.restore()
optstash_conv.restore()
_hessian_write(wfn)
if return_wfn:
return (wfn.hessian(), wfn)
else:
return wfn.hessian()
def frequency(name, **kwargs):
r"""Function to compute harmonic vibrational frequencies.
:aliases: frequencies(), freq()
:returns: *float* |w--w| Total electronic energy in Hartrees.
:returns: (*float*, :py:class:`~psi4.core.Wavefunction`) |w--w| energy and wavefunction when **return_wfn** specified.
:type name: string
:param name: ``'scf'`` || ``'mp2'`` || ``'ci5'`` || etc.
First argument, usually unlabeled. Indicates the computational method
to be applied to the system.
:type molecule: :ref:`molecule <op_py_molecule>`
:param molecule: ``h2o`` || etc.
The target molecule, if not the last molecule defined.
:type return_wfn: :ref:`boolean <op_py_boolean>`
:param return_wfn: ``'on'`` || |dl| ``'off'`` |dr|
Indicate to additionally return the :py:class:`~psi4.core.Wavefunction`
calculation result as the second element (after *float* energy) of a tuple.
Arrays of frequencies and the Hessian can be accessed through the wavefunction.
:type func: :ref:`function <op_py_function>`
:param func: |dl| ``gradient`` |dr| || ``energy`` || ``cbs``
Indicates the type of calculation to be performed on the molecule.
The default dertype accesses ``'gradient'`` or ``'energy'``, while
``'cbs'`` performs a multistage finite difference calculation.
If a nested series of python functions is intended (see :ref:`sec:intercalls`),
use keyword ``freq_func`` instead of ``func``.
:type mode: string
:param mode: |dl| ``'continuous'`` |dr| || ``'sow'`` || ``'reap'``
For a finite difference of energies or gradients frequency, indicates
whether the calculations required to complete the frequency are to be run
in one file (``'continuous'``) or are to be farmed out in an
        embarrassingly parallel fashion (``'sow'``/``'reap'``). For the latter,
run an initial job with ``'sow'`` and follow instructions in its output file.
For maximum flexibility, ``return_wfn`` is always on in ``'reap'`` mode.
:type dertype: :ref:`dertype <op_py_dertype>`
:param dertype: |dl| ``'hessian'`` |dr| || ``'gradient'`` || ``'energy'``
        Indicates whether analytic hessians (not currently available), finite
        difference of gradients (if available), or finite difference of
        energies is to be performed.
:type irrep: int or string
:param irrep: |dl| ``-1`` |dr| || ``1`` || ``'b2'`` || ``'App'`` || etc.
Indicates which symmetry block (:ref:`Cotton <table:irrepOrdering>` ordering) of vibrational
frequencies to be computed. ``1``, ``'1'``, or ``'a1'`` represents
:math:`a_1`, requesting only the totally symmetric modes.
``-1`` indicates a full frequency calculation.
.. note:: Analytic hessians are only available for RHF. For all other methods, Frequencies will
proceed through finite differences according to availability of gradients or energies.
.. _`table:freq_gen`:
+-------------------------+---------------------------------------------------------------------------------------------------------------+
| name | calls method |
+=========================+===============================================================================================================+
| scf | Hartree--Fock (HF) :ref:`[manual] <sec:scf>` |
+-------------------------+---------------------------------------------------------------------------------------------------------------+
:examples:
>>> # [1] Frequency calculation for all modes through highest available derivatives
>>> frequency('ccsd')
>>> # [2] Frequency calculation for b2 modes through finite difference of gradients
>>> # printing lowest mode frequency to screen and Hessian to output
>>> E, wfn = frequencies('scf', dertype=1, irrep=4, return_wfn=True)
>>> print wfn.frequencies().get(0, 0)
>>> wfn.hessian().print_out()
>>> # [3] Frequency calculation at default conditions and Hessian reuse at STP
>>> E, wfn = freq('mp2', return_wfn=True)
>>> set t 273.15
>>> set p 100000
>>> thermo(wfn, wfn.frequencies())
>>> # [4] Opt+Freq, skipping the gradient recalc at the start of the Hessian
>>> e, wfn = optimize('hf', return_wfn=True)
>>> frequencies('hf', ref_gradient=wfn.gradient())
"""
kwargs = p4util.kwargs_lower(kwargs)
# Bounce (someday) if name is function
if hasattr(name, '__call__'):
raise ValidationError("Frequency: Cannot use custom function")
lowername = name.lower()
if "/" in lowername:
return driver_cbs._cbs_gufunc(frequency, name, ptype='frequency', **kwargs)
if kwargs.get('bsse_type', None) is not None:
        raise ValidationError("Frequency: Does not currently support 'bsse_type' arguments")
return_wfn = kwargs.pop('return_wfn', False)
# are we in sow/reap mode?
freq_mode = kwargs.get('mode', 'continuous').lower()
if freq_mode not in ['continuous', 'sow', 'reap']:
raise ValidationError("""Frequency execution mode '%s' not valid.""" % (freq_mode))
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
# Compute the hessian
H, wfn = hessian(lowername, return_wfn=True, molecule=molecule, **kwargs)
# S/R: Quit after getting new displacements
if freq_mode == 'sow':
return 0.0
# Project final frequencies?
translations_projection_sound, rotations_projection_sound = _energy_is_invariant(wfn.gradient())
project_trans = kwargs.get('project_trans', translations_projection_sound)
project_rot = kwargs.get('project_rot', rotations_projection_sound)
irrep = kwargs.get('irrep', None)
vibinfo = vibanal_wfn(wfn, irrep=irrep, project_trans=project_trans, project_rot=project_rot)
vibonly = qcdb.vib.filter_nonvib(vibinfo)
wfn.set_frequencies(core.Vector.from_array(qcdb.vib.filter_omega_to_real(vibonly['omega'].data)))
wfn.frequency_analysis = vibinfo
for postcallback in hooks['frequency']['post']:
postcallback(lowername, wfn=wfn, **kwargs)
if return_wfn:
return (core.get_variable('CURRENT ENERGY'), wfn)
else:
return core.get_variable('CURRENT ENERGY')
def vibanal_wfn(wfn, hess=None, irrep=None, molecule=None, project_trans=True, project_rot=True):
if hess is None:
nmwhess = np.asarray(wfn.hessian())
else:
nmwhess = hess
mol = wfn.molecule()
geom = np.asarray(mol.geometry())
symbols = [mol.symbol(at) for at in range(mol.natom())]
vibrec = {'molecule': mol.to_dict(np_out=False),
'hessian': nmwhess.tolist()}
if molecule is not None:
molecule.update_geometry()
if mol.natom() != molecule.natom():
raise ValidationError('Impostor molecule trying to be analyzed! natom {} != {}'.format(mol.natom(), molecule.natom()))
if abs(mol.nuclear_repulsion_energy() - molecule.nuclear_repulsion_energy()) > 1.e-6:
raise ValidationError('Impostor molecule trying to be analyzed! NRE {} != {}'.format(mol.nuclear_repulsion_energy(), molecule.nuclear_repulsion_energy()))
if not np.allclose(np.asarray(mol.geometry()), np.asarray(molecule.geometry()), atol=1.e-6):
core.print_out('Warning: geometry center/orientation mismatch. Normal modes may not be in expected coordinate system.')
# raise ValidationError('Impostor molecule trying to be analyzed! geometry\n{}\n !=\n{}'.format(
# np.asarray(mol.geometry()), np.asarray(molecule.geometry())))
mol = molecule
m = np.asarray([mol.mass(at) for at in range(mol.natom())])
irrep_labels = mol.irrep_labels()
vibinfo, vibtext = qcdb.vib.harmonic_analysis(nmwhess, geom, m, wfn.basisset(), irrep_labels,
project_trans=project_trans, project_rot=project_rot)
vibrec.update({k: qca.to_dict() for k, qca in vibinfo.items()})
core.print_out(vibtext)
core.print_out(qcdb.vib.print_vibs(vibinfo, shortlong=True, normco='x', atom_lbl=symbols))
if core.has_option_changed('THERMO', 'ROTATIONAL_SYMMETRY_NUMBER'):
rsn = core.get_option('THERMO', 'ROTATIONAL_SYMMETRY_NUMBER')
else:
rsn = mol.rotational_symmetry_number()
if irrep is None:
therminfo, thermtext = qcdb.vib.thermo(vibinfo,
T=core.get_option("THERMO", "T"), # 298.15 [K]
P=core.get_option("THERMO", "P"), # 101325. [Pa]
multiplicity=mol.multiplicity(),
molecular_mass=np.sum(m),
sigma=rsn,
rotor_type=mol.rotor_type(),
rot_const=np.asarray(mol.rotational_constants()),
E0=core.get_variable('CURRENT ENERGY')) # someday, wfn.energy()
vibrec.update({k: qca.to_dict() for k, qca in therminfo.items()})
core.set_variable("ZPVE", therminfo['ZPE_corr'].data)
core.set_variable("THERMAL ENERGY CORRECTION", therminfo['E_corr'].data)
core.set_variable("ENTHALPY CORRECTION", therminfo['H_corr'].data)
core.set_variable("GIBBS FREE ENERGY CORRECTION", therminfo['G_corr'].data)
core.set_variable("ZERO K ENTHALPHY", therminfo['ZPE_tot'].data)
core.set_variable("THERMAL ENERGY", therminfo['E_tot'].data)
core.set_variable("ENTHALPY", therminfo['H_tot'].data)
core.set_variable("GIBBS FREE ENERGY", therminfo['G_tot'].data)
core.print_out(thermtext)
else:
core.print_out(' Thermochemical analysis skipped for partial frequency calculation.\n')
if core.get_option('FINDIF', 'HESSIAN_WRITE'):
filename = core.get_writer_file_prefix(mol.name()) + ".vibrec"
with open(filename, 'w') as handle:
json.dump(vibrec, handle, sort_keys=True, indent=4)
if core.get_option('FINDIF', 'NORMAL_MODES_WRITE'):
filename = core.get_writer_file_prefix(mol.name()) + ".molden_normal_modes"
with open(filename, 'w') as handle:
handle.write(qcdb.vib.print_molden_vibs(vibinfo, symbols, geom, standalone=True))
return vibinfo
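# Usage sketch: one way the vibrational-analysis helper above can be reused on a
# wavefunction that already carries a Hessian (e.g. from
# ``frequency(..., return_wfn=True)``).  Only names already used in this module
# are called; the helper name and the report format are illustrative assumptions.
def _example_report_frequencies(wfn):
    """Print the real harmonic frequencies (cm^-1) stored on a frequency-run wfn."""
    vibinfo = vibanal_wfn(wfn, irrep=None, project_trans=True, project_rot=True)
    # Keep only true vibrations and drop imaginary modes, mirroring frequency() above
    vibonly = qcdb.vib.filter_nonvib(vibinfo)
    freqs = qcdb.vib.filter_omega_to_real(vibonly['omega'].data)
    for idx, nu in enumerate(freqs, start=1):
        core.print_out('    mode %3d: %10.2f cm^-1\n' % (idx, nu))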
def _hessian_write(wfn):
if core.get_option('FINDIF', 'HESSIAN_WRITE'):
filename = core.get_writer_file_prefix(wfn.molecule().name()) + ".hess"
with open(filename, 'wb') as handle:
qcdb.hessparse.to_string(np.asarray(wfn.hessian()), handle, dtype='psi4')
def gdma(wfn, datafile=""):
"""Function to use wavefunction information in *wfn* and, if specified,
additional commands in *filename* to run GDMA analysis.
.. include:: ../autodoc_abbr_options_c.rst
.. versionadded:: 0.6
:returns: None
:type wfn: :py:class:`~psi4.core.Wavefunction`
:param wfn: set of molecule, basis, orbitals from which to generate DMA analysis
:type datafile: string
    :param datafile: optional control file (see GDMA manual) to perform more complicated DMA
                     analyses. If this option is used, the File keyword must be set to read
                     a filename.fchk, where filename is provided by |globals__writer_file_label|.
:examples:
>>> # [1] DMA analysis from MP2 wavefunction. N.B. gradient must be requested to generate MP2 density.
>>> grad, wfn = gradient('mp2', return_wfn=True)
>>> gdma(wfn)
"""
# Start by writing a G* checkpoint file, for the GDMA code to read in
fw = core.FCHKWriter(wfn)
molname = wfn.molecule().name()
prefix = core.get_writer_file_prefix(molname)
fchkfile = prefix + '.fchk'
fw.write(fchkfile)
if datafile:
commands = datafile
else:
densname = wfn.name()
if densname == "DFT":
densname = "SCF"
commands = 'psi4_dma_datafile.dma'
radii = core.get_option('GDMA', 'GDMA_RADIUS')
origin = core.get_option('GDMA', 'GDMA_ORIGIN')
with open(commands, 'w') as f:
f.write("File %s Density %s\n" % (fchkfile, densname))
f.write("Angstrom\n")
f.write("%s\n" % core.get_option('GDMA', 'GDMA_MULTIPOLE_UNITS'))
f.write("Multipoles\n")
if origin:
try:
f.write("Origin %f %f %f\n" % (float(origin[0]), float(origin[1]), float(origin[2])))
except:
raise ValidationError("The GDMA origin array should contain three entries: x, y, and z.")
f.write("Switch %f\n" % core.get_option('GDMA', 'GDMA_SWITCH'))
if radii:
f.write("Radius %s\n" % " ".join([str(r) for r in radii]))
f.write("Limit %d\n" % core.get_option('GDMA', 'GDMA_LIMIT'))
f.write("Start\n")
f.write("Finish\n")
core.run_gdma(wfn, commands)
os.remove(fchkfile)
# If we generated the DMA control file, we should clean up here
if not datafile:
os.remove(commands)
def fchk(wfn, filename):
"""Function to write wavefunction information in *wfn* to *filename* in
Gaussian FCHK format.
.. versionadded:: 0.6
:returns: None
:type filename: string
:param filename: destination file name for FCHK file
:type wfn: :py:class:`~psi4.core.Wavefunction`
:param wfn: set of molecule, basis, orbitals from which to generate fchk file
:examples:
>>> # [1] FCHK file for DFT calculation
>>> E, wfn = energy('b3lyp', return_wfn=True)
>>> fchk(wfn, 'mycalc.fchk')
"""
fw = core.FCHKWriter(wfn)
fw.write(filename)
def molden(wfn, filename=None, density_a=None, density_b=None, dovirtual=None):
"""Function to write wavefunction information in *wfn* to *filename* in
molden format. Will write natural orbitals from *density* (MO basis) if supplied.
Warning! Most post-SCF Wavefunctions do not build the density as this is often
much more costly than the energy. In addition, the Wavefunction density attributes
(Da and Db) return the SO density and must be transformed to the MO basis
to use with this function.
.. versionadded:: 0.5
*wfn* parameter passed explicitly
:returns: None
:type wfn: :py:class:`~psi4.core.Wavefunction`
:param wfn: set of molecule, basis, orbitals from which to generate cube files
:type filename: string
:param filename: destination file name for MOLDEN file (optional)
:type density_a: :py:class:`~psi4.core.Matrix`
:param density_a: density in the MO basis to build alpha NO's from (optional)
:type density_b: :py:class:`~psi4.core.Matrix`
:param density_b: density in the MO basis to build beta NO's from, assumes restricted if not supplied (optional)
:type dovirtual: bool
    :param dovirtual: write all MOs to the MOLDEN file (True) or discard the unoccupied MOs (False); not valid for natural orbitals (optional)
:examples:
>>> # [1] Molden file for DFT calculation
>>> E, wfn = energy('b3lyp', return_wfn=True)
>>> molden(wfn, 'mycalc.molden')
>>> # [2] Molden file for CI/MCSCF computation using NO roots
>>> E, wfn = energy('ci', return_wfn=True)
>>> molden(wfn, 'no_root1.molden', density_a=wfn.opdm(0, 0, "A", True))
>>> # [3] The following does NOT work, please see below
>>> E, wfn = energy('ccsd', return_wfn=True)
>>> molden(wfn, 'ccsd_no.molden', density_a=wfn.Da())
>>> # [4] This WILL work, note the transformation of Da (SO->MO)
>>> E, wfn = properties('ccsd', properties=['dipole'], return_wfn=True)
>>> Da_so = wfn.Da()
>>> Da_mo = Matrix.triplet(wfn.Ca(), Da_so, wfn.Ca(), True, False, False)
>>> molden(wfn, 'ccsd_no.molden', density_a=Da_mo)
"""
if filename is None:
filename = core.get_writer_file_prefix(wfn.molecule().name()) + ".molden"
if dovirtual is None:
dovirt = bool(core.get_option("SCF", "MOLDEN_WITH_VIRTUAL"))
else:
dovirt = dovirtual
if density_a:
nmopi = wfn.nmopi()
nsopi = wfn.nsopi()
NO_Ra = core.Matrix("NO Alpha Rotation Matrix", nmopi, nmopi)
NO_occa = core.Vector(nmopi)
density_a.diagonalize(NO_Ra, NO_occa, core.DiagonalizeOrder.Descending)
NO_Ca = core.Matrix("Ca Natural Orbitals", nsopi, nmopi)
NO_Ca.gemm(False, False, 1.0, wfn.Ca(), NO_Ra, 0)
if density_b:
NO_Rb = core.Matrix("NO Beta Rotation Matrix", nmopi, nmopi)
NO_occb = core.Vector(nmopi)
density_b.diagonalize(NO_Rb, NO_occb, core.DiagonalizeOrder.Descending)
NO_Cb = core.Matrix("Cb Natural Orbitals", nsopi, nmopi)
NO_Cb.gemm(False, False, 1.0, wfn.Cb(), NO_Rb, 0)
else:
NO_occb = NO_occa
NO_Cb = NO_Ca
mw = core.MoldenWriter(wfn)
mw.write(filename, NO_Ca, NO_Cb, NO_occa, NO_occb, NO_occa, NO_occb, dovirt)
else:
try:
occa = wfn.occupation_a()
occb = wfn.occupation_b()
except AttributeError:
core.print_out("\n!Molden warning: This wavefunction does not have occupation numbers.\n"
"Writing zero's for occupation numbers\n\n")
occa = core.Vector(wfn.nmopi())
occb = core.Vector(wfn.nmopi())
mw = core.MoldenWriter(wfn)
mw.write(filename, wfn.Ca(), wfn.Cb(), wfn.epsilon_a(), wfn.epsilon_b(), occa, occb, dovirt)
# Aliases
opt = optimize
freq = frequency
frequencies = frequency
prop = properties
| lgpl-3.0 | -2,847,176,584,611,929,000 | 56.55871 | 166 | 0.476895 | false |
icgc-dcc/egasub | tests/test_dataset.py | 1 | 1522 | from egasub.ega.entities.dataset import Dataset
from egasub.ega.entities.dataset_link import DatasetLink
from egasub.ega.entities.attribute import Attribute
links = [DatasetLink('label 1','url1'),DatasetLink('label 2','url2')]
attributes = [Attribute('The tag 1','The value 1','an unit'),Attribute('The tag 2','The value 2','an unit')]
dataset = Dataset('an alias',[3,4,5],3,[6,1,4],[8,21,4],'a title',links,attributes,'dataset description',None,'ega_accession_id')
def test_dataset_type_ids():
assert [3,4,5] == dataset.dataset_type_ids
def test_policy_id():
assert 3 == dataset.policy_id
def test_runs_references():
assert [6,1,4] == dataset.runs_references
def test_analysis_references():
assert [8,21,4] == dataset.analysis_references
def test_dataset_links():
assert links == dataset.dataset_links
def test_attributes():
assert attributes == dataset.attributes
def test_to_dict():
assert cmp(
{
'title' : 'a title',
'datasetTypeIds':[3,4,5],
'policyId':3,
'runsReferences' : [6,1,4],
'analysisReferences' : [8,21,4],
'datasetLinks' : map(lambda dataset_link: dataset_link.to_dict(), links),
'attributes' : map(lambda attribute: attribute.to_dict(), attributes),
'alias' : 'an alias',
'description': 'dataset description',
'egaAccessionId': 'ega_accession_id'
}, dataset.to_dict()) == 0
def test_alias():
assert 'an alias' == dataset.alias | gpl-3.0 | -2,194,082,586,187,153,400 | 32.844444 | 129 | 0.634691 | false |
nisavid/spruce-collections | spruce/collections/_exc.py | 1 | 1237 | """Exceptions"""
__copyright__ = "Copyright (C) 2014 Ivan D Vasin"
__docformat__ = "restructuredtext"
import exceptions as _py_exc
class Exception(_py_exc.Exception):
pass
class Error(RuntimeError, Exception):
pass
class UnsupportedUniversalSetOperation(Error):
"""
A finite set operation was attempted on a universal set
:param operation:
The attempted operation.
:type operation: :obj:`str`
:param message:
A message that describes the error.
:type message: :obj:`str` or null
"""
def __init__(self, operation, set, message=None, *args):
super(UnsupportedUniversalSetOperation, self)\
.__init__(operation, set, message, *args)
self._message = message
self._operation = operation
self._set = set
def __str__(self):
message = '{} is unsupported by the universal set {!r}'\
.format(self.operation, self.set)
if self.message:
message += ': ' + self.message
return message
@property
def message(self):
return self._message
@property
def operation(self):
return self._operation
@property
def set(self):
return self._set
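# Usage sketch: how the error above might be raised by a hypothetical universal
# set type when asked for its (non-existent) finite size.  The function and the
# message are illustrative assumptions, not part of the library's API.
def _example_reject_len(universal_set):
    """Raise UnsupportedUniversalSetOperation for a size query on a universal set."""
    raise UnsupportedUniversalSetOperation(
        'len()', universal_set, message='a universal set has no finite size')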
| lgpl-3.0 | 4,857,376,909,560,219,000 | 21.089286 | 64 | 0.607114 | false |
walac/build-mozharness | configs/single_locale/release_mozilla-beta_android_api_9.py | 1 | 5018 | BRANCH = "mozilla-beta"
MOZ_UPDATE_CHANNEL = "beta"
MOZILLA_DIR = BRANCH
OBJDIR = "obj-l10n"
EN_US_BINARY_URL = "http://ftp.mozilla.org/pub/mozilla.org/mobile/candidates/%(version)s-candidates/build%(buildnum)d/android-api-9/en-US"
#STAGE_SERVER = "dev-stage01.srv.releng.scl3.mozilla.com"
STAGE_SERVER = "stage.mozilla.org"
STAGE_USER = "ffxbld"
STAGE_SSH_KEY = "~/.ssh/ffxbld_rsa"
HG_SHARE_BASE_DIR = "/builds/hg-shared"
config = {
"log_name": "single_locale",
"objdir": OBJDIR,
"is_automation": True,
"buildbot_json_path": "buildprops.json",
"purge_minsize": 10,
"force_clobber": True,
"clobberer_url": "https://api.pub.build.mozilla.org/clobberer/lastclobber",
"locales_file": "buildbot-configs/mozilla/l10n-changesets_mobile-beta.json",
"locales_dir": "mobile/android/locales",
"locales_platform": "android",
"ignore_locales": ["en-US"],
"balrog_credentials_file": "oauth.txt",
"tools_repo": "https://hg.mozilla.org/build/tools",
"is_release": True,
"tooltool_config": {
"manifest": "mobile/android/config/tooltool-manifests/android/releng.manifest",
"output_dir": "%(abs_work_dir)s/" + MOZILLA_DIR,
"bootstrap_cmd": ["bash", "-xe", "setup.sh"],
},
"exes": {
'tooltool.py': '/tools/tooltool.py',
},
"tooltool_servers": ["http://tooltool.pvt.build.mozilla.org/build/"],
"repos": [{
"repo": "https://hg.mozilla.org/releases/mozilla-beta",
"revision": "default",
"dest": MOZILLA_DIR,
}, {
"repo": "https://hg.mozilla.org/build/buildbot-configs",
"revision": "default",
"dest": "buildbot-configs"
}, {
"repo": "https://hg.mozilla.org/build/tools",
"revision": "default",
"dest": "tools"
}, {
"repo": "https://hg.mozilla.org/build/compare-locales",
"revision": "RELEASE_AUTOMATION"
}],
"hg_l10n_base": "https://hg.mozilla.org/releases/l10n/%s" % BRANCH,
"hg_l10n_tag": "default",
'vcs_share_base': HG_SHARE_BASE_DIR,
"l10n_dir": MOZILLA_DIR,
"release_config_file": "buildbot-configs/mozilla/release-fennec-mozilla-beta.py",
"repack_env": {
# so ugly, bug 951238
"LD_LIBRARY_PATH": "/lib:/tools/gcc-4.7.2-0moz1/lib:/tools/gcc-4.7.2-0moz1/lib64",
"MOZ_PKG_VERSION": "%(version)s",
"MOZ_OBJDIR": OBJDIR,
"LOCALE_MERGEDIR": "%(abs_merge_dir)s/",
"MOZ_UPDATE_CHANNEL": MOZ_UPDATE_CHANNEL,
},
"base_en_us_binary_url": EN_US_BINARY_URL,
# TODO ideally we could get this info from a central location.
# However, the agility of these individual config files might trump that.
"upload_env": {
"UPLOAD_USER": STAGE_USER,
"UPLOAD_SSH_KEY": STAGE_SSH_KEY,
"UPLOAD_HOST": STAGE_SERVER,
"UPLOAD_TO_TEMP": "1",
"MOZ_PKG_VERSION": "%(version)s",
},
"base_post_upload_cmd": "post_upload.py -p mobile -n %(buildnum)s -v %(version)s --builddir android-api-9/%(locale)s --release-to-mobile-candidates-dir --nightly-dir=candidates",
"merge_locales": True,
"make_dirs": ['config'],
"mozilla_dir": MOZILLA_DIR,
"mozconfig": "%s/mobile/android/config/mozconfigs/android-api-9-10-constrained/l10n-release" % MOZILLA_DIR,
"signature_verification_script": "tools/release/signing/verify-android-signature.sh",
"key_alias": "release",
"default_actions": [
"clobber",
"pull",
"list-locales",
"setup",
"repack",
"upload-repacks",
"submit-to-balrog",
"summary",
],
# Mock
"mock_target": "mozilla-centos6-x86_64-android",
"mock_packages": ['autoconf213', 'python', 'zip', 'mozilla-python27-mercurial', 'git', 'ccache',
'glibc-static', 'libstdc++-static', 'perl-Test-Simple', 'perl-Config-General',
'gtk2-devel', 'libnotify-devel', 'yasm',
'alsa-lib-devel', 'libcurl-devel',
'wireless-tools-devel', 'libX11-devel',
'libXt-devel', 'mesa-libGL-devel',
'gnome-vfs2-devel', 'GConf2-devel', 'wget',
'mpfr', # required for system compiler
'xorg-x11-font*', # fonts required for PGO
'imake', # required for makedepend!?!
'gcc45_0moz3', 'gcc454_0moz1', 'gcc472_0moz1', 'gcc473_0moz1', 'yasm', 'ccache', # <-- from releng repo
'valgrind', 'dbus-x11',
'pulseaudio-libs-devel',
'gstreamer-devel', 'gstreamer-plugins-base-devel',
'freetype-2.3.11-6.el6_1.8.x86_64',
'freetype-devel-2.3.11-6.el6_1.8.x86_64',
'java-1.7.0-openjdk-devel',
'openssh-clients',
'zlib-devel-1.2.3-27.el6.i686',
],
"mock_files": [
("/home/cltbld/.ssh", "/home/mock_mozilla/.ssh"),
],
}
| mpl-2.0 | 7,271,432,839,921,403,000 | 41.525424 | 182 | 0.567756 | false |
ScottWales/threddsclient | threddsclient/nodes.py | 1 | 6231 | """
Python objects for modelling a Thredds server
"""
from bs4 import BeautifulSoup as BSoup
import urlparse
from .utils import size_in_bytes
import logging
logger = logging.getLogger(__name__)
FILE_SERVICE = "HTTPServer"
OPENDAP_SERVICE = "OPENDAP"
WMS_SERVICE = "WMS"
WCS_SERVICE = "WCS"
class Node(object):
"""
Common items to all nodes
"""
def __init__(self, soup, catalog):
self.soup = soup
self.catalog = catalog
self.name = soup.get('name')
self.content_type = None
self.bytes = None
self.modified = None
def __repr__(self):
return "<Node name: {0.name}, content type: {0.content_type}>".format(self)
class Service(Node):
"""
A Thredds service
"""
def __init__(self, soup, catalog):
Node.__init__(self, soup, catalog)
self.base = soup.get('base')
self.url = urlparse.urljoin(self.catalog.url, self.base)
self.service_type = soup.get('serviceType')
self.content_type = "application/service"
self.services = [Service(s, self.catalog) for s in soup.find_all('service', recursive=False)]
class CatalogRef(Node):
"""
A reference to a different Thredds catalog
"""
def __init__(self, soup, catalog):
Node.__init__(self, soup, catalog)
self.title = soup.get('xlink:title')
self.name = self.title
self.href = soup.get('xlink:href')
self.url = urlparse.urljoin(self.catalog.url, self.href)
self.content_type = "application/directory"
def follow(self):
from .client import read_url
return read_url(self.url)
class Dataset(Node):
"""
Abstract dataset class
"""
def __init__(self, soup, catalog):
Node.__init__(self, soup, catalog)
def is_collection(self):
return False
@property
def ID(self):
return self.soup.get('ID')
@property
def url(self):
return "{0}?dataset={1}".format(self.catalog.url, self.ID)
@property
def authority(self):
authority = None
if self.soup.get('authority'):
authority = self.soup.get('authority')
elif self.soup.metadata:
authority = self.soup.metadata.authority
elif self.soup.parent.metadata:
authority = self.soup.parent.metadata.authority
return authority
@property
def service_name(self):
service_name = None
if self.soup.get('servicename'):
service_name = self.soup.get('servicename')
elif self.soup.metadata:
if self.soup.metadata.serviceName:
service_name = self.soup.metadata.serviceName.text
elif self.soup.parent.metadata:
if self.soup.parent.metadata.serviceName:
service_name = self.soup.parent.metadata.serviceName.text
return service_name
@property
def data_type(self):
data_type = None
if self.soup.get('datatype'):
data_type = self.soup.get('datatype')
elif self.soup.metadata:
if self.soup.metadata.dataType:
data_type = self.soup.metadata.dataType.text
elif self.soup.parent.metadata:
if self.soup.parent.metadata.dataType:
data_type = self.soup.parent.metadata.dataType.text
return data_type
@property
def data_format_type(self):
data_format_type = None
if self.soup.dataFormatType:
            data_format_type = self.soup.dataFormatType.text
        elif self.soup.metadata:
            if self.soup.metadata.dataFormatType:
                data_format_type = self.soup.metadata.dataFormatType.text
elif self.soup.parent.metadata:
if self.soup.parent.metadata.dataFormatType:
data_format_type = self.soup.parent.metadata.dataFormatType.text
return data_format_type
class CollectionDataset(Dataset):
"""
A container for other datasets
"""
def __init__(self, soup, catalog):
Dataset.__init__(self, soup, catalog)
self.collection_type = soup.get('collectionType')
self.harvest = self._harvest(soup)
# TODO: add attributes for harvesting: contributor, keyword, publisher, summary, rights, ...
# see http://www.unidata.ucar.edu/software/thredds/current/tds/tutorial/CatalogPrimer.html#Describing_datasets
self.content_type = "application/directory"
from .catalog import find_datasets
self.datasets = find_datasets(soup, self.catalog)
from .catalog import find_references
self.references = find_references(soup, self.catalog)
def is_collection(self):
return True
@staticmethod
def _harvest(soup):
return soup.get('harvest', 'false') == 'true'
class DirectDataset(Dataset):
"""
A reference to a data file
"""
def __init__(self, soup, catalog):
Dataset.__init__(self, soup, catalog)
self.url_path = soup.get('urlPath')
self.content_type = "application/netcdf"
self.modified = self._modified(soup)
self.bytes = self._bytes(soup)
def access_url(self, service_type=FILE_SERVICE):
url = None
for service in self.catalog.get_services(self.service_name):
if service.service_type == service_type:
url = urlparse.urljoin(service.url, self.url_path)
break
return url
def download_url(self):
return self.access_url(FILE_SERVICE)
def opendap_url(self):
return self.access_url(OPENDAP_SERVICE)
def wms_url(self):
return self.access_url(WMS_SERVICE)
@staticmethod
def _modified(soup):
modified = None
if soup.date:
if soup.date.get('type') == 'modified':
modified = soup.date.text
return modified
@staticmethod
def _bytes(soup):
size = None
if soup.dataSize:
try:
datasize = float(soup.dataSize.text)
units = soup.dataSize.get('units')
size = size_in_bytes(datasize, units)
except:
logger.exception("dataset size conversion failed")
return size
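# Usage sketch: walks a dataset tree rooted at a CollectionDataset and yields the
# HTTPServer download URL of every DirectDataset found.  Only attributes defined
# in this module are used; following CatalogRef nodes into other catalogs is
# deliberately left out, and the function itself is an illustrative assumption.
def example_download_urls(node):
    """Yield download URLs below *node* (a CollectionDataset or DirectDataset)."""
    if node.is_collection():
        for child in node.datasets:
            for url in example_download_urls(child):
                yield url
    else:
        url = node.download_url()
        if url is not None:
            yield url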
| apache-2.0 | -6,310,311,489,160,708,000 | 29.694581 | 118 | 0.606965 | false |
Eric89GXL/sphinx-gallery | sphinx_gallery/backreferences.py | 1 | 10042 | # -*- coding: utf-8 -*-
# Author: Óscar Nájera
# License: 3-clause BSD
"""
Backreferences Generator
========================
Parses example file code in order to keep track of used functions
"""
from __future__ import print_function, unicode_literals
import ast
import codecs
import collections
from html import escape
import os
import re
import warnings
from . import sphinx_compatibility
from .scrapers import _find_image_ext
from .utils import _replace_md5
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code.
Only retains names from imported modules.
"""
def __init__(self, global_variables=None):
super(NameFinder, self).__init__()
self.imported_names = {}
self.global_variables = global_variables or {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
class_attr = False
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name, class_attr
elif local_name in self.global_variables:
obj = self.global_variables[local_name]
if remainder and remainder[0] == '.': # maybe meth or attr
method = [remainder[1:]]
class_attr = True
else:
method = []
# Recurse through all levels of bases
classes = [obj.__class__]
offset = 0
while offset < len(classes):
for base in classes[offset].__bases__:
if base not in classes:
classes.append(base)
offset += 1
for cc in classes:
module = cc.__module__.split('.')
class_name = cc.__name__
# a.b.C.meth could be documented as a.C.meth,
# so go down the list
for depth in range(len(module), 0, -1):
full_name = '.'.join(
module[:depth] + [class_name] + method)
yield name, full_name, class_attr
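# Usage sketch: what NameFinder recovers from a small snippet, i.e. each accessed
# name as written, its resolved import path, and whether it looks like a class
# attribute.  The snippet text and expected tuple are illustrative assumptions.
def _example_name_finder():
    tree = ast.parse("import numpy as np\nx = np.linalg.norm([1.0, 2.0])\n")
    finder = NameFinder()
    finder.visit(tree)
    # Expected to include ('np.linalg.norm', 'numpy.linalg.norm', False).
    return sorted(finder.get_mapping())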
def _from_import(a, b):
imp_line = 'from %s import %s' % (a, b)
scope = dict()
with warnings.catch_warnings(record=True): # swallow warnings
warnings.simplefilter('ignore')
exec(imp_line, scope, scope)
return scope
def _get_short_module_name(module_name, obj_name):
"""Get the shortest possible module name."""
if '.' in obj_name:
obj_name, attr = obj_name.split('.')
else:
attr = None
scope = {}
try:
# Find out what the real object is supposed to be.
scope = _from_import(module_name, obj_name)
except Exception: # wrong object
return None
else:
real_obj = scope[obj_name]
if attr is not None and not hasattr(real_obj, attr): # wrong class
return None # wrong object
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
scope = {}
try:
scope = _from_import(short_name, obj_name)
# Ensure shortened object is the same as what we expect.
assert real_obj is scope[obj_name]
except Exception: # libraries can throw all sorts of exceptions...
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
_regex = re.compile(r':(?:'
r'func(?:tion)?|'
r'meth(?:od)?|'
r'attr(?:ibute)?|'
r'obj(?:ect)?|'
r'class):`(\S*)`'
)
def identify_names(script_blocks, global_variables=None, node=''):
"""Build a codeobj summary by identifying and resolving used names."""
if node == '': # mostly convenience for testing functions
c = '\n'.join(txt for kind, txt, _ in script_blocks if kind == 'code')
node = ast.parse(c)
# Get matches from the code (AST)
finder = NameFinder(global_variables)
if node is not None:
finder.visit(node)
names = list(finder.get_mapping())
# Get matches from docstring inspection
text = '\n'.join(txt for kind, txt, _ in script_blocks if kind == 'text')
names.extend((x, x, False) for x in re.findall(_regex, text))
example_code_obj = collections.OrderedDict() # order is important
fill_guess = dict()
for name, full_name, class_like in names:
if name in example_code_obj:
continue # if someone puts it in the docstring and code
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
splitted = full_name.rsplit('.', 1 + class_like)
if len(splitted) == 1:
splitted = ('builtins', splitted[0])
elif len(splitted) == 3: # class-like
assert class_like
splitted = (splitted[0], '.'.join(splitted[1:]))
else:
assert not class_like
module, attribute = splitted
# get shortened module name
module_short = _get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
if module_short is not None:
example_code_obj[name] = cobj
elif name not in fill_guess:
cobj['module_short'] = module
fill_guess[name] = cobj
for key, value in fill_guess.items():
if key not in example_code_obj:
example_code_obj[key] = value
return example_code_obj
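# Usage sketch: identify_names() consumes the gallery's (label, content, lineno)
# block tuples; this builds a minimal fake example script by hand.  The block
# contents and expected keys are illustrative assumptions.
def _example_identify_names():
    blocks = [('text', 'Uses :func:`os.path.join` in the prose.', 1),
              ('code', 'import os\np = os.getcwd()\n', 3)]
    # Expected keys include 'os.getcwd' and 'os.path.join', each mapped to a dict
    # with 'name', 'module' and 'module_short' entries.
    return identify_names(blocks)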
THUMBNAIL_TEMPLATE = """
.. raw:: html
<div class="sphx-glr-thumbcontainer" tooltip="{snippet}">
.. only:: html
.. figure:: /{thumbnail}
:ref:`sphx_glr_{ref_name}`
.. raw:: html
</div>
"""
BACKREF_THUMBNAIL_TEMPLATE = THUMBNAIL_TEMPLATE + """
.. only:: not html
* :ref:`sphx_glr_{ref_name}`
"""
def _thumbnail_div(target_dir, src_dir, fname, snippet, is_backref=False,
check=True):
"""Generate RST to place a thumbnail in a gallery."""
thumb, _ = _find_image_ext(
os.path.join(target_dir, 'images', 'thumb',
'sphx_glr_%s_thumb.png' % fname[:-3]))
if check and not os.path.isfile(thumb):
# This means we have done something wrong in creating our thumbnail!
raise RuntimeError('Could not find internal sphinx-gallery thumbnail '
'file:\n%s' % (thumb,))
thumb = os.path.relpath(thumb, src_dir)
full_dir = os.path.relpath(target_dir, src_dir)
# Inside rst files forward slash defines paths
thumb = thumb.replace(os.sep, "/")
ref_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
template = BACKREF_THUMBNAIL_TEMPLATE if is_backref else THUMBNAIL_TEMPLATE
return template.format(snippet=escape(snippet),
thumbnail=thumb, ref_name=ref_name)
def _write_backreferences(backrefs, seen_backrefs, gallery_conf,
target_dir, fname, snippet):
"""Write backreference file including a thumbnail list of examples."""
if gallery_conf['backreferences_dir'] is None:
return
for backref in backrefs:
include_path = os.path.join(gallery_conf['src_dir'],
gallery_conf['backreferences_dir'],
'%s.examples.new' % backref)
seen = backref in seen_backrefs
with codecs.open(include_path, 'a' if seen else 'w',
encoding='utf-8') as ex_file:
if not seen:
heading = 'Examples using ``%s``' % backref
ex_file.write('\n\n' + heading + '\n')
ex_file.write('^' * len(heading) + '\n')
ex_file.write(_thumbnail_div(target_dir, gallery_conf['src_dir'],
fname, snippet, is_backref=True))
seen_backrefs.add(backref)
def _finalize_backreferences(seen_backrefs, gallery_conf):
"""Replace backref files only if necessary."""
logger = sphinx_compatibility.getLogger('sphinx-gallery')
if gallery_conf['backreferences_dir'] is None:
return
for backref in seen_backrefs:
path = os.path.join(gallery_conf['src_dir'],
gallery_conf['backreferences_dir'],
'%s.examples.new' % backref)
if os.path.isfile(path):
_replace_md5(path)
else:
level = gallery_conf['log_level'].get('backreference_missing',
'warning')
func = getattr(logger, level)
func('Could not find backreferences file: %s' % (path,))
func('The backreferences are likely to be erroneous '
'due to file system case insensitivity.')
| bsd-3-clause | 6,789,566,557,589,119,000 | 34.985663 | 79 | 0.555777 | false |
snark/ignorance | tests/test_git_walk.py | 1 | 6883 | import ignorance
import os
try:
# pathlib is in python stdlib in python 3.5+
from pathlib import Path
except ImportError:
from pathlib2 import Path
import pytest
def test_basic_walk(tmpdir_builder):
path = tmpdir_builder.setup('git/basic_match')
files = []
for r, d, f in ignorance.git.walk(path):
files.extend(f)
assert files == ['.gitignore', 'bam', 'foo', 'ignored', 'zap']
def test_negation(tmpdir_builder):
path = tmpdir_builder.setup('git/negation')
files = []
for r, d, f in ignorance.git.walk(path):
files.extend(f)
assert 'bar' in files
assert 'baz.tmpx' in files
assert 'override.tmp' in files
assert 'quux' in files
assert 'foo.tmp' not in files
assert 'order_counts.tmp' not in files
def test_overrides(tmpdir_builder):
path = tmpdir_builder.setup('git/negation')
pathobj = Path(path)
files = []
overrides = ['*.tmpx', '!foo.tmp', 'override.*']
for r, d, fs in ignorance.git.walk(path, overrides=overrides):
fs = [str(Path(os.path.join(r, f)).relative_to(pathobj)) for f in fs]
files.extend(fs)
assert 'bar' in files
assert 'baz.tmpx' not in files
assert 'override.tmp' not in files
assert 'zap/baz/quux' in files
assert 'foo.tmp' in files
assert 'order_counts.tmp' not in files
assert 'zap/foo.tmp' in files
# Overrides are rooted to the starting directory.
files = []
overrides = ['!foo.tmp', 'zap/foo.tmp']
for r, d, fs in ignorance.git.walk(path, overrides=overrides):
fs = [str(Path(os.path.join(r, f)).relative_to(pathobj)) for f in fs]
files.extend(fs)
assert 'foo.tmp' in files
assert 'zap/foo' not in files
def test_directory_only(tmpdir_builder):
path = tmpdir_builder.setup('git/directory-only')
pathobj = Path(path)
files = []
for r, d, fs in ignorance.git.walk(path):
fs = [str(Path(os.path.join(r, f)).relative_to(pathobj)) for f in fs]
files.extend(fs)
assert 'foo/bar' not in files
assert 'foo/baz' not in files
assert 'foo/foo' not in files
assert 'bar/bar' not in files
assert 'bar/baz' not in files
assert 'bar/foo' not in files
assert 'baz/bar' not in files
# foo/ is directory only, so...
assert 'baz/foo' in files
# Unmatched by anything.
assert 'baz/baz' in files
def test_ignore_completely(tmpdir_builder):
path = tmpdir_builder.setup('git/ignore-completely-1')
pathobj = Path(path)
files = []
for r, d, fs in ignorance.git.walk(path):
fs = [str(Path(os.path.join(r, f)).relative_to(pathobj)) for f in fs]
files.extend(fs)
# Default ignore is '.git'
assert '.git/foo' not in files
assert 'foo' in files
assert 'bar/baz' in files
assert 'bar/zap' in files
assert 'baz/.git' not in files
# Ignore-completely may be changed in the caller
files = []
for r, d, fs in ignorance.git.walk(path,
ignore_completely=['foo', 'bar/']):
fs = [str(Path(os.path.join(r, f)).relative_to(pathobj)) for f in fs]
files.extend(fs)
assert '.git/foo' not in files
assert '.git/bar' in files
assert 'foo' not in files
assert 'bar/baz' not in files
assert 'bar/zap' not in files
assert 'baz/.git' in files
assert 'zap/foo' not in files
# No negation rules allowed in ignore-completely
with pytest.raises(ValueError) as einfo:
for r, d, fs in ignorance.git.walk(
path, ignore_completely=['foo', 'bar/', '!baz']):
pass
assert str(einfo.value) == 'negation rules are not allowed in the ignore'\
+ ' completely rules'
# Ignore-completely may be disabled in the caller
for r, d, fs in ignorance.git.walk(path, ignore_completely=False):
fs = [str(Path(os.path.join(r, f)).relative_to(pathobj)) for f in fs]
files.extend(fs)
assert '.git/foo' in files
assert '.git/bar' in files
assert 'foo' in files
assert 'bar/baz' in files
assert 'bar/zap' in files
assert 'baz/.git' in files
assert 'zap/foo' in files
path = tmpdir_builder.setup('git/ignore-completely-2')
# Ignore completely is non-overrideable within ignore files
pathobj = Path(path)
files = []
for r, d, fs in ignorance.git.walk(path):
fs = [str(Path(os.path.join(r, f)).relative_to(pathobj)) for f in fs]
files.extend(fs)
assert 'foo' in files
assert '.git/foo' not in files
assert 'bar/baz' in files
assert 'baz/.git' not in files
def test_nesting(tmpdir_builder):
path = tmpdir_builder.setup('git/nesting')
pathobj = Path(path)
files = []
for r, d, fs in ignorance.git.walk(path):
fs = [str(Path(os.path.join(r, f)).relative_to(pathobj)) for f in fs]
files.extend(fs)
assert 'foo' not in files
assert 'dir_a/foo' in files
assert 'dir_a/bar' not in files
assert 'dir_a/baz' in files
assert 'dir_b/foo' not in files
assert 'dir_b/bar' not in files
assert 'dir_b/baz' not in files
assert 'dir_b/dir_a/foo' not in files
# Anchoring is relative *to the gitignore file*
assert 'dir_b/dir_a/bar' in files
assert 'dir_b/dir_a/baz' not in files
def test_anchoring(tmpdir_builder):
path = tmpdir_builder.setup('git/anchoring')
pathobj = Path(path)
files = []
for r, d, fs in ignorance.git.walk(path):
fs = [str(Path(os.path.join(r, f)).relative_to(pathobj)) for f in fs]
files.extend(fs)
# foo is unanchored
assert not any([f for f in files if 'foo' in f])
# dir_a/bar is anchored to the .gitignore file
assert 'dir_a/bar' not in files
assert 'dir_b/dir_a/bar' in files
# dir_a/baz is not anchored, due to a double-asterisk
assert 'dir_a/baz' not in files
assert 'dir_b/dir_a/baz' not in files
# */zap is anchored to the .gitignore file
assert 'dir_a/zap' not in files
assert 'dir_b/zap' not in files
assert 'dir_c/zap' not in files
assert 'dir_b/dir_a/zap' in files
assert 'dir_c/1/zap' in files
assert 'dir_c/2/1/zap' in files
# Any quux under dir_c should be ignored, due to a double-asterisk
assert 'dir_a/quux' in files
assert 'quux' in files
assert 'dir_c/quux' not in files
assert 'dir_c/1/quux' not in files
assert 'dir_c/2/1/quux' not in files
# Leading slash anchors to the root
assert 'xyzzy' not in files
assert 'dir_a/xyzzy' in files
# Finally, any .eggs file under spam/ should be ignored
assert 'dir_a/spam.eggs' in files
assert 'spam/ham/eggs' in files
assert 'spam/spam/eggs' in files
assert 'spam/ham.eggs' not in files
assert 'spam/ham/ham.eggs' not in files
assert 'spam/ham/spam.eggs' not in files
assert 'spam/spam/ham.eggs' not in files
assert 'spam/spam/spam.eggs' not in files
| isc | 1,623,514,140,519,473,200 | 34.663212 | 78 | 0.634171 | false |
renmengye/resnet | resnet/utils/lr_schedule.py | 1 | 3191 | """Learning rate scheduler utilities."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from resnet.utils import logger
log = logger.get()
class FixedLearnRateScheduler(object):
"""Adjusts learning rate according to a fixed schedule."""
def __init__(self, sess, model, base_lr, lr_decay_steps, lr_list=None):
"""
Args:
sess: TensorFlow session object.
model: Model object.
base_lr: Base learning rate.
lr_decay_steps: A list of step number which we perform learning decay.
lr_list: A list of learning rate decay multiplier. By default, all 0.1.
"""
self.model = model
self.sess = sess
self.lr = base_lr
self.lr_list = lr_list
self.lr_decay_steps = lr_decay_steps
self.model.assign_lr(self.sess, self.lr)
def step(self, niter):
"""Adds to counter. Adjusts learning rate if necessary.
Args:
niter: Current number of iterations.
"""
if len(self.lr_decay_steps) > 0:
if (niter + 1) == self.lr_decay_steps[0]:
if self.lr_list is not None:
self.lr = self.lr_list[0]
else:
self.lr *= 0.1 ## Divide 10 by default!!!
self.model.assign_lr(self.sess, self.lr)
self.lr_decay_steps.pop(0)
log.warning("LR decay steps {}".format(self.lr_decay_steps))
if self.lr_list is not None:
self.lr_list.pop(0)
elif (niter + 1) > self.lr_decay_steps[0]:
ls = self.lr_decay_steps
while len(ls) > 0 and (niter + 1) > ls[0]:
ls.pop(0)
log.warning("LR decay steps {}".format(self.lr_decay_steps))
if self.lr_list is not None:
self.lr = self.lr_list.pop(0)
else:
self.lr *= 0.1
self.model.assign_lr(self.sess, self.lr)
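# Usage sketch: typical wiring of the fixed schedule inside a training loop.
# ``sess`` and ``model`` stand for an already-built TensorFlow session and a
# model object exposing assign_lr(); the step counts and decayed rates below are
# illustrative assumptions, not values taken from this repository.
def _example_fixed_schedule_loop(sess, model, num_steps=80000):
  """Decay the learning rate 0.1 -> 0.01 at 40k steps and -> 0.001 at 60k."""
  scheduler = FixedLearnRateScheduler(
      sess, model, base_lr=0.1, lr_decay_steps=[40000, 60000],
      lr_list=[0.01, 0.001])
  for niter in range(num_steps):
    # ... run one optimization step with the model here ...
    scheduler.step(niter)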
class ExponentialLearnRateScheduler(object):
"""Adjusts learning rate according to an exponential decay schedule."""
def __init__(self, sess, model, base_lr, offset_steps, total_steps, final_lr,
interval):
"""
Args:
sess: TensorFlow session object.
model: Model object.
base_lr: Base learning rate.
offset_steps: Initial non-decay steps.
total_steps: Total number of steps.
final_lr: Final learning rate by the end of training.
interval: Number of steps in between learning rate updates (staircase).
"""
self.model = model
self.sess = sess
self.lr = base_lr
self.offset_steps = offset_steps
self.total_steps = total_steps
self.time_constant = (total_steps - offset_steps) / np.log(base_lr /
final_lr)
self.final_lr = final_lr
self.interval = interval
self.model.assign_lr(self.sess, self.lr)
def step(self, niter):
"""Adds to counter. Adjusts learning rate if necessary.
Args:
niter: Current number of iterations.
"""
if niter > self.offset_steps:
steps2 = niter - self.offset_steps
if steps2 % self.interval == 0:
        new_lr = self.lr * np.exp(-steps2 / self.time_constant)
self.model.assign_lr(self.sess, new_lr)
| mit | -5,248,840,209,165,210,000 | 32.239583 | 79 | 0.606393 | false |
sunjinopensource/threadactive | examples/example1/main.py | 1 | 1330 | import time
import threading
import threadactive
class BackWorker(threadactive.Agent):
def tick(self):
threadactive.Agent.tick(self)
print("[%s][%d] front" % (threading.current_thread().getName(), time.clock()) )
self.print_in_front2()
self.print_in_back()
time.sleep(1)
@threadactive.backend
def print_in_back(self, *args, **kwargs):
print("[%s][%d] back" % (threading.current_thread().getName(), time.clock()) )
self.print_in_back2()
if time.clock() > 3:
self.back_to_front()
@threadactive.frontend
def back_to_front(self, *args, **kwargs):
print("[%s][%d] back to front" % (threading.current_thread().getName(), time.clock()) )
@threadactive.frontend
def print_in_front2(self, *args, **kwargs):
print("[%s][%d] front2" % (threading.current_thread().getName(), time.clock()) )
@threadactive.backend
def print_in_back2(self, *args, **kwargs):
print("[%s][%d] back2" % (threading.current_thread().getName(), time.clock()) )
def main():
i = 0
bw = BackWorker()
while True:
bw.tick()
# restart backend thread
i += 1
if i > 5:
bw.stop_backend()
bw.start_backend()
i = 0
if __name__ == '__main__':
main() | mit | 4,238,513,690,836,113,000 | 26.729167 | 95 | 0.56391 | false |
kret0s/gnuhealth-live | tryton/server/trytond-3.8.3/trytond/tools/datetime_strftime.py | 1 | 1478 | # This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
# Copyright (c) 2002-2007 John D. Hunter; All Rights Reserved
import time
def datetime_strftime(date, fmt):
'''
Allow datetime strftime formatting for years before 1900.
See http://bugs.python.org/issue1777412
'''
if date.year > 1900:
return date.strftime(fmt)
def _findall(text, substr):
# Also finds overlaps
sites = []
i = 0
while True:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i = j + 1
return sites
year = date.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year) // 28) * 28
timetuple = date.timetuple()
string1 = time.strftime(fmt, (year,) + timetuple[1:])
sites1 = _findall(string1, str(year))
string2 = time.strftime(fmt, (year + 28,) + timetuple[1:])
sites2 = _findall(string2, str(year + 28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
syear = "%4d" % (date.year,)
for site in sites:
string1 = string1[:site] + syear + string1[site + 4:]
return string1
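# Usage sketch: time.strftime() cannot handle years before 1900 on some
# platforms, which is what the helper above works around.  The date below is an
# arbitrary illustrative value.
def _example_pre_1900_format():
    import datetime
    date = datetime.date(1854, 3, 31)
    # Expected to return '1854-03-31' even though the year predates 1900.
    return datetime_strftime(date, '%Y-%m-%d')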
| gpl-3.0 | -6,176,057,413,424,030,000 | 28.56 | 72 | 0.583221 | false |
codexgigassys/codex-backend | src/Utils/PEHeaderReader.py | 1 | 4752 | # Copyright (C) 2016 Deloitte Argentina.
# This file is part of CodexGigas - https://github.com/codexgigassys/
# See the file 'LICENSE' for copying permission.
import pefile
import math
import os
import sys
import shutil
import time
from test import test
class PEHeaderReader():
# def __init__(self,file):
# self.pe=pefile.PE(file,fast_load=False)
# #self.pe=pefile.PE(file,fast_load=True)
def __init__(self, data):
self.pe = None
try:
self.pe = pefile.PE(data=data, fast_load=True)
except Exception, e:
print str(e)
return None
# try:
# self.pe=pefile.PE(data=data,fast_load=False)
# except:
# self.pe=pefile.PE(data=data,fast_load=True)
def get_import_size(self):
        # self.pe.parse_data_directories()  # only needed if constructed with fast_load.
sizes = []
for entry in self.pe.DIRECTORY_ENTRY_IMPORT:
sizes.append(len(entry.imports))
return sizes
def get_import_size_stats(self):
        # self.pe.parse_data_directories()  # only needed if constructed with fast_load.
total = 0
if (self.pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT']].VirtualAddress == 0):
return 0, 0, 0
for entry in self.pe.DIRECTORY_ENTRY_IMPORT:
total = total + len(entry.imports)
# print entry.dll
# for imp in entry.imports:
# print '\t', hex(imp.address), imp.name
cant_librerias = (len(self.pe.DIRECTORY_ENTRY_IMPORT))
total_imports = total
promedio = total / cant_librerias
return total_imports, cant_librerias, promedio
def get_section_stats(self):
real_sum = 0
virtual_sum = 0
w_e = 0
w_real_sum = 0
w_virtual_sum = 0
for section in self.pe.sections:
real = int(hex(section.SizeOfRawData), 16)
virtual = int(hex(section.Misc_VirtualSize), 16)
real_sum += real
virtual_sum += virtual
# print(hex(section.Characteristics))
if (section.__dict__.get('IMAGE_SCN_MEM_WRITE', False) and
section.__dict__.get('IMAGE_SCN_MEM_EXECUTE', False)):
# print("Write Exe")
w_e += 1
w_real_sum += real
w_virtual_sum += virtual
# print (section.Name, real,virtual,rate)
# print("")
        return real_sum, virtual_sum, w_e, w_real_sum, w_virtual_sum
def getArquitecture(self):
try:
if(self.pe.OPTIONAL_HEADER.Magic == int("0x020B", 16)):
return ("PE+")
elif(self.pe.OPTIONAL_HEADER.Magic == int("0x010B", 16)):
return ("PE")
elif(self.pe.OPTIONAL_HEADER.Magic == int("0x0107", 16)):
return ("IMG_ROM")
else:
return "UNKNOWN"
except pefile.PEFormatError:
return "FORMAT"
return None
def getImports(self):
if (self.pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT']].VirtualAddress == 0):
return None
d = {}
# print(self.pe.DIRECTORY_ENTRY_IMPORT)
for entry in self.pe.DIRECTORY_ENTRY_IMPORT:
aux = []
for i in range(len(entry.dll)):
if(ord(entry.dll[i]) >= 128):
aux.append('.')
else:
aux.append(entry.dll[i])
dll_name = "".join(aux)
# print entry.dll
# print entry.imports
l = []
for imp in entry.imports:
l.append(str(imp.name))
# print '\t', hex(imp.address), imp.name
d[unicode(str(dll_name), "utf-8")] = l
return d
def load(self):
self.pe.parse_data_directories()
# ****************TEST_CODE******************
def testCode():
file = "../Test_files/test.exe"
data = open(file, "rb").read()
start_time = time.time()
cr = PEHeaderReader(data=data)
cr.load()
total_imports, cant_librerias, promedio = cr.get_import_size_stats()
real, virtual, w_e, w_real_sum, w_virtual_sum = cr.get_section_stats()
elapsed = time.time() - start_time
line1 = str(total_imports) + "|" + \
str(cant_librerias) + "|" + str(promedio)
line2 = str(real) + "|" + str(virtual) + "|" + str(w_e) + \
"|" + str(w_real_sum) + "|" + str(w_virtual_sum)
print(line1)
print(line2)
imp = cr.getImports()
print(str(imp))
print("Elapsed time: " + str(elapsed))
# ****************TEST_EXECUTE******************
test("-test_PEHeaderReader", testCode)
| mit | -6,858,042,678,559,217,000 | 29.658065 | 128 | 0.53851 | false |
chovanecm/sacredboard | sacredboard/app/data/pymongo/metricsdao.py | 1 | 3216 | """
Module responsible for accessing the Metrics data in MongoDB.
Issue: https://github.com/chovanecm/sacredboard/issues/60
"""
from bson import ObjectId
from bson.errors import InvalidId
from sacredboard.app.data import NotFoundError
from .genericdao import GenericDAO
from ..metricsdao import MetricsDAO
class MongoMetricsDAO(MetricsDAO):
"""Implementation of MetricsDAO for MongoDB."""
def __init__(self, generic_dao: GenericDAO):
"""
Create new metrics accessor for MongoDB.
:param generic_dao: A configured generic MongoDB data access object
pointing to an appropriate database.
"""
self.generic_dao = generic_dao
self.metrics_collection_name = "metrics"
"""Name of the MongoDB collection with metrics."""
def get(self, run_id, metric_id):
"""
Read a metric of the given id and run.
The returned object has the following format (timestamps are datetime
objects).
.. code::
{"steps": [0,1,20,40,...],
"timestamps": [timestamp1,timestamp2,timestamp3,...],
"values": [0,1 2,3,4,5,6,...],
"name": "name of the metric",
"metric_id": "metric_id",
"run_id": "run_id"}
:param run_id: ID of the Run that the metric belongs to.
        :param metric_id: The ID of the metric.
:return: The whole metric as specified.
:raise NotFoundError
"""
run_id = self._parse_run_id(run_id)
query = self._build_query(run_id, metric_id)
row = self._read_metric_from_db(metric_id, run_id, query)
metric = self._to_intermediary_object(row)
return metric
def delete(self, run_id):
"""
Delete all metrics belonging to the given run.
:param run_id: ID of the Run that the metric belongs to.
"""
self.generic_dao.delete_record(
self.metrics_collection_name,
{"run_id": self._parse_run_id(run_id)})
def _read_metric_from_db(self, metric_id, run_id, query):
row = self.generic_dao.find_record(self.metrics_collection_name,
query)
if row is None:
raise NotFoundError("Metric %s for run %s not found."
% (metric_id, run_id))
return row
def _parse_run_id(self, run_id):
id = None
try:
id = int(run_id)
except ValueError:
id = run_id
return id
def _build_query(self, run_id, metric_id):
# Metrics in MongoDB is always an ObjectId
try:
id = ObjectId(metric_id)
return {"run_id": self._parse_run_id(run_id), "_id": id}
except InvalidId as ex:
raise NotFoundError("Metric Id %s is invalid "
"ObjectId in MongoDB" % metric_id) from ex
def _to_intermediary_object(self, row):
return {
"metric_id": str(row["_id"]),
"run_id": row["run_id"],
"name": row["name"],
"steps": row["steps"],
"timestamps": row["timestamps"],
"values": row["values"],
}
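# Usage sketch: the intended call pattern once a GenericDAO has been configured
# to point at the Sacred database.  The run id and the 24-character ObjectId
# string are illustrative assumptions.
def _example_read_metric(generic_dao: GenericDAO):
    """Return (step, value) pairs for one metric of run 1."""
    dao = MongoMetricsDAO(generic_dao)
    metric = dao.get(run_id="1", metric_id="58dcfc41263e8cc29ade7a26")
    return list(zip(metric["steps"], metric["values"]))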
| mit | 2,635,387,023,477,506,000 | 31.16 | 77 | 0.560634 | false |